From 901b908577b429c139823faff53797720749e8c6 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 16 Aug 2024 14:51:31 -0400 Subject: [PATCH 001/179] Smash key-gen into independent crate --- .github/workflows/tests.yml | 1 + Cargo.toml | 1 + deny.toml | 1 + processor/key-gen/Cargo.toml | 97 +++++++++++++++++++ processor/key-gen/LICENSE | 15 +++ .../{src/key_gen.rs => key-gen/src/lib.rs} | 0 6 files changed, 115 insertions(+) create mode 100644 processor/key-gen/Cargo.toml create mode 100644 processor/key-gen/LICENSE rename processor/{src/key_gen.rs => key-gen/src/lib.rs} (100%) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 05c259725..fabfaba9d 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -39,6 +39,7 @@ jobs: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p serai-message-queue \ -p serai-processor-messages \ + -p serai-processor-key-gen \ -p serai-processor \ -p tendermint-machine \ -p tributary-chain \ diff --git a/Cargo.toml b/Cargo.toml index bce4ebe38..f0bdd6a89 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,6 +70,7 @@ members = [ "message-queue", "processor/messages", + "processor/key-gen", "processor", "coordinator/tributary/tendermint", diff --git a/deny.toml b/deny.toml index e5c72f0cf..0d82cb8ab 100644 --- a/deny.toml +++ b/deny.toml @@ -46,6 +46,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-message-queue" }, { allow = ["AGPL-3.0"], name = "serai-processor-messages" }, + { allow = ["AGPL-3.0"], name = "serai-processor-key-gen" }, { allow = ["AGPL-3.0"], name = "serai-processor" }, { allow = ["AGPL-3.0"], name = "tributary-chain" }, diff --git a/processor/key-gen/Cargo.toml b/processor/key-gen/Cargo.toml new file mode 100644 index 000000000..ed6e73838 --- /dev/null +++ b/processor/key-gen/Cargo.toml @@ -0,0 +1,97 @@ +[package] +name = "serai-processor-key-gen" +version = "0.1.0" +description = "Key generation for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/key-gen" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +# Macros +async-trait = { version = "0.1", default-features = false } +zeroize = { version = "1", default-features = false, features = ["std"] } +thiserror = { version = "1", default-features = false } + +# Libs +rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } +rand_chacha = { version = "0.3", default-features = false, features = ["std"] } + +# Encoders +const-hex = { version = "1", default-features = false } +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } +serde_json = { version = "1", default-features = false, features = ["std"] } + +# Cryptography +ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } + +blake2 = { version = "0.10", default-features = false, features = ["std"] } +transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std"] } +ec-divisors = { package = "ec-divisors", path = "../crypto/evrf/divisors", default-features = false } +dkg = { package 
= "dkg", path = "../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } +frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] } +frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false } + +# Bitcoin/Ethereum +k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true } + +# Bitcoin +secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } +bitcoin-serai = { path = "../networks/bitcoin", default-features = false, features = ["std"], optional = true } + +# Ethereum +ethereum-serai = { path = "../networks/ethereum", default-features = false, optional = true } + +# Monero +dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true } +monero-simple-request-rpc = { path = "../networks/monero/rpc/simple-request", default-features = false, optional = true } +monero-wallet = { path = "../networks/monero/wallet", default-features = false, features = ["std", "multisig", "compile-time-generators"], optional = true } + +# Application +log = { version = "0.4", default-features = false, features = ["std"] } +env_logger = { version = "0.10", default-features = false, features = ["humantime"], optional = true } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +zalloc = { path = "../common/zalloc" } +serai-db = { path = "../common/db" } +serai-env = { path = "../common/env", optional = true } +# TODO: Replace with direct usage of primitives +serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] } + +messages = { package = "serai-processor-messages", path = "./messages" } + +message-queue = { package = "serai-message-queue", path = "../message-queue", optional = true } + +[dev-dependencies] +frost = { package = "modular-frost", path = "../crypto/frost", features = ["tests"] } + +sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } + +ethereum-serai = { path = "../networks/ethereum", default-features = false, features = ["tests"] } + +dockertest = "0.4" +serai-docker-tests = { path = "../tests/docker" } + +[features] +secp256k1 = ["k256", "dkg/evrf-secp256k1", "frost/secp256k1"] +bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"] + +ethereum = ["secp256k1", "ethereum-serai/tests"] + +ed25519 = ["dalek-ff-group", "dkg/evrf-ed25519", "frost/ed25519"] +monero = ["ed25519", "monero-simple-request-rpc", "monero-wallet", "serai-client/monero"] + +binaries = ["env_logger", "serai-env", "message-queue"] +parity-db = ["serai-db/parity-db"] +rocksdb = ["serai-db/rocksdb"] diff --git a/processor/key-gen/LICENSE b/processor/key-gen/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/key-gen/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/src/key_gen.rs b/processor/key-gen/src/lib.rs similarity index 100% rename from processor/src/key_gen.rs rename to processor/key-gen/src/lib.rs From 722e86b84b61cf0def306ea9804bc7a3e6d43f05 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 16 Aug 2024 17:01:45 -0400 Subject: [PATCH 002/179] Smash key-gen out of processor Resolves some bad assumptions made regarding keys being unique or not. --- Cargo.lock | 21 +- processor/Cargo.toml | 1 - processor/key-gen/Cargo.toml | 78 ++----- processor/key-gen/README.md | 8 + processor/key-gen/src/db.rs | 144 ++++++++++++ processor/key-gen/src/generators.rs | 38 +++ processor/key-gen/src/lib.rs | 349 +++++++++------------------- processor/src/lib.rs | 2 +- processor/src/main.rs | 2 +- 9 files changed, 333 insertions(+), 310 deletions(-) create mode 100644 processor/key-gen/README.md create mode 100644 processor/key-gen/src/db.rs create mode 100644 processor/key-gen/src/generators.rs diff --git a/Cargo.lock b/Cargo.lock index ff21fe66c..62952da0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8564,7 +8564,6 @@ version = "0.1.0" dependencies = [ "async-trait", "bitcoin-serai", - "blake2", "borsh", "ciphersuite", "const-hex", @@ -8600,6 +8599,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "serai-processor-key-gen" +version = "0.1.0" +dependencies = [ + "blake2", + "borsh", + "ciphersuite", + "dkg", + "ec-divisors", + "flexible-transcript", + "log", + "parity-scale-codec", + "rand_chacha", + "rand_core", + "serai-db", + "serai-processor-messages", + "serai-validator-sets-primitives", + "zeroize", +] + [[package]] name = "serai-processor-messages" version = "0.1.0" diff --git a/processor/Cargo.toml b/processor/Cargo.toml index fa2f643c3..2d386f2d0 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -36,7 +36,6 @@ serde_json = { version = "1", default-features = false, features = ["std"] } # Cryptography ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } -blake2 = { version = "0.10", default-features = false, features = ["std"] } transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std"] } ec-divisors = { package = "ec-divisors", path = "../crypto/evrf/divisors", default-features = false } dkg = { package = "dkg", path = "../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } diff --git a/processor/key-gen/Cargo.toml b/processor/key-gen/Cargo.toml index ed6e73838..f1f005647 100644 --- a/processor/key-gen/Cargo.toml +++ b/processor/key-gen/Cargo.toml @@ -13,85 +13,35 @@ publish = false all-features = true rustdoc-args = ["--cfg", "docsrs"] +[package.metadata.cargo-machete] +ignored = ["scale"] + [lints] workspace = true [dependencies] # Macros -async-trait = { version = "0.1", default-features = false } zeroize = { version = "1", default-features = false, features = ["std"] } -thiserror = { version = "1", default-features = false } # Libs rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } rand_chacha = { version = "0.3", default-features = false, features = ["std"] } -# Encoders -const-hex = { version = "1", default-features = false } -hex = { version = "0.4", default-features = false, features = ["std"] } -scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } 
-borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -serde_json = { version = "1", default-features = false, features = ["std"] } - # Cryptography -ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } - blake2 = { version = "0.10", default-features = false, features = ["std"] } -transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std"] } -ec-divisors = { package = "ec-divisors", path = "../crypto/evrf/divisors", default-features = false } -dkg = { package = "dkg", path = "../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } -frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] } -frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false } +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std"] } +ec-divisors = { package = "ec-divisors", path = "../../crypto/evrf/divisors", default-features = false } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } +dkg = { package = "dkg", path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } -# Bitcoin/Ethereum -k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true } +# Substrate +serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } -# Bitcoin -secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } -bitcoin-serai = { path = "../networks/bitcoin", default-features = false, features = ["std"], optional = true } - -# Ethereum -ethereum-serai = { path = "../networks/ethereum", default-features = false, optional = true } - -# Monero -dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true } -monero-simple-request-rpc = { path = "../networks/monero/rpc/simple-request", default-features = false, optional = true } -monero-wallet = { path = "../networks/monero/wallet", default-features = false, features = ["std", "multisig", "compile-time-generators"], optional = true } +# Encoders +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } # Application log = { version = "0.4", default-features = false, features = ["std"] } -env_logger = { version = "0.10", default-features = false, features = ["humantime"], optional = true } -tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } - -zalloc = { path = "../common/zalloc" } -serai-db = { path = "../common/db" } -serai-env = { path = "../common/env", optional = true } -# TODO: Replace with direct usage of primitives -serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] } - -messages = { package = "serai-processor-messages", path = "./messages" } - -message-queue = { package = "serai-message-queue", path = "../message-queue", optional = true } - -[dev-dependencies] -frost = { package = "modular-frost", path = "../crypto/frost", features = ["tests"] } - -sp-application-crypto = { git = 
"https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } - -ethereum-serai = { path = "../networks/ethereum", default-features = false, features = ["tests"] } - -dockertest = "0.4" -serai-docker-tests = { path = "../tests/docker" } - -[features] -secp256k1 = ["k256", "dkg/evrf-secp256k1", "frost/secp256k1"] -bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"] - -ethereum = ["secp256k1", "ethereum-serai/tests"] - -ed25519 = ["dalek-ff-group", "dkg/evrf-ed25519", "frost/ed25519"] -monero = ["ed25519", "monero-simple-request-rpc", "monero-wallet", "serai-client/monero"] - -binaries = ["env_logger", "serai-env", "message-queue"] -parity-db = ["serai-db/parity-db"] -rocksdb = ["serai-db/rocksdb"] +serai-db = { path = "../../common/db" } +messages = { package = "serai-processor-messages", path = "../messages" } diff --git a/processor/key-gen/README.md b/processor/key-gen/README.md new file mode 100644 index 000000000..c28357ba0 --- /dev/null +++ b/processor/key-gen/README.md @@ -0,0 +1,8 @@ +# Key Generation + +This library implements the Distributed Key Generation (DKG) for the Serai +protocol. Two invocations of the eVRF-based DKG are performed, one for Ristretto +(to have a key to oraclize values onto the Serai blockchain with) and one for +the external network's curve. + +This library is interacted with via the `serai-processor-messages::key_gen` API. diff --git a/processor/key-gen/src/db.rs b/processor/key-gen/src/db.rs new file mode 100644 index 000000000..d597cb7e4 --- /dev/null +++ b/processor/key-gen/src/db.rs @@ -0,0 +1,144 @@ +use core::marker::PhantomData; +use std::collections::HashMap; + +use zeroize::Zeroizing; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use dkg::{Participant, ThresholdCore, ThresholdKeys, evrf::EvrfCurve}; + +use serai_validator_sets_primitives::Session; + +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, create_db}; + +use crate::KeyGenParams; + +pub(crate) struct Params { + pub(crate) t: u16, + pub(crate) n: u16, + pub(crate) substrate_evrf_public_keys: + Vec<<::EmbeddedCurve as Ciphersuite>::G>, + pub(crate) network_evrf_public_keys: + Vec<<::EmbeddedCurve as Ciphersuite>::G>, +} + +#[derive(BorshSerialize, BorshDeserialize)] +struct RawParams { + t: u16, + substrate_evrf_public_keys: Vec<[u8; 32]>, + network_evrf_public_keys: Vec>, +} + +#[derive(BorshSerialize, BorshDeserialize)] +pub(crate) struct Participations { + pub(crate) substrate_participations: HashMap>, + pub(crate) network_participations: HashMap>, +} + +create_db!( + KeyGenDb { + ParamsDb: (session: &Session) -> RawParams, + ParticipationsDb: (session: &Session) -> Participations, + KeySharesDb: (session: &Session) -> Vec, + } +); + +pub(crate) struct KeyGenDb(PhantomData
<P>
);
+impl<P: KeyGenParams> KeyGenDb
<P>
{ + pub(crate) fn set_params(txn: &mut impl DbTxn, session: Session, params: Params
<P>
) { + assert_eq!(params.substrate_evrf_public_keys.len(), params.network_evrf_public_keys.len()); + + ParamsDb::set( + txn, + &session, + &RawParams { + t: params.t, + substrate_evrf_public_keys: params + .substrate_evrf_public_keys + .into_iter() + .map(|key| key.to_bytes()) + .collect(), + network_evrf_public_keys: params + .network_evrf_public_keys + .into_iter() + .map(|key| key.to_bytes().as_ref().to_vec()) + .collect(), + }, + ) + } + + pub(crate) fn params(getter: &impl Get, session: Session) -> Option> { + ParamsDb::get(getter, &session).map(|params| Params { + t: params.t, + n: params + .network_evrf_public_keys + .len() + .try_into() + .expect("amount of keys exceeded the amount allowed during a DKG"), + substrate_evrf_public_keys: params + .substrate_evrf_public_keys + .into_iter() + .map(|key| { + <::EmbeddedCurve as Ciphersuite>::read_G(&mut key.as_slice()) + .unwrap() + }) + .collect(), + network_evrf_public_keys: params + .network_evrf_public_keys + .into_iter() + .map(|key| { + <::EmbeddedCurve as Ciphersuite>::read_G::<&[u8]>( + &mut key.as_ref(), + ) + .unwrap() + }) + .collect(), + }) + } + + pub(crate) fn set_participations( + txn: &mut impl DbTxn, + session: Session, + participations: &Participations, + ) { + ParticipationsDb::set(txn, &session, participations) + } + pub(crate) fn participations(getter: &impl Get, session: Session) -> Option { + ParticipationsDb::get(getter, &session) + } + + pub(crate) fn set_key_shares( + txn: &mut impl DbTxn, + session: Session, + substrate_keys: &[ThresholdKeys], + network_keys: &[ThresholdKeys], + ) { + assert_eq!(substrate_keys.len(), network_keys.len()); + + let mut keys = Zeroizing::new(vec![]); + for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) { + keys.extend(substrate_keys.serialize().as_slice()); + keys.extend(network_keys.serialize().as_slice()); + } + KeySharesDb::set(txn, &session, &keys); + } + + #[allow(clippy::type_complexity)] + pub(crate) fn key_shares( + getter: &impl Get, + session: Session, + ) -> Option<(Vec>, Vec>)> { + let keys = KeySharesDb::get(getter, &session)?; + let mut keys: &[u8] = keys.as_ref(); + + let mut substrate_keys = vec![]; + let mut network_keys = vec![]; + while !keys.is_empty() { + substrate_keys.push(ThresholdKeys::new(ThresholdCore::read(&mut keys).unwrap())); + let mut these_network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys).unwrap()); + P::tweak_keys(&mut these_network_keys); + network_keys.push(these_network_keys); + } + Some((substrate_keys, network_keys)) + } +} diff --git a/processor/key-gen/src/generators.rs b/processor/key-gen/src/generators.rs new file mode 100644 index 000000000..3570ca6e6 --- /dev/null +++ b/processor/key-gen/src/generators.rs @@ -0,0 +1,38 @@ +use core::any::{TypeId, Any}; +use std::{ + sync::{LazyLock, Mutex}, + collections::HashMap, +}; + +use dkg::evrf::*; + +use serai_validator_sets_primitives::MAX_KEY_SHARES_PER_SET; + +/// A cache of the generators used by the eVRF DKG. +/// +/// This performs a lookup of the Ciphersuite to its generators. Since the Ciphersuite is a +/// generic, this takes advantage of `Any`. This static is isolated in a module to ensure +/// correctness can be evaluated solely by reviewing these few lines of code. +/// +/// This is arguably over-engineered as of right now, as we only need generators for Ristretto +/// and N::Curve. 
By having this HashMap, we enable de-duplication of the Ristretto == N::Curve +/// case, and we automatically support the n-curve case (rather than hard-coding to the 2-curve +/// case). +static GENERATORS: LazyLock>> = + LazyLock::new(|| Mutex::new(HashMap::new())); + +pub(crate) fn generators() -> &'static EvrfGenerators { + GENERATORS + .lock() + .unwrap() + .entry(TypeId::of::()) + .or_insert_with(|| { + // If we haven't prior needed generators for this Ciphersuite, generate new ones + Box::leak(Box::new(EvrfGenerators::::new( + ((MAX_KEY_SHARES_PER_SET * 2 / 3) + 1).try_into().unwrap(), + MAX_KEY_SHARES_PER_SET.try_into().unwrap(), + ))) + }) + .downcast_ref() + .unwrap() +} diff --git a/processor/key-gen/src/lib.rs b/processor/key-gen/src/lib.rs index a059c350f..8d4e911fb 100644 --- a/processor/key-gen/src/lib.rs +++ b/processor/key-gen/src/lib.rs @@ -1,7 +1,8 @@ -use std::{ - io, - collections::{HashSet, HashMap}, -}; +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::{io, collections::HashMap}; use zeroize::Zeroizing; @@ -14,156 +15,41 @@ use ciphersuite::{ group::{Group, GroupEncoding}, Ciphersuite, Ristretto, }; -use dkg::{Participant, ThresholdCore, ThresholdKeys, evrf::*}; +use dkg::{Participant, ThresholdKeys, evrf::*}; use log::info; -use serai_client::validator_sets::primitives::{Session, KeyPair}; +use serai_validator_sets_primitives::Session; use messages::key_gen::*; -use crate::{Get, DbTxn, Db, create_db, networks::Network}; - -mod generators { - use core::any::{TypeId, Any}; - use std::{ - sync::{LazyLock, Mutex}, - collections::HashMap, - }; - - use frost::dkg::evrf::*; +use serai_db::{DbTxn, Db}; - use serai_client::validator_sets::primitives::MAX_KEY_SHARES_PER_SET; - - /// A cache of the generators used by the eVRF DKG. - /// - /// This performs a lookup of the Ciphersuite to its generators. Since the Ciphersuite is a - /// generic, this takes advantage of `Any`. This static is isolated in a module to ensure - /// correctness can be evaluated solely by reviewing these few lines of code. - /// - /// This is arguably over-engineered as of right now, as we only need generators for Ristretto - /// and N::Curve. By having this HashMap, we enable de-duplication of the Ristretto == N::Curve - /// case, and we automatically support the n-curve case (rather than hard-coding to the 2-curve - /// case). 
- static GENERATORS: LazyLock>> = - LazyLock::new(|| Mutex::new(HashMap::new())); - - pub(crate) fn generators() -> &'static EvrfGenerators { - GENERATORS - .lock() - .unwrap() - .entry(TypeId::of::()) - .or_insert_with(|| { - // If we haven't prior needed generators for this Ciphersuite, generate new ones - Box::leak(Box::new(EvrfGenerators::::new( - ((MAX_KEY_SHARES_PER_SET * 2 / 3) + 1).try_into().unwrap(), - MAX_KEY_SHARES_PER_SET.try_into().unwrap(), - ))) - }) - .downcast_ref() - .unwrap() - } -} +mod generators; use generators::generators; -#[derive(Debug)] -pub struct KeyConfirmed { - pub substrate_keys: Vec>, - pub network_keys: Vec>, -} - -create_db!( - KeyGenDb { - ParamsDb: (session: &Session) -> (u16, Vec<[u8; 32]>, Vec>), - ParticipationDb: (session: &Session) -> ( - HashMap>, - HashMap>, - ), - // GeneratedKeysDb, KeysDb use `()` for their value as we manually serialize their values - // TODO: Don't do that - GeneratedKeysDb: (session: &Session) -> (), - // These do assume a key is only used once across sets, which holds true if the threshold is - // honest - // TODO: Remove this assumption - KeysDb: (network_key: &[u8]) -> (), - SessionDb: (network_key: &[u8]) -> Session, - NetworkKeyDb: (session: Session) -> Vec, - } -); - -impl GeneratedKeysDb { - #[allow(clippy::type_complexity)] - fn read_keys( - getter: &impl Get, - key: &[u8], - ) -> Option<(Vec, (Vec>, Vec>))> { - let keys_vec = getter.get(key)?; - let mut keys_ref: &[u8] = keys_vec.as_ref(); - - let mut substrate_keys = vec![]; - let mut network_keys = vec![]; - while !keys_ref.is_empty() { - substrate_keys.push(ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap())); - let mut these_network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap()); - N::tweak_keys(&mut these_network_keys); - network_keys.push(these_network_keys); - } - Some((keys_vec, (substrate_keys, network_keys))) - } +mod db; +use db::{Params, Participations, KeyGenDb}; - fn save_keys( - txn: &mut impl DbTxn, - session: &Session, - substrate_keys: &[ThresholdKeys], - network_keys: &[ThresholdKeys], - ) { - let mut keys = Zeroizing::new(vec![]); - for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) { - keys.extend(substrate_keys.serialize().as_slice()); - keys.extend(network_keys.serialize().as_slice()); - } - txn.put(Self::key(session), keys); - } -} +/// Parameters for a key generation. +pub trait KeyGenParams { + /// The ID for this instantiation. + const ID: &'static str; -impl KeysDb { - fn confirm_keys( - txn: &mut impl DbTxn, - session: Session, - key_pair: &KeyPair, - ) -> (Vec>, Vec>) { - let (keys_vec, keys) = - GeneratedKeysDb::read_keys::(txn, &GeneratedKeysDb::key(&session)).unwrap(); - assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes()); - assert_eq!( - { - let network_key: &[u8] = key_pair.1.as_ref(); - network_key - }, - keys.1[0].group_key().to_bytes().as_ref(), - ); - txn.put(Self::key(key_pair.1.as_ref()), keys_vec); - NetworkKeyDb::set(txn, session, &key_pair.1.clone().into_inner()); - SessionDb::set(txn, key_pair.1.as_ref(), &session); - keys - } + /// The curve used for the external network. 
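+ ///
+ /// For illustration, a hypothetical implementor of this trait might look as
+ /// follows (the `Secp256k1` ciphersuite and the tweak are assumptions made
+ /// for this sketch, not anything this patch defines):
+ /// ```ignore
+ /// struct BitcoinKeyGenParams;
+ /// impl KeyGenParams for BitcoinKeyGenParams {
+ ///   const ID: &'static str = "Bitcoin";
+ ///   type ExternalNetworkCurve = Secp256k1;
+ ///   fn tweak_keys(keys: &mut ThresholdKeys<Self::ExternalNetworkCurve>) {
+ ///     // Apply whatever offset the network's signature scheme requires
+ ///   }
+ ///   // `encode_key` is left to its default implementation (`to_bytes`)
+ /// }
+ /// ```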
+ type ExternalNetworkCurve: EvrfCurve< + EmbeddedCurve: Ciphersuite< + G: ec_divisors::DivisorCurve::F>, + >, + >; - #[allow(clippy::type_complexity)] - fn keys( - getter: &impl Get, - network_key: &::G, - ) -> Option<(Session, (Vec>, Vec>))> { - let res = - GeneratedKeysDb::read_keys::(getter, &Self::key(network_key.to_bytes().as_ref()))?.1; - assert_eq!(&res.1[0].group_key(), network_key); - Some((SessionDb::get(getter, network_key.to_bytes().as_ref()).unwrap(), res)) - } + /// Tweaks keys as necessary/beneficial. + fn tweak_keys(keys: &mut ThresholdKeys); - pub fn substrate_keys_by_session( - getter: &impl Get, - session: Session, - ) -> Option>> { - let network_key = NetworkKeyDb::get(getter, session)?; - Some(GeneratedKeysDb::read_keys::(getter, &Self::key(&network_key))?.1 .0) + /// Encode keys as optimal. + /// + /// A default implementation is provided which calls the traditional `to_bytes`. + fn encode_key(key: ::G) -> Vec { + key.to_bytes().as_ref().to_vec() } } @@ -242,49 +128,44 @@ fn coerce_keys( (keys, faulty) } +/// An instance of the Serai key generation protocol. #[derive(Debug)] -pub struct KeyGen { +pub struct KeyGen { db: D, substrate_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, - network_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, + network_evrf_private_key: + Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, } -impl KeyGen { +impl KeyGen { + /// Create a new key generation instance. #[allow(clippy::new_ret_no_self)] pub fn new( db: D, substrate_evrf_private_key: Zeroizing< <::EmbeddedCurve as Ciphersuite>::F, >, - network_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, - ) -> KeyGen { + network_evrf_private_key: Zeroizing< + <::EmbeddedCurve as Ciphersuite>::F, + >, + ) -> KeyGen { KeyGen { db, substrate_evrf_private_key, network_evrf_private_key } } - pub fn in_set(&self, session: &Session) -> bool { - // We determine if we're in set using if we have the parameters for a session's key generation - // We only have these if we were told to generate a key for this session - ParamsDb::get(&self.db, session).is_some() - } - + /// Fetch the key shares for a specific session. #[allow(clippy::type_complexity)] - pub fn keys( + pub fn key_shares( &self, - key: &::G, - ) -> Option<(Session, (Vec>, Vec>))> { + session: Session, + ) -> Option<(Vec>, Vec>)> { // This is safe, despite not having a txn, since it's a static value // It doesn't change over time/in relation to other operations - KeysDb::keys::(&self.db, key) - } - - pub fn substrate_keys_by_session( - &self, - session: Session, - ) -> Option>> { - KeysDb::substrate_keys_by_session::(&self.db, session) + // It is solely set or unset + KeyGenDb::
<P>
::key_shares(&self.db, session) } + /// Handle a message from the coordinator. pub fn handle( &mut self, txn: &mut D::Transaction<'_>, @@ -292,10 +173,10 @@ impl KeyGen { ) -> Vec { const SUBSTRATE_KEY_CONTEXT: &[u8] = b"substrate"; const NETWORK_KEY_CONTEXT: &[u8] = b"network"; - fn context(session: Session, key_context: &[u8]) -> [u8; 32] { + fn context(session: Session, key_context: &[u8]) -> [u8; 32] { // TODO2: Also embed the chain ID/genesis block let mut transcript = RecommendedTranscript::new(b"Serai eVRF Key Gen"); - transcript.append_message(b"network", N::ID); + transcript.append_message(b"network", P::ID.as_bytes()); transcript.append_message(b"session", session.0.to_le_bytes()); transcript.append_message(b"key", key_context); (&(&transcript.challenge(b"context"))[.. 32]).try_into().unwrap() @@ -308,64 +189,68 @@ impl KeyGen { // Unzip the vector of eVRF keys let substrate_evrf_public_keys = evrf_public_keys.iter().map(|(key, _)| *key).collect::>(); + let (substrate_evrf_public_keys, mut faulty) = + coerce_keys::(&substrate_evrf_public_keys); + let network_evrf_public_keys = evrf_public_keys.into_iter().map(|(_, key)| key).collect::>(); - - let mut participation = Vec::with_capacity(2048); - let mut faulty = HashSet::new(); + let (network_evrf_public_keys, additional_faulty) = + coerce_keys::(&network_evrf_public_keys); + faulty.extend(additional_faulty); // Participate for both Substrate and the network fn participate( context: [u8; 32], threshold: u16, - evrf_public_keys: &[impl AsRef<[u8]>], + evrf_public_keys: &[::G], evrf_private_key: &Zeroizing<::F>, - faulty: &mut HashSet, output: &mut impl io::Write, ) { - let (coerced_keys, faulty_is) = coerce_keys::(evrf_public_keys); - for faulty_i in faulty_is { - faulty.insert(faulty_i); - } let participation = EvrfDkg::::participate( &mut OsRng, generators(), context, threshold, - &coerced_keys, + evrf_public_keys, evrf_private_key, ); participation.unwrap().write(output).unwrap(); } + + let mut participation = Vec::with_capacity(2048); participate::( - context::(session, SUBSTRATE_KEY_CONTEXT), + context::
<P>
(session, SUBSTRATE_KEY_CONTEXT), threshold, &substrate_evrf_public_keys, &self.substrate_evrf_private_key, - &mut faulty, &mut participation, ); - participate::( - context::(session, NETWORK_KEY_CONTEXT), + participate::( + context::
<P>
(session, NETWORK_KEY_CONTEXT), threshold, &network_evrf_public_keys, &self.network_evrf_private_key, - &mut faulty, &mut participation, ); // Save the params - ParamsDb::set( + KeyGenDb::
<P>
::set_params( txn, - &session, - &(threshold, substrate_evrf_public_keys, network_evrf_public_keys), + session, + Params { + t: threshold, + n: substrate_evrf_public_keys + .len() + .try_into() + .expect("amount of keys exceeded the amount allowed during a DKG"), + substrate_evrf_public_keys, + network_evrf_public_keys, + }, ); // Send back our Participation and all faulty parties - let mut faulty = faulty.into_iter().collect::>(); - faulty.sort(); - let mut res = Vec::with_capacity(faulty.len() + 1); + faulty.sort_unstable(); for faulty in faulty { res.push(ProcessorMessage::Blame { session, participant: faulty }); } @@ -377,13 +262,8 @@ impl KeyGen { CoordinatorMessage::Participation { session, participant, participation } => { info!("received participation from {:?} for {:?}", participant, session); - let (threshold, substrate_evrf_public_keys, network_evrf_public_keys) = - ParamsDb::get(txn, &session).unwrap(); - - let n = substrate_evrf_public_keys - .len() - .try_into() - .expect("performing a key gen with more than u16::MAX participants"); + let Params { t: threshold, n, substrate_evrf_public_keys, network_evrf_public_keys } = + KeyGenDb::
<P>
::params(txn, session).unwrap(); // Read these `Participation`s // If they fail basic sanity checks, fail fast @@ -399,7 +279,8 @@ impl KeyGen { return blame; }; let len_at_network_participation_start_pos = participation.len(); - let Ok(network_participation) = Participation::::read(&mut participation, n) + let Ok(network_participation) = + Participation::::read(&mut participation, n) else { return blame; }; @@ -413,16 +294,15 @@ impl KeyGen { // If we've already generated these keys, we don't actually need to save these // participations and continue. We solely have to verify them, as to identify malicious // participants and prevent DoSs, before returning - if txn.get(GeneratedKeysDb::key(&session)).is_some() { + if self.key_shares(session).is_some() { info!("already finished generating a key for {:?}", session); match EvrfDkg::::verify( &mut OsRng, generators(), - context::(session, SUBSTRATE_KEY_CONTEXT), + context::
<P>
(session, SUBSTRATE_KEY_CONTEXT), threshold, - // Ignores the list of participants who were faulty, as they were prior blamed - &coerce_keys::(&substrate_evrf_public_keys).0, + &substrate_evrf_public_keys, &HashMap::from([(participant, substrate_participation)]), ) .unwrap() @@ -434,13 +314,12 @@ impl KeyGen { } } - match EvrfDkg::::verify( + match EvrfDkg::::verify( &mut OsRng, generators(), - context::(session, NETWORK_KEY_CONTEXT), + context::
<P>
(session, NETWORK_KEY_CONTEXT), threshold, - // Ignores the list of participants who were faulty, as they were prior blamed - &coerce_keys::(&network_evrf_public_keys).0, + &network_evrf_public_keys, &HashMap::from([(participant, network_participation)]), ) .unwrap() @@ -467,17 +346,22 @@ impl KeyGen { // Since these are valid `Participation`s, save them let (mut substrate_participations, mut network_participations) = - ParticipationDb::get(txn, &session) - .unwrap_or((HashMap::with_capacity(1), HashMap::with_capacity(1))); + KeyGenDb::
<P>
::participations(txn, session).map_or_else( + || (HashMap::with_capacity(1), HashMap::with_capacity(1)), + |p| (p.substrate_participations, p.network_participations), + ); assert!( substrate_participations.insert(participant, substrate_participation).is_none() && network_participations.insert(participant, network_participation).is_none(), "received participation for someone multiple times" ); - ParticipationDb::set( + KeyGenDb::
<P>
::set_participations( txn, - &session, - &(substrate_participations.clone(), network_participations.clone()), + session, + &Participations { + substrate_participations: substrate_participations.clone(), + network_participations: network_participations.clone(), + }, ); // This block is taken from the eVRF DKG itself to evaluate the amount participating @@ -510,12 +394,12 @@ impl KeyGen { } // If we now have the threshold participating, verify their `Participation`s - fn verify_dkg( + fn verify_dkg( txn: &mut impl DbTxn, session: Session, true_if_substrate_false_if_network: bool, threshold: u16, - evrf_public_keys: &[impl AsRef<[u8]>], + evrf_public_keys: &[::G], substrate_participations: &mut HashMap>, network_participations: &mut HashMap>, ) -> Result, Vec> { @@ -542,7 +426,7 @@ impl KeyGen { match EvrfDkg::::verify( &mut OsRng, generators(), - context::( + context::
<P>
( session, if true_if_substrate_false_if_network { SUBSTRATE_KEY_CONTEXT @@ -551,8 +435,7 @@ impl KeyGen { }, ), threshold, - // Ignores the list of participants who were faulty, as they were prior blamed - &coerce_keys::(evrf_public_keys).0, + evrf_public_keys, &participations, ) .unwrap() @@ -570,10 +453,13 @@ impl KeyGen { blames.push(ProcessorMessage::Blame { session, participant }); } // Since we removed `Participation`s, write the updated versions to the database - ParticipationDb::set( + KeyGenDb::
<P>
::set_participations( txn, - &session, - &(substrate_participations.clone(), network_participations.clone()), + session, + &Participations { + substrate_participations: substrate_participations.clone(), + network_participations: network_participations.clone(), + }, ); Err(blames)? } @@ -586,7 +472,7 @@ impl KeyGen { } } - let substrate_dkg = match verify_dkg::( + let substrate_dkg = match verify_dkg::( txn, session, true, @@ -601,7 +487,7 @@ impl KeyGen { Err(blames) => return blames, }; - let network_dkg = match verify_dkg::( + let network_dkg = match verify_dkg::( txn, session, false, @@ -623,38 +509,17 @@ impl KeyGen { let mut network_keys = network_dkg.keys(&self.network_evrf_private_key); // Tweak the keys for the network for network_keys in &mut network_keys { - N::tweak_keys(network_keys); + P::tweak_keys(network_keys); } - GeneratedKeysDb::save_keys::(txn, &session, &substrate_keys, &network_keys); + KeyGenDb::
<P>
::set_key_shares(txn, session, &substrate_keys, &network_keys); // Since no one we verified was invalid, and we had the threshold, yield the new keys vec![ProcessorMessage::GeneratedKeyPair { session, substrate_key: substrate_keys[0].group_key().to_bytes(), - // TODO: This can be made more efficient since tweaked keys may be a subset of keys - network_key: network_keys[0].group_key().to_bytes().as_ref().to_vec(), + network_key: P::encode_key(network_keys[0].group_key()), }] } } } - - // This should only be called if we're participating, hence taking our instance - #[allow(clippy::unused_self)] - pub fn confirm( - &mut self, - txn: &mut D::Transaction<'_>, - session: Session, - key_pair: &KeyPair, - ) -> KeyConfirmed { - info!( - "Confirmed key pair {} {} for {:?}", - hex::encode(key_pair.0), - hex::encode(&key_pair.1), - session, - ); - - let (substrate_keys, network_keys) = KeysDb::confirm_keys::(txn, session, key_pair); - - KeyConfirmed { substrate_keys, network_keys } - } } diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 19f67508b..bbff33f6a 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -6,7 +6,7 @@ pub use plan::*; mod db; pub(crate) use db::*; -mod key_gen; +use serai_processor_key_gen as key_gen; pub mod networks; pub(crate) mod multisigs; diff --git a/processor/src/main.rs b/processor/src/main.rs index 2d05ad4dc..49406aaf3 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -48,7 +48,7 @@ pub use db::*; mod coordinator; pub use coordinator::*; -mod key_gen; +use serai_processor_key_gen as key_gen; use key_gen::{SessionDb, KeyConfirmed, KeyGen}; mod signer; From 914184c07fef9978617766cf37eba0a50eb5c9f0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 18 Aug 2024 22:43:13 -0400 Subject: [PATCH 003/179] Smash out the signer Abstract, to be done for the transactions, the batches, the cosigns, the slash reports, everything. It has a minimal API itself, intending to be as clear as possible. 
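A rough sketch of the intended flow, with the names of the surrounding signer
(`machines`, `msg`, `send_to_coordinator`, `publish`) assumed for illustration:

```rust
// Register the FROST machines for a signing protocol once its plan is known
let mut attempt_manager = AttemptManager::new(session, start_i);
attempt_manager.register(id, machines);

// Then feed every `sign::CoordinatorMessage` for this signer through it
match attempt_manager.handle(msg) {
  // Preprocesses, shares, or blame messages to forward to the coordinator
  Response::Messages(msgs) => send_to_coordinator(msgs),
  // The completed signature, after which this protocol is retired
  Response::Signature(signature) => {
    publish(signature);
    attempt_manager.retire(id);
  }
}
```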
--- .github/workflows/tests.yml | 1 + Cargo.lock | 13 + Cargo.toml | 1 + deny.toml | 1 + processor/LICENSE | 2 +- processor/frost-attempt-manager/Cargo.toml | 29 ++ processor/frost-attempt-manager/LICENSE | 15 ++ processor/frost-attempt-manager/README.md | 6 + .../frost-attempt-manager/src/individual.rs | 251 ++++++++++++++++++ processor/frost-attempt-manager/src/lib.rs | 92 +++++++ processor/key-gen/src/lib.rs | 8 +- processor/messages/src/lib.rs | 62 ++--- 12 files changed, 442 insertions(+), 39 deletions(-) create mode 100644 processor/frost-attempt-manager/Cargo.toml create mode 100644 processor/frost-attempt-manager/LICENSE create mode 100644 processor/frost-attempt-manager/README.md create mode 100644 processor/frost-attempt-manager/src/individual.rs create mode 100644 processor/frost-attempt-manager/src/lib.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index fabfaba9d..5aa3d234f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -40,6 +40,7 @@ jobs: -p serai-message-queue \ -p serai-processor-messages \ -p serai-processor-key-gen \ + -p serai-processor-frost-attempt-manager \ -p serai-processor \ -p tendermint-machine \ -p tributary-chain \ diff --git a/Cargo.lock b/Cargo.lock index 62952da0d..3de56915f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8599,6 +8599,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "serai-processor-frost-attempt-manager" +version = "0.1.0" +dependencies = [ + "hex", + "log", + "modular-frost", + "rand_core", + "serai-db", + "serai-processor-messages", + "serai-validator-sets-primitives", +] + [[package]] name = "serai-processor-key-gen" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index f0bdd6a89..ddfaf1f20 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,6 +71,7 @@ members = [ "processor/messages", "processor/key-gen", + "processor/frost-attempt-manager", "processor", "coordinator/tributary/tendermint", diff --git a/deny.toml b/deny.toml index 0d82cb8ab..ea61fcc1c 100644 --- a/deny.toml +++ b/deny.toml @@ -47,6 +47,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-messages" }, { allow = ["AGPL-3.0"], name = "serai-processor-key-gen" }, + { allow = ["AGPL-3.0"], name = "serai-processor-frost-attempt-manager" }, { allow = ["AGPL-3.0"], name = "serai-processor" }, { allow = ["AGPL-3.0"], name = "tributary-chain" }, diff --git a/processor/LICENSE b/processor/LICENSE index c425427c8..41d5a2616 100644 --- a/processor/LICENSE +++ b/processor/LICENSE @@ -1,6 +1,6 @@ AGPL-3.0-only license -Copyright (c) 2022-2023 Luke Parker +Copyright (c) 2022-2024 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as diff --git a/processor/frost-attempt-manager/Cargo.toml b/processor/frost-attempt-manager/Cargo.toml new file mode 100644 index 000000000..7a9abe01c --- /dev/null +++ b/processor/frost-attempt-manager/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "serai-processor-frost-attempt-manager" +version = "0.1.0" +description = "A manager of multiple attempts of FROST signing protocols" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/frost-attempt-manager" +authors = ["Luke Parker "] +keywords = ["frost", "multisig", "threshold"] +edition = "2021" +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", 
default-features = false, features = ["std", "getrandom"] } + +frost = { package = "modular-frost", path = "../../crypto/frost", version = "^0.8.1", default-features = false } + +serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } + +hex = { version = "0.4", default-features = false, features = ["std"] } +log = { version = "0.4", default-features = false, features = ["std"] } +serai-db = { path = "../../common/db" } +messages = { package = "serai-processor-messages", path = "../messages" } diff --git a/processor/frost-attempt-manager/LICENSE b/processor/frost-attempt-manager/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/frost-attempt-manager/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/frost-attempt-manager/README.md b/processor/frost-attempt-manager/README.md new file mode 100644 index 000000000..c7b0be259 --- /dev/null +++ b/processor/frost-attempt-manager/README.md @@ -0,0 +1,6 @@ +# FROST Attempt Manager + +A library for helper structures to manage various attempts of a FROST signing +protocol. + +This library is interacted with via the `serai-processor-messages::sign` API. diff --git a/processor/frost-attempt-manager/src/individual.rs b/processor/frost-attempt-manager/src/individual.rs new file mode 100644 index 000000000..f64ad4537 --- /dev/null +++ b/processor/frost-attempt-manager/src/individual.rs @@ -0,0 +1,251 @@ +use std::collections::HashMap; + +use rand_core::OsRng; + +use frost::{ + Participant, FrostError, + sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, +}; + +use serai_validator_sets_primitives::Session; + +use messages::sign::{SignId, ProcessorMessage}; + +/// An instance of a signing protocol with re-attempts handled internally. +#[allow(clippy::type_complexity)] +pub(crate) struct SigningProtocol { + // The session this signing protocol is being conducted by. + session: Session, + // The `i` of our first, or starting, set of key shares we will be signing with. + // The key shares we sign with are expected to be continguous from this position. + start_i: Participant, + // The ID of this signing protocol. + id: [u8; 32], + // This accepts a vector of `root` machines in order to support signing with multiple key shares. + root: Vec, + preprocessed: HashMap, HashMap>)>, + // Here, we drop to a single machine as we only need one to complete the signature. + shared: HashMap< + u32, + ( + >::SignatureMachine, + HashMap>, + ), + >, +} + +impl SigningProtocol { + /// Create a new signing protocol. 
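+ ///
+ /// `root` holds one `PreprocessMachine` per key share we sign with; the
+ /// machine at index `i` signs as participant `start_i + i`.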
+ pub(crate) fn new(session: Session, start_i: Participant, id: [u8; 32], root: Vec) -> Self { + log::info!("starting signing protocol {}", hex::encode(id)); + + Self { + session, + start_i, + id, + root, + preprocessed: HashMap::with_capacity(1), + shared: HashMap::with_capacity(1), + } + } + + /// Start a new attempt of the signing protocol. + /// + /// Returns the (serialized) preprocesses for the attempt. + pub(crate) fn attempt(&mut self, attempt: u32) -> Vec { + /* + We'd get slashed as malicious if we: + 1) Preprocessed + 2) Rebooted + 3) On reboot, preprocessed again, sending new preprocesses which would be deduplicated by + the message-queue + 4) Got sent preprocesses + 5) Sent a share based on our new preprocesses, yet with everyone else expecting it to be + based on our old preprocesses + + We avoid this by saving to the DB we preprocessed before sending our preprocessed, and only + keeping our preprocesses for this instance of the processor. Accordingly, on reboot, we will + flag the prior preprocess and not send new preprocesses. + + We also won't send the share we were supposed to, unfortunately, yet caching/reloading the + preprocess has enough safety issues it isn't worth the headache. + */ + // TODO + + log::debug!("attemting a new instance of signing protocol {}", hex::encode(self.id)); + + let mut our_preprocesses = HashMap::with_capacity(self.root.len()); + let mut preprocessed = Vec::with_capacity(self.root.len()); + let mut preprocesses = Vec::with_capacity(self.root.len()); + for (i, machine) in self.root.iter().enumerate() { + let (machine, preprocess) = machine.clone().preprocess(&mut OsRng); + preprocessed.push(machine); + + let mut this_preprocess = Vec::with_capacity(64); + preprocess.write(&mut this_preprocess).unwrap(); + + our_preprocesses.insert( + Participant::new( + u16::from(self.start_i) + u16::try_from(i).expect("signing with 2**16 machines"), + ) + .expect("start_i + i exceeded the valid indexes for a Participant"), + this_preprocess.clone(), + ); + preprocesses.push(this_preprocess); + } + assert!(self.preprocessed.insert(attempt, (preprocessed, our_preprocesses)).is_none()); + + vec![ProcessorMessage::Preprocesses { + id: SignId { session: self.session, id: self.id, attempt }, + preprocesses, + }] + } + + /// Handle preprocesses for the signing protocol. + /// + /// Returns the (serialized) shares for the attempt. 
+ pub(crate) fn preprocesses( + &mut self, + attempt: u32, + serialized_preprocesses: HashMap>, + ) -> Vec { + log::debug!("handling preprocesses for signing protocol {}", hex::encode(self.id)); + + let Some((machines, our_serialized_preprocesses)) = self.preprocessed.remove(&attempt) else { + return vec![]; + }; + + let mut msgs = Vec::with_capacity(1); + + let mut preprocesses = + HashMap::with_capacity(serialized_preprocesses.len() + our_serialized_preprocesses.len()); + for (i, serialized_preprocess) in + serialized_preprocesses.into_iter().chain(our_serialized_preprocesses) + { + let mut serialized_preprocess = serialized_preprocess.as_slice(); + let Ok(preprocess) = machines[0].read_preprocess(&mut serialized_preprocess) else { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + }; + if !serialized_preprocess.is_empty() { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + } + preprocesses.insert(i, preprocess); + } + // We throw out our preprocessed machines here, despite the fact they haven't been invalidated + // We could reuse them with a new set of valid preprocesses + // https://github.com/serai-dex/serai/issues/588 + if !msgs.is_empty() { + return msgs; + } + + let mut our_shares = HashMap::with_capacity(self.root.len()); + let mut shared = Vec::with_capacity(machines.len()); + let mut shares = Vec::with_capacity(machines.len()); + for (i, machine) in machines.into_iter().enumerate() { + let i = Participant::new( + u16::from(self.start_i) + u16::try_from(i).expect("signing with 2**16 machines"), + ) + .expect("start_i + i exceeded the valid indexes for a Participant"); + + let mut preprocesses = preprocesses.clone(); + assert!(preprocesses.remove(&i).is_some()); + + // TODO: Replace this with `()`, which requires making the message type part of the trait + let (machine, share) = match machine.sign(preprocesses, &[]) { + Ok((machine, share)) => (machine, share), + Err(e) => match e { + FrostError::InternalError(_) | + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) | + FrostError::InvalidShare(_) => { + panic!("FROST had an error which shouldn't be reachable: {e:?}"); + } + FrostError::InvalidPreprocess(i) => { + msgs + .push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + return msgs; + } + }, + }; + shared.push(machine); + + let mut this_share = Vec::with_capacity(32); + share.write(&mut this_share).unwrap(); + + our_shares.insert(i, this_share.clone()); + shares.push(this_share); + } + + assert!(self.shared.insert(attempt, (shared.swap_remove(0), our_shares)).is_none()); + log::debug!( + "successfully handled preprocesses for signing protocol {}, sending shares", + hex::encode(self.id) + ); + msgs.push(ProcessorMessage::Shares { + id: SignId { session: self.session, id: self.id, attempt }, + shares, + }); + msgs + } + + /// Process shares for the signing protocol. + /// + /// Returns the signature produced by the protocol. + pub(crate) fn shares( + &mut self, + attempt: u32, + serialized_shares: HashMap>, + ) -> Result> { + log::debug!("handling shares for signing protocol {}", hex::encode(self.id)); + + let Some((machine, our_serialized_shares)) = self.shared.remove(&attempt) else { Err(vec![])? 
}; + + let mut msgs = Vec::with_capacity(1); + + let mut shares = HashMap::with_capacity(serialized_shares.len() + our_serialized_shares.len()); + for (i, serialized_share) in our_serialized_shares.into_iter().chain(serialized_shares) { + let mut serialized_share = serialized_share.as_slice(); + let Ok(share) = machine.read_share(&mut serialized_share) else { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + }; + if !serialized_share.is_empty() { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + } + shares.insert(i, share); + } + if !msgs.is_empty() { + Err(msgs)?; + } + + assert!(shares.remove(&self.start_i).is_some()); + + let signature = match machine.complete(shares) { + Ok(signature) => signature, + Err(e) => match e { + FrostError::InternalError(_) | + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) | + FrostError::InvalidPreprocess(_) => { + panic!("FROST had an error which shouldn't be reachable: {e:?}"); + } + FrostError::InvalidShare(i) => { + Err(vec![ProcessorMessage::InvalidParticipant { session: self.session, participant: i }])? + } + }, + }; + + log::info!("finished signing for protocol {}", hex::encode(self.id)); + + Ok(signature) + } +} diff --git a/processor/frost-attempt-manager/src/lib.rs b/processor/frost-attempt-manager/src/lib.rs new file mode 100644 index 000000000..e7e51d30d --- /dev/null +++ b/processor/frost-attempt-manager/src/lib.rs @@ -0,0 +1,92 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::collections::HashMap; + +use frost::{Participant, sign::PreprocessMachine}; + +use serai_validator_sets_primitives::Session; + +use messages::sign::{ProcessorMessage, CoordinatorMessage}; + +mod individual; +use individual::SigningProtocol; + +/// A response to handling a message from the coordinator. +pub enum Response { + /// Messages to send to the coordinator. + Messages(Vec), + /// A produced signature. + Signature(M::Signature), +} + +/// A manager of attempts for a variety of signing protocols. +pub struct AttemptManager { + session: Session, + start_i: Participant, + active: HashMap<[u8; 32], SigningProtocol>, +} + +impl AttemptManager { + /// Create a new attempt manager. + pub fn new(session: Session, start_i: Participant) -> Self { + AttemptManager { session, start_i, active: HashMap::new() } + } + + /// Register a signing protocol to attempt. + pub fn register(&mut self, id: [u8; 32], machines: Vec) { + self.active.insert(id, SigningProtocol::new(self.session, self.start_i, id, machines)); + } + + /// Retire a signing protocol. + /// + /// This frees all memory used for it and means no further messages will be handled for it. + /// This does not stop the protocol from being re-registered and further worked on (with + /// undefined behavior) then. The higher-level context must never call `register` again with this + /// ID. + // TODO: Also have the DB for this SigningProtocol cleaned up here. + pub fn retire(&mut self, id: [u8; 32]) { + log::info!("retiring signing protocol {}", hex::encode(id)); + self.active.remove(&id); + } + + /// Handle a message for a signing protocol. 
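+ ///
+ /// Callers are expected to match on the returned `Response`, forwarding
+ /// `Response::Messages` to the coordinator and treating `Response::Signature`
+ /// as the protocol's completion (after which `retire` should be called).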
+  pub fn handle(&mut self, msg: CoordinatorMessage) -> Response<M> {
+    match msg {
+      CoordinatorMessage::Preprocesses { id, preprocesses } => {
+        let Some(protocol) = self.active.get_mut(&id.id) else {
+          log::trace!(
+            "handling preprocesses for signing protocol {}, which we're not actively running",
+            hex::encode(id.id)
+          );
+          return Response::Messages(vec![]);
+        };
+        Response::Messages(protocol.preprocesses(id.attempt, preprocesses))
+      }
+      CoordinatorMessage::Shares { id, shares } => {
+        let Some(protocol) = self.active.get_mut(&id.id) else {
+          log::trace!(
+            "handling shares for signing protocol {}, which we're not actively running",
+            hex::encode(id.id)
+          );
+          return Response::Messages(vec![]);
+        };
+        match protocol.shares(id.attempt, shares) {
+          Ok(signature) => Response::Signature(signature),
+          Err(messages) => Response::Messages(messages),
+        }
+      }
+      CoordinatorMessage::Reattempt { id } => {
+        let Some(protocol) = self.active.get_mut(&id.id) else {
+          log::trace!(
+            "reattempting signing protocol {}, which we're not actively running",
+            hex::encode(id.id)
+          );
+          return Response::Messages(vec![]);
+        };
+        Response::Messages(protocol.attempt(id.attempt))
+      }
+    }
+  }
+}
diff --git a/processor/key-gen/src/lib.rs b/processor/key-gen/src/lib.rs
index 8d4e911fb..3d8c35526 100644
--- a/processor/key-gen/src/lib.rs
+++ b/processor/key-gen/src/lib.rs
@@ -17,8 +17,6 @@ use ciphersuite::{
 };
 use dkg::{Participant, ThresholdKeys, evrf::*};
 
-use log::info;
-
 use serai_validator_sets_primitives::Session;
 
 use messages::key_gen::*;
@@ -184,7 +182,7 @@ impl KeyGen {
     match msg {
       CoordinatorMessage::GenerateKey { session, threshold, evrf_public_keys } => {
-        info!("Generating new key. Session: {session:?}");
+        log::info!("Generating new key. Session: {session:?}");
 
         // Unzip the vector of eVRF keys
         let substrate_evrf_public_keys =
@@ -260,7 +258,7 @@ impl KeyGen {
       }
 
       CoordinatorMessage::Participation { session, participant, participation } => {
-        info!("received participation from {:?} for {:?}", participant, session);
+        log::info!("received participation from {:?} for {:?}", participant, session);
 
         let Params { t: threshold, n, substrate_evrf_public_keys, network_evrf_public_keys } =
          KeyGenDb::::params(txn, session).unwrap();
@@ -295,7 +293,7 @@ impl KeyGen {
         // participations and continue. We solely have to verify them, as to identify malicious
         // participants and prevent DoSs, before returning
         if self.key_shares(session).is_some() {
-          info!("already finished generating a key for {:?}", session);
+          log::info!("already finished generating a key for {:?}", session);
 
           match EvrfDkg::::verify(
             &mut OsRng,
diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs
index 98af97ce7..096fddb9e 100644
--- a/processor/messages/src/lib.rs
+++ b/processor/messages/src/lib.rs
@@ -22,7 +22,6 @@ pub mod key_gen {
   #[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
   pub enum CoordinatorMessage {
     // Instructs the Processor to begin the key generation process.
-    // TODO: Should this be moved under Substrate?
     GenerateKey { session: Session, threshold: u16, evrf_public_keys: Vec<([u8; 32], Vec<u8>)> },
     // Received participations for the specified key generation protocol.
     Participation { session: Session, participant: Participant, participation: Vec<u8> },
@@ -93,6 +92,8 @@ pub mod sign {
     pub attempt: u32,
   }
 
+  // TODO: Make this generic to the ID once we introduce topics into the message-queue and remove
+  // the global ProcessorMessage/CoordinatorMessage
   #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
   pub enum CoordinatorMessage {
     // Received preprocesses for the specified signing protocol.
@@ -101,8 +102,10 @@ pub mod sign {
     Shares { id: SignId, shares: HashMap<Participant, Vec<u8>> },
     // Re-attempt a signing protocol.
     Reattempt { id: SignId },
+    /* TODO
     // Completed a signing protocol already.
     Completed { session: Session, id: [u8; 32], tx: Vec<u8> },
+    */
   }
 
   impl CoordinatorMessage {
@@ -114,8 +117,8 @@ pub mod sign {
       match self {
         CoordinatorMessage::Preprocesses { id, .. } |
         CoordinatorMessage::Shares { id, .. } |
-        CoordinatorMessage::Reattempt { id } => id.session,
-        CoordinatorMessage::Completed { session, .. } => *session,
+        CoordinatorMessage::Reattempt { id, .. } => id.session,
+        // TODO CoordinatorMessage::Completed { session, .. } => *session,
       }
     }
   }
@@ -123,13 +126,13 @@ pub mod sign {
   #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
   pub enum ProcessorMessage {
     // Participant sent an invalid message during the sign protocol.
-    InvalidParticipant { id: SignId, participant: Participant },
-    // Created preprocess for the specified signing protocol.
-    Preprocess { id: SignId, preprocesses: Vec<Vec<u8>> },
-    // Signed share for the specified signing protocol.
-    Share { id: SignId, shares: Vec<Vec<u8>> },
+    InvalidParticipant { session: Session, participant: Participant },
+    // Created preprocesses for the specified signing protocol.
+    Preprocesses { id: SignId, preprocesses: Vec<Vec<u8>> },
+    // Signed shares for the specified signing protocol.
+    Shares { id: SignId, shares: Vec<Vec<u8>> },
     // Completed a signing protocol already.
-    Completed { session: Session, id: [u8; 32], tx: Vec<u8> },
+    // TODO Completed { session: Session, id: [u8; 32], tx: Vec<u8> },
   }
 }
 
@@ -165,10 +168,6 @@ pub mod coordinator {
   pub enum CoordinatorMessage {
     CosignSubstrateBlock { id: SubstrateSignId, block_number: u64 },
     SignSlashReport { id: SubstrateSignId, report: Vec<([u8; 32], u32)> },
-    SubstratePreprocesses { id: SubstrateSignId, preprocesses: HashMap<Participant, [u8; 64]> },
-    SubstrateShares { id: SubstrateSignId, shares: HashMap<Participant, [u8; 32]> },
-    // Re-attempt a batch signing protocol.
-    BatchReattempt { id: SubstrateSignId },
   }
 
   impl CoordinatorMessage {
@@ -192,9 +191,9 @@ pub mod coordinator {
     SubstrateBlockAck { block: u64, plans: Vec<PlanMeta> },
     InvalidParticipant { id: SubstrateSignId, participant: Participant },
     CosignPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> },
+    // TODO: Remove BatchPreprocess? Why does this take a BlockHash here and not in its
+    // SubstrateSignId?
     BatchPreprocess { id: SubstrateSignId, block: BlockHash, preprocesses: Vec<[u8; 64]> },
-    SlashReportPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> },
-    SubstrateShare { id: SubstrateSignId, shares: Vec<[u8; 32]> },
     // TODO: Make these signatures [u8; 64]?
     CosignedBlock { block_number: u64, block: [u8; 32], signature: Vec<u8> },
     SignedSlashReport { session: Session, signature: Vec<u8> },
@@ -327,19 +326,19 @@ impl CoordinatorMessage {
       }
       CoordinatorMessage::Sign(msg) => {
         let (sub, id) = match msg {
-          // Unique since SignId includes a hash of the network, and specific transaction info
-          sign::CoordinatorMessage::Preprocesses { id, .. } => (0, id.encode()),
-          sign::CoordinatorMessage::Shares { id, .. } => (1, id.encode()),
-          sign::CoordinatorMessage::Reattempt { id } => (2, id.encode()),
+          // Unique since SignId
+          sign::CoordinatorMessage::Preprocesses { id, .. } => (0, id),
+          sign::CoordinatorMessage::Shares { id, .. } => (1, id),
+          sign::CoordinatorMessage::Reattempt { id, .. } => (2, id),
           // The coordinator should report all reported completions to the processor
           // Accordingly, the intent is a combination of plan ID and actual TX
           // While transaction alone may suffice, that doesn't cover cross-chain TX ID conflicts,
           // which are possible
-          sign::CoordinatorMessage::Completed { id, tx, .. } => (3, (id, tx).encode()),
+          // TODO sign::CoordinatorMessage::Completed { id, tx, .. } => (3, (id, tx).encode()),
         };
         let mut res = vec![COORDINATOR_UID, TYPE_SIGN_UID, sub];
-        res.extend(&id);
+        res.extend(id.encode());
         res
       }
       CoordinatorMessage::Coordinator(msg) => {
@@ -349,10 +348,6 @@ impl CoordinatorMessage {
           // Unique since there's only one of these per session/attempt, and ID is inclusive to
           // both
           coordinator::CoordinatorMessage::SignSlashReport { id, .. } => (1, id.encode()),
-          // Unique since this embeds the batch ID (including its network) and attempt
-          coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. } => (2, id.encode()),
-          coordinator::CoordinatorMessage::SubstrateShares { id, .. } => (3, id.encode()),
-          coordinator::CoordinatorMessage::BatchReattempt { id, .. } => (4, id.encode()),
         };
 
         let mut res = vec![COORDINATOR_UID, TYPE_COORDINATOR_UID, sub];
@@ -404,12 +399,15 @@ impl ProcessorMessage {
       }
       ProcessorMessage::Sign(msg) => {
         let (sub, id) = match msg {
+          // Unique since we'll only fatally slash a participant once
+          sign::ProcessorMessage::InvalidParticipant { session, participant } => {
+            (0, (session, u16::from(*participant)).encode())
+          }
           // Unique since SignId
-          sign::ProcessorMessage::InvalidParticipant { id, .. } => (0, id.encode()),
-          sign::ProcessorMessage::Preprocess { id, .. } => (1, id.encode()),
-          sign::ProcessorMessage::Share { id, .. } => (2, id.encode()),
+          sign::ProcessorMessage::Preprocesses { id, .. } => (1, id.encode()),
+          sign::ProcessorMessage::Shares { id, .. } => (2, id.encode()),
           // Unique since a processor will only sign a TX once
-          sign::ProcessorMessage::Completed { id, .. } => (3, id.to_vec()),
+          // TODO sign::ProcessorMessage::Completed { id, ..
} => (3, id.to_vec()),
         };
 
         let mut res = vec![PROCESSOR_UID, TYPE_SIGN_UID, sub];
@@ -423,11 +421,9 @@ impl ProcessorMessage {
           coordinator::ProcessorMessage::InvalidParticipant { id, .. } => (1, id.encode()),
           coordinator::ProcessorMessage::CosignPreprocess { id, .. } => (2, id.encode()),
           coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (3, id.encode()),
-          coordinator::ProcessorMessage::SlashReportPreprocess { id, .. } => (4, id.encode()),
-          coordinator::ProcessorMessage::SubstrateShare { id, .. } => (5, id.encode()),
           // Unique since only one instance of a signature matters
-          coordinator::ProcessorMessage::CosignedBlock { block, .. } => (6, block.encode()),
-          coordinator::ProcessorMessage::SignedSlashReport { .. } => (7, vec![]),
+          coordinator::ProcessorMessage::CosignedBlock { block, .. } => (4, block.encode()),
+          coordinator::ProcessorMessage::SignedSlashReport { .. } => (5, vec![]),
         };
 
         let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub];

From 50ad4502028c49fef7e8d7e82938becfc3dee19e Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Mon, 19 Aug 2024 00:41:18 -0400
Subject: [PATCH 004/179] Cleanup DB handling a bit in key-gen/attempt-manager

---
 Cargo.lock                                    |  2 +
 processor/frost-attempt-manager/Cargo.toml    |  4 ++
 .../frost-attempt-manager/src/individual.rs   | 38 +++++++++++++++++--
 processor/frost-attempt-manager/src/lib.rs    | 31 ++++++++++-----
 processor/key-gen/src/db.rs                   | 21 +++++-----
 processor/key-gen/src/lib.rs                  |  8 ++--
 6 files changed, 77 insertions(+), 27 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 3de56915f..f5e1151d3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8603,9 +8603,11 @@ dependencies = [
 name = "serai-processor-frost-attempt-manager"
 version = "0.1.0"
 dependencies = [
+ "borsh",
  "hex",
  "log",
  "modular-frost",
+ "parity-scale-codec",
  "rand_core",
  "serai-db",
  "serai-processor-messages",
diff --git a/processor/frost-attempt-manager/Cargo.toml b/processor/frost-attempt-manager/Cargo.toml
index 7a9abe01c..01c1e4c5b 100644
--- a/processor/frost-attempt-manager/Cargo.toml
+++ b/processor/frost-attempt-manager/Cargo.toml
@@ -25,5 +25,9 @@ serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primi
 hex = { version = "0.4", default-features = false, features = ["std"] }
 log = { version = "0.4", default-features = false, features = ["std"] }
+
+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 serai-db = { path = "../../common/db" }
+
 messages = { package = "serai-processor-messages", path = "../messages" }
diff --git a/processor/frost-attempt-manager/src/individual.rs b/processor/frost-attempt-manager/src/individual.rs
index f64ad4537..d7f4eec0d 100644
--- a/processor/frost-attempt-manager/src/individual.rs
+++ b/processor/frost-attempt-manager/src/individual.rs
@@ -9,11 +9,19 @@ use frost::{
 };
 
 use serai_validator_sets_primitives::Session;
 
+use serai_db::{Get, DbTxn, Db, create_db};
 use messages::sign::{SignId, ProcessorMessage};
 
+create_db!(
+  FrostAttemptManager {
+    Attempted: (id: [u8; 32]) -> u32,
+  }
+);
+
 /// An instance of a signing protocol with re-attempts handled internally.
 #[allow(clippy::type_complexity)]
-pub(crate) struct SigningProtocol<M: Clone + PreprocessMachine> {
+pub(crate) struct SigningProtocol<D: Db, M: Clone + PreprocessMachine> {
+  db: D,
   // The session this signing protocol is being conducted by.
   session: Session,
   // The `i` of our first, or starting, set of key shares we will be signing with.
@@ -34,12 +42,19 @@ pub(crate) struct SigningProtocol<D: Db, M: Clone + PreprocessMachine> {
   >,
 }
 
-impl<M: Clone + PreprocessMachine> SigningProtocol<M> {
+impl<D: Db, M: Clone + PreprocessMachine> SigningProtocol<D, M> {
   /// Create a new signing protocol.
-  pub(crate) fn new(session: Session, start_i: Participant, id: [u8; 32], root: Vec<M>) -> Self {
+  pub(crate) fn new(
+    db: D,
+    session: Session,
+    start_i: Participant,
+    id: [u8; 32],
+    root: Vec<M>,
+  ) -> Self {
     log::info!("starting signing protocol {}", hex::encode(id));
 
     Self {
+      db,
       session,
       start_i,
       id,
@@ -70,7 +85,15 @@ impl<D: Db, M: Clone + PreprocessMachine> SigningProtocol<D, M> {
       We also won't send the share we were supposed to, unfortunately, yet caching/reloading the
       preprocess has enough safety issues it isn't worth the headache.
     */
-    // TODO
+    {
+      let mut txn = self.db.txn();
+      let prior_attempted = Attempted::get(&txn, self.id);
+      if Some(attempt) <= prior_attempted {
+        return vec![];
+      }
+      Attempted::set(&mut txn, self.id, &attempt);
+      txn.commit();
+    }
 
     log::debug!("attempting a new instance of signing protocol {}", hex::encode(self.id));
@@ -248,4 +271,11 @@ impl<D: Db, M: Clone + PreprocessMachine> SigningProtocol<D, M> {
 
     Ok(signature)
   }
+
+  /// Cleanup the database entries for a specified signing protocol.
+  pub(crate) fn cleanup(db: &mut D, id: [u8; 32]) {
+    let mut txn = db.txn();
+    Attempted::del(&mut txn, id);
+    txn.commit();
+  }
 }
diff --git a/processor/frost-attempt-manager/src/lib.rs b/processor/frost-attempt-manager/src/lib.rs
index e7e51d30d..cd8452fa5 100644
--- a/processor/frost-attempt-manager/src/lib.rs
+++ b/processor/frost-attempt-manager/src/lib.rs
@@ -8,6 +8,7 @@ use frost::{Participant, sign::PreprocessMachine};
 
 use serai_validator_sets_primitives::Session;
 
+use serai_db::Db;
 use messages::sign::{ProcessorMessage, CoordinatorMessage};
 
 mod individual;
@@ -22,21 +23,28 @@ pub enum Response<M: PreprocessMachine> {
 }
 
 /// A manager of attempts for a variety of signing protocols.
-pub struct AttemptManager<M: Clone + PreprocessMachine> {
+pub struct AttemptManager<D: Db, M: Clone + PreprocessMachine> {
+  db: D,
   session: Session,
   start_i: Participant,
-  active: HashMap<[u8; 32], SigningProtocol<M>>,
+  active: HashMap<[u8; 32], SigningProtocol<D, M>>,
 }
 
-impl<M: Clone + PreprocessMachine> AttemptManager<M> {
+impl<D: Db, M: Clone + PreprocessMachine> AttemptManager<D, M> {
   /// Create a new attempt manager.
-  pub fn new(session: Session, start_i: Participant) -> Self {
-    AttemptManager { session, start_i, active: HashMap::new() }
+  pub fn new(db: D, session: Session, start_i: Participant) -> Self {
+    AttemptManager { db, session, start_i, active: HashMap::new() }
   }
 
   /// Register a signing protocol to attempt.
-  pub fn register(&mut self, id: [u8; 32], machines: Vec<M>) {
-    self.active.insert(id, SigningProtocol::new(self.session, self.start_i, id, machines));
+  ///
+  /// This ID must be unique across all sessions, attempt managers, protocols, etc.
+  pub fn register(&mut self, id: [u8; 32], machines: Vec<M>) -> Vec<ProcessorMessage> {
+    let mut protocol =
+      SigningProtocol::new(self.db.clone(), self.session, self.start_i, id, machines);
+    let messages = protocol.attempt(0);
+    self.active.insert(id, protocol);
+    messages
   }
 
   /// Retire a signing protocol.
@@ -45,10 +53,13 @@ impl<D: Db, M: Clone + PreprocessMachine> AttemptManager<D, M> {
   /// This does not prevent the protocol from being re-registered and further worked on (with
   /// undefined behavior). The higher-level context must never call `register` again with this
   /// ID.
-  // TODO: Also have the DB for this SigningProtocol cleaned up here.
pub fn retire(&mut self, id: [u8; 32]) { - log::info!("retiring signing protocol {}", hex::encode(id)); - self.active.remove(&id); + if self.active.remove(&id).is_none() { + log::info!("retiring protocol {}, which we didn't register/already retired", hex::encode(id)); + } else { + log::info!("retired signing protocol {}", hex::encode(id)); + } + SigningProtocol::::cleanup(&mut self.db, id); } /// Handle a message for a signing protocol. diff --git a/processor/key-gen/src/db.rs b/processor/key-gen/src/db.rs index d597cb7e4..e82b84a52 100644 --- a/processor/key-gen/src/db.rs +++ b/processor/key-gen/src/db.rs @@ -36,10 +36,10 @@ pub(crate) struct Participations { } create_db!( - KeyGenDb { - ParamsDb: (session: &Session) -> RawParams, - ParticipationsDb: (session: &Session) -> Participations, - KeySharesDb: (session: &Session) -> Vec, + KeyGen { + Params: (session: &Session) -> RawParams, + Participations: (session: &Session) -> Participations, + KeyShares: (session: &Session) -> Vec, } ); @@ -48,7 +48,7 @@ impl KeyGenDb

{ pub(crate) fn set_params(txn: &mut impl DbTxn, session: Session, params: Params

) { assert_eq!(params.substrate_evrf_public_keys.len(), params.network_evrf_public_keys.len()); - ParamsDb::set( + Params::set( txn, &session, &RawParams { @@ -68,7 +68,7 @@ impl KeyGenDb

{ } pub(crate) fn params(getter: &impl Get, session: Session) -> Option> { - ParamsDb::get(getter, &session).map(|params| Params { + Params::get(getter, &session).map(|params| Params { t: params.t, n: params .network_evrf_public_keys @@ -101,12 +101,13 @@ impl KeyGenDb

{ session: Session, participations: &Participations, ) { - ParticipationsDb::set(txn, &session, participations) + Participations::set(txn, &session, participations) } pub(crate) fn participations(getter: &impl Get, session: Session) -> Option { - ParticipationsDb::get(getter, &session) + Participations::get(getter, &session) } + // Set the key shares for a session. pub(crate) fn set_key_shares( txn: &mut impl DbTxn, session: Session, @@ -120,7 +121,7 @@ impl KeyGenDb

{ keys.extend(substrate_keys.serialize().as_slice()); keys.extend(network_keys.serialize().as_slice()); } - KeySharesDb::set(txn, &session, &keys); + KeyShares::set(txn, &session, &keys); } #[allow(clippy::type_complexity)] @@ -128,7 +129,7 @@ impl KeyGenDb

{ getter: &impl Get, session: Session, ) -> Option<(Vec>, Vec>)> { - let keys = KeySharesDb::get(getter, &session)?; + let keys = KeyShares::get(getter, &session)?; let mut keys: &[u8] = keys.as_ref(); let mut substrate_keys = vec![]; diff --git a/processor/key-gen/src/lib.rs b/processor/key-gen/src/lib.rs index 3d8c35526..607534126 100644 --- a/processor/key-gen/src/lib.rs +++ b/processor/key-gen/src/lib.rs @@ -182,7 +182,7 @@ impl KeyGen { match msg { CoordinatorMessage::GenerateKey { session, threshold, evrf_public_keys } => { - log::info!("Generating new key. Session: {session:?}"); + log::info!("generating new key, session: {session:?}"); // Unzip the vector of eVRF keys let substrate_evrf_public_keys = @@ -258,7 +258,7 @@ impl KeyGen { } CoordinatorMessage::Participation { session, participant, participation } => { - log::info!("received participation from {:?} for {:?}", participant, session); + log::debug!("received participation from {:?} for {:?}", participant, session); let Params { t: threshold, n, substrate_evrf_public_keys, network_evrf_public_keys } = KeyGenDb::

::params(txn, session).unwrap(); @@ -293,7 +293,7 @@ impl KeyGen { // participations and continue. We solely have to verify them, as to identify malicious // participants and prevent DoSs, before returning if self.key_shares(session).is_some() { - log::info!("already finished generating a key for {:?}", session); + log::debug!("already finished generating a key for {:?}", session); match EvrfDkg::::verify( &mut OsRng, @@ -511,6 +511,8 @@ impl KeyGen { } KeyGenDb::

::set_key_shares(txn, session, &substrate_keys, &network_keys); + log::info!("generated key, session: {session:?}"); + // Since no one we verified was invalid, and we had the threshold, yield the new keys vec![ProcessorMessage::GeneratedKeyPair { session, From 99661aaab222620a1fa4787ef4e033c0e2e251e0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 19 Aug 2024 00:42:38 -0400 Subject: [PATCH 005/179] Move scanner.rs to scanner/lib.rs --- .github/workflows/tests.yml | 1 + Cargo.toml | 1 + processor/scanner/Cargo.toml | 33 +++++++++++++++++++ processor/scanner/LICENSE | 15 +++++++++ processor/scanner/README.md | 12 +++++++ .../scanner.rs => scanner/src/lib.rs} | 0 6 files changed, 62 insertions(+) create mode 100644 processor/scanner/Cargo.toml create mode 100644 processor/scanner/LICENSE create mode 100644 processor/scanner/README.md rename processor/{src/multisigs/scanner.rs => scanner/src/lib.rs} (100%) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5aa3d234f..385d54c41 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -41,6 +41,7 @@ jobs: -p serai-processor-messages \ -p serai-processor-key-gen \ -p serai-processor-frost-attempt-manager \ + -p serai-processor-scanner \ -p serai-processor \ -p tendermint-machine \ -p tributary-chain \ diff --git a/Cargo.toml b/Cargo.toml index ddfaf1f20..8d6d9416b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,6 +72,7 @@ members = [ "processor/messages", "processor/key-gen", "processor/frost-attempt-manager", + "processor/scanner", "processor", "coordinator/tributary/tendermint", diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml new file mode 100644 index 000000000..f3b5ad37c --- /dev/null +++ b/processor/scanner/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "serai-processor-scanner" +version = "0.1.0" +description = "Scanner of abstract blockchains for Serai" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scanner" +authors = ["Luke Parker "] +keywords = ["frost", "multisig", "threshold"] +edition = "2021" +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } + +frost = { package = "modular-frost", path = "../../crypto/frost", version = "^0.8.1", default-features = false } + +serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } + +hex = { version = "0.4", default-features = false, features = ["std"] } +log = { version = "0.4", default-features = false, features = ["std"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } +serai-db = { path = "../../common/db" } + +messages = { package = "serai-processor-messages", path = "../messages" } diff --git a/processor/scanner/LICENSE b/processor/scanner/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/scanner/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. 
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
diff --git a/processor/scanner/README.md b/processor/scanner/README.md
new file mode 100644
index 000000000..f6c6ccc64
--- /dev/null
+++ b/processor/scanner/README.md
@@ -0,0 +1,12 @@
+# Scanner
+
+A scanner of arbitrary blockchains for Serai.
+
+This scanner has two distinct roles:
+
+1) Scanning blocks for received outputs contained within them
+2) Scanning blocks for the completion of eventualities
+
+While these can be optimized into a single structure, they are written as two
+distinct structures (with the associated overhead) for clarity and simplicity
+reasons.
diff --git a/processor/src/multisigs/scanner.rs b/processor/scanner/src/lib.rs
similarity index 100%
rename from processor/src/multisigs/scanner.rs
rename to processor/scanner/src/lib.rs

From 6bc607b1b6d545652b841de1b0540a690df2cc86 Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Mon, 19 Aug 2024 17:33:57 -0400
Subject: [PATCH 006/179] Extend serai-db with support for generic keys/values

---
 common/db/src/create_db.rs | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/common/db/src/create_db.rs b/common/db/src/create_db.rs
index abd86e464..7be1e1c86 100644
--- a/common/db/src/create_db.rs
+++ b/common/db/src/create_db.rs
@@ -38,13 +38,18 @@ pub fn serai_db_key(
 #[macro_export]
 macro_rules! create_db {
   ($db_name: ident {
-    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
+    $(
+      $field_name: ident:
+      $(<$($generic_name: tt: $generic_type: tt),+>)?(
+        $($arg: ident: $arg_type: ty),*
+      ) -> $field_type: ty$(,)?
+    )*
   }) => {
     $(
      #[derive(Clone, Debug)]
      pub(crate) struct $field_name;
      impl $field_name {
-        pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
+        pub(crate) fn key$(<$($generic_name: $generic_type),+>)?($($arg: $arg_type),*) -> Vec<u8> {
          use scale::Encode;
          $crate::serai_db_key(
            stringify!($db_name).as_bytes(),
@@ -52,18 +57,31 @@ macro_rules! create_db {
            ($($arg),*).encode()
          )
        }
-        pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
-          let key = $field_name::key($($arg),*);
+        pub(crate) fn set$(<$($generic_name: $generic_type),+>)?(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*,
+          data: &$field_type
+        ) {
+          let key = $field_name::key$(::<$($generic_name),+>)?($($arg),*);
          txn.put(&key, borsh::to_vec(data).unwrap());
        }
-        pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
-          getter.get($field_name::key($($arg),*)).map(|data| {
+        pub(crate) fn get$(<$($generic_name: $generic_type),+>)?(
+          getter: &impl Get,
+          $($arg: $arg_type),*
+        ) -> Option<$field_type> {
+          getter.get($field_name::key$(::<$($generic_name),+>)?($($arg),*)).map(|data| {
            borsh::from_slice(data.as_ref()).unwrap()
          })
        }
+        // Returns a PhantomData of all generic types so if the generic was only used in the value,
+        // not the keys, this doesn't have unused generic types
        #[allow(dead_code)]
-        pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
-          txn.del(&$field_name::key($($arg),*))
+        pub(crate) fn del$(<$($generic_name: $generic_type),+>)?(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+        ) -> core::marker::PhantomData<($($($generic_name),+)?)> {
+          txn.del(&$field_name::key$(::<$($generic_name),+>)?($($arg),*));
+          core::marker::PhantomData
        }
      }
    )*

From 090c0c13f4e0b344596c33f73feaf95a9e89166f Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Tue, 20 Aug 2024 11:57:56 -0400
Subject: [PATCH 007/179] Definition and delineation of tasks within the
 scanner

Also defines primitives for the processor.

---
 .github/workflows/tests.yml                |   1 +
 Cargo.lock                                 |  28 +++
 Cargo.toml                                 |   2 +
 processor/frost-attempt-manager/Cargo.toml |   3 +
 processor/primitives/Cargo.toml            |  27 +++
 processor/primitives/LICENSE               |  15 ++
 processor/primitives/README.md             |   3 +
 processor/primitives/src/lib.rs            | 167 ++++++++++++++++++
 processor/scanner/Cargo.toml               |  18 +-
 processor/scanner/src/db.rs                | 162 ++++++++++++++++++
 processor/scanner/src/eventuality.rs       |   0
 processor/scanner/src/index.rs             |  72 ++++++++
 processor/scanner/src/lib.rs               | 189 ++++++++++-----------
 processor/scanner/src/scan.rs              |  73 ++++++++
 processor/src/multisigs/mod.rs             |   2 +
 15 files changed, 655 insertions(+), 107 deletions(-)
 create mode 100644 processor/primitives/Cargo.toml
 create mode 100644 processor/primitives/LICENSE
 create mode 100644 processor/primitives/README.md
 create mode 100644 processor/primitives/src/lib.rs
 create mode 100644 processor/scanner/src/db.rs
 create mode 100644 processor/scanner/src/eventuality.rs
 create mode 100644 processor/scanner/src/index.rs
 create mode 100644 processor/scanner/src/scan.rs

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 385d54c41..5032676f7 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -41,6 +41,7 @@ jobs:
             -p serai-processor-messages \
             -p serai-processor-key-gen \
             -p serai-processor-frost-attempt-manager \
+            -p serai-processor-primitives \
             -p serai-processor-scanner \
             -p serai-processor \
             -p tendermint-machine \
diff --git a/Cargo.lock b/Cargo.lock
index f5e1151d3..230ed22fb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8647,6 +8647,34 @@ dependencies = [
  "serai-validator-sets-primitives",
 ]
 
+[[package]]
+name = "serai-processor-primitives"
+version = "0.1.0"
+dependencies = [
+ "async-trait",
+ "borsh",
+ "group",
+ "parity-scale-codec",
+ "serai-primitives",
+]
+
+[[package]]
+name = "serai-processor-scanner"
+version = "0.1.0"
+dependencies = [
+ "async-trait",
+ "borsh",
+ "group",
+ "hex",
+ "log",
+ "parity-scale-codec",
+ "serai-db",
+ "serai-processor-messages",
+ "serai-processor-primitives",
+ "thiserror",
+ "tokio",
+]
+
 [[package]]
 name = "serai-processor-tests"
 version = "0.1.0"
diff --git a/Cargo.toml b/Cargo.toml
index 8d6d9416b..7ad08a517 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -72,6 +72,8 @@ members = [
   "processor/messages",
   "processor/key-gen",
   "processor/frost-attempt-manager",
+
+  "processor/primitives",
   "processor/scanner",
   "processor",
diff --git a/processor/frost-attempt-manager/Cargo.toml b/processor/frost-attempt-manager/Cargo.toml
index 01c1e4c5b..a01acf0fd 100644
--- a/processor/frost-attempt-manager/Cargo.toml
+++ b/processor/frost-attempt-manager/Cargo.toml
@@ -13,6 +13,9 @@ rust-version = "1.79"
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
+[package.metadata.cargo-machete]
+ignored = ["borsh", "scale"]
+
 [lints]
 workspace = true
diff --git a/processor/primitives/Cargo.toml b/processor/primitives/Cargo.toml
new file mode 100644
index 000000000..dd59c0a8e
--- /dev/null
+++ b/processor/primitives/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "serai-processor-primitives"
+version = "0.1.0"
+description = "Primitives for the Serai processor"
+license = "AGPL-3.0-only"
+repository = "https://github.com/serai-dex/serai/tree/develop/processor/primitives"
+authors = ["Luke Parker"]
+keywords = []
+edition = "2021"
+publish = false
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[lints]
+workspace = true
+
+[dependencies]
+async-trait = { version = "0.1", default-features = false }
+
+group = { version = "0.13", default-features = false }
+
+serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] }
+
+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
diff --git a/processor/primitives/LICENSE b/processor/primitives/LICENSE
new file mode 100644
index 000000000..41d5a2616
--- /dev/null
+++ b/processor/primitives/LICENSE
@@ -0,0 +1,15 @@
+AGPL-3.0-only license
+
+Copyright (c) 2022-2024 Luke Parker
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License Version 3 as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
diff --git a/processor/primitives/README.md b/processor/primitives/README.md
new file mode 100644
index 000000000..d616993cb
--- /dev/null
+++ b/processor/primitives/README.md
@@ -0,0 +1,3 @@
+# Primitives
+
+Primitive types/traits/structs used by the Processor.
diff --git a/processor/primitives/src/lib.rs b/processor/primitives/src/lib.rs
new file mode 100644
index 000000000..535dd14f7
--- /dev/null
+++ b/processor/primitives/src/lib.rs
@@ -0,0 +1,167 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+
+use core::fmt::Debug;
+use std::io;
+
+use group::GroupEncoding;
+
+use serai_primitives::Balance;
+
+use scale::{Encode, Decode};
+use borsh::{BorshSerialize, BorshDeserialize};
+
+/// An ID for an output/transaction/block/etc.
+///
+/// IDs don't need to implement `Copy`, enabling `[u8; 33]`, `[u8; 64]` to be used. IDs are still
+/// bound to being of a constant-size, where `Default::default()` returns an instance of such size
+/// (making `Vec<u8>` invalid as an `Id`).
+pub trait Id:
+  Send
+  + Sync
+  + Clone
+  + Default
+  + PartialEq
+  + AsRef<[u8]>
+  + AsMut<[u8]>
+  + Debug
+  + Encode
+  + Decode
+  + BorshSerialize
+  + BorshDeserialize
+{
+}
+impl<const N: usize> Id for [u8; N] where [u8; N]: Default {}
+
+/// The type of the output.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub enum OutputType {
+  /// An output received to the address external payments use.
+  ///
+  /// This is reported to Substrate in a `Batch`.
+  External,
+
+  /// A branch output.
+  ///
+  /// Given a known output set, and a known series of outbound transactions, we should be able to
+  /// form a completely deterministic schedule S. The issue is when S has TXs which spend prior TXs
+  /// in S (which is needed for our logarithmic scheduling). In order to have the descendant TX,
+  /// say S[1], build off S[0], we need to observe when S[0] is included on-chain.
+  ///
+  /// We cannot.
+  ///
+  /// Monero (and other privacy coins) do not expose their UTXO graphs. Even if we know how to
+  /// create S[0], and the actual payment info behind it, we cannot observe it on the blockchain
+  /// unless we participated in creating it. Locking the entire schedule, when we cannot sign for
+  /// the entire schedule at once, to a single signing set isn't feasible.
+  ///
+  /// While any member of the active signing set can provide data enabling other signers to
+  /// participate, it's several KB of data which we then have to code communication for.
+  /// The other option is to simply not observe S[0]. Instead, observe a TX with an identical
+  /// output to the one in S[0] we intended to use for S[1]. It's either from S[0], or Eve, a
+  /// malicious actor, has sent us a forged TX which is... equally as usable? So who cares?
+  ///
+  /// The only issue is if we have multiple outputs on-chain with identical amounts and purposes.
+  /// Accordingly, when the scheduler makes a plan for when a specific output is available, it
+  /// shouldn't set that plan. It should *push* that plan to a queue of plans to perform when
+  /// instances of that output occur.
+  Branch,
+
+  /// A change output.
+  ///
+  /// This should be added to the available UTXO pool with no further action taken. It does not
+  /// need to be reported (though we do still need synchrony on the block it's in). There's no
+  /// explicit expectation for the usage of this output at time of recipience.
+  Change,
+
+  /// A forwarded output from the prior multisig.
+  ///
+  /// This is distinguished for technical reasons around detecting when a multisig should be
+  /// retired.
+  Forwarded,
+}
+
+impl OutputType {
+  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
+    writer.write_all(&[match self {
+      OutputType::External => 0,
+      OutputType::Branch => 1,
+      OutputType::Change => 2,
+      OutputType::Forwarded => 3,
+    }])
+  }
+
+  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+    let mut byte = [0; 1];
+    reader.read_exact(&mut byte)?;
+    Ok(match byte[0] {
+      0 => OutputType::External,
+      1 => OutputType::Branch,
+      2 => OutputType::Change,
+      3 => OutputType::Forwarded,
+      _ => Err(io::Error::other("invalid OutputType"))?,
+    })
+  }
+}
+
+/// A received output.
+pub trait ReceivedOutput<K: GroupEncoding, A>:
+  Send + Sync + Sized + Clone + PartialEq + Eq + Debug
+{
+  /// The type used to identify this output.
+  type Id: 'static + Id;
+
+  /// The type of this output.
+  fn kind(&self) -> OutputType;
+
+  /// The ID of this output.
+  fn id(&self) -> Self::Id;
+  /// The key this output was received by.
+  fn key(&self) -> K;
+
+  /// The presumed origin for this output.
+  ///
+  /// This is used as the address to refund coins to if we can't handle the output as desired
+  /// (unless overridden).
+  fn presumed_origin(&self) -> Option<A>;
+
+  /// The balance associated with this output.
+  fn balance(&self) -> Balance;
+  /// The arbitrary data (presumably an InInstruction) associated with this output.
+  fn data(&self) -> &[u8];
+
+  /// Write this output.
+  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
+  /// Read an output.
+  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
+}
+
+/// A block from an external network.
+#[async_trait::async_trait]
+pub trait Block: Send + Sync + Sized + Clone + Debug {
+  /// The type used to identify blocks.
+  type Id: 'static + Id;
+  /// The ID of this block.
+  fn id(&self) -> Self::Id;
+  /// The ID of the parent block.
+  fn parent(&self) -> Self::Id;
+}
+
+/// A wrapper for a group element which implements the borsh traits.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct BorshG<G: GroupEncoding>(pub G);
+impl<G: GroupEncoding> BorshSerialize for BorshG<G> {
+  fn serialize<W: borsh::io::Write>(&self, writer: &mut W) -> borsh::io::Result<()> {
+    writer.write_all(self.0.to_bytes().as_ref())
+  }
+}
+impl<G: GroupEncoding> BorshDeserialize for BorshG<G> {
+  fn deserialize_reader<R: borsh::io::Read>(reader: &mut R) -> borsh::io::Result<Self> {
+    let mut repr = G::Repr::default();
+    reader.read_exact(repr.as_mut())?;
+    Ok(Self(
+      Option::<G>::from(G::from_bytes(&repr)).ok_or(borsh::io::Error::other("invalid point"))?,
+    ))
+  }
+}
diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml
index f3b5ad37c..670581d93 100644
--- a/processor/scanner/Cargo.toml
+++ b/processor/scanner/Cargo.toml
@@ -17,17 +17,23 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true
 
 [dependencies]
-rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] }
+# Macros
+async-trait = { version = "0.1", default-features = false }
+thiserror = { version = "1", default-features = false }
 
-frost = { package = "modular-frost", path = "../../crypto/frost", version = "^0.8.1", default-features = false }
+# Encoders
+hex = { version = "0.4", default-features = false, features = ["std"] }
+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 
-serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] }
+# Cryptography
+group = { version = "0.13", default-features = false }
 
-hex = { version = "0.4", default-features = false, features = ["std"] }
+# Application
 log = { version = "0.4", default-features = false, features = ["std"] }
+tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
 
-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
-borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 serai-db = { path = "../../common/db" }
 
 messages = { package = "serai-processor-messages", path = "../messages" }
+primitives = { package = "serai-processor-primitives", path = "../primitives" }
diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs
new file mode 100644
index 000000000..8bd7d9447
--- /dev/null
+++ b/processor/scanner/src/db.rs
@@ -0,0 +1,162 @@
+use core::marker::PhantomData;
+
+use group::GroupEncoding;
+
+use borsh::{BorshSerialize, BorshDeserialize};
+use serai_db::{Get, DbTxn, create_db};
+
+use primitives::{Id, Block, BorshG};
+
+use crate::ScannerFeed;
+
+// The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this.
+trait Borshy: BorshSerialize + BorshDeserialize {}
+impl<T: BorshSerialize + BorshDeserialize> Borshy for T {}
+
+#[derive(BorshSerialize, BorshDeserialize)]
+struct SeraiKey<K: Borshy> {
+  activation_block_number: u64,
+  retirement_block_number: Option<u64>,
+  key: K,
+}
+
+create_db!(
+  Scanner {
+    BlockId: <I: Id>(number: u64) -> I,
+    BlockNumber: <I: Id>(id: I) -> u64,
+
+    ActiveKeys: <K: Borshy>() -> Vec<SeraiKey<K>>,
+
+    // The latest finalized block to appear on a blockchain
+    LatestFinalizedBlock: () -> u64,
+    // The latest block which it's safe to scan (dependent on what Serai has acknowledged scanning)
+    LatestScannableBlock: () -> u64,
+    // The next block to scan for received outputs
+    NextToScanForOutputsBlock: () -> u64,
+    // The next block to check for resolving eventualities
+    NextToCheckForEventualitiesBlock: () -> u64,
+
+    // If a block was notable
+    /*
+      A block is notable if one of three conditions are met:
+
+      1) We activated a key within this block.
+      2) We retired a key within this block.
+      3) We received outputs within this block.
+
+      The first two conditions, and the reasoning for them, is extensively documented in
+      `spec/processor/Multisig Rotation.md`. The third is obvious (as any block we receive outputs
+      in needs synchrony so that we can spend the received outputs).
+
+      We save if a block is notable here by either the scan for received outputs task or the
+      check for eventuality completion task. Once a block has been processed by both, the reporting
+      task will report any notable blocks. Finally, the task which sets the block safe to scan to
+      makes its decision based on the notable blocks and the acknowledged blocks.
+    */
+    // This collapses from `bool` to `()`, using if the value was set for true and false otherwise
+    NotableBlock: (number: u64) -> (),
+  }
+);
+
+pub(crate) struct ScannerDb<S: ScannerFeed>(PhantomData<S>);
+impl<S: ScannerFeed> ScannerDb<S> {
+  pub(crate) fn set_block(txn: &mut impl DbTxn, number: u64, id: <S::Block as Block>::Id) {
+    BlockId::set(txn, number, &id);
+    BlockNumber::set(txn, id, &number);
+  }
+  pub(crate) fn block_id(getter: &impl Get, number: u64) -> Option<<S::Block as Block>::Id> {
+    BlockId::get(getter, number)
+  }
+  pub(crate) fn block_number(getter: &impl Get, id: <S::Block as Block>::Id) -> Option<u64> {
+    BlockNumber::get(getter, id)
+  }
+
+  // activation_block_number is inclusive, so the key will be scanned for starting at the specified
+  // block
+  pub(crate) fn queue_key(txn: &mut impl DbTxn, activation_block_number: u64, key: S::Key) {
+    let mut keys: Vec<SeraiKey<BorshG<S::Key>>> = ActiveKeys::get(txn).unwrap_or(vec![]);
+    for key_i in &keys {
+      if key == key_i.key.0 {
+        panic!("queueing a key previously queued");
+      }
+    }
+    keys.push(SeraiKey {
+      activation_block_number,
+      retirement_block_number: None,
+      key: BorshG(key),
+    });
+    ActiveKeys::set(txn, &keys);
+  }
+  // retirement_block_number is inclusive, so the key will no longer be scanned for as of the
+  // specified block
+  pub(crate) fn retire_key(txn: &mut impl DbTxn, retirement_block_number: u64, key: S::Key) {
+    let mut keys: Vec<SeraiKey<BorshG<S::Key>>> =
+      ActiveKeys::get(txn).expect("retiring key yet no active keys");
+
+    assert!(keys.len() > 1, "retiring our only key");
+    for i in 0 .. keys.len() {
+      if key == keys[i].key.0 {
+        keys[i].retirement_block_number = Some(retirement_block_number);
+        ActiveKeys::set(txn, &keys);
+        return;
+      }
+
+      // This is not the key in question, but since it's older, it already should've been queued
+      // for retirement
+      assert!(
+        keys[i].retirement_block_number.is_some(),
+        "older key wasn't retired before newer key"
+      );
+    }
+    panic!("retiring key yet not present in keys")
+  }
+  pub(crate) fn keys(getter: &impl Get) -> Option<Vec<SeraiKey<BorshG<S::Key>>>> {
+    ActiveKeys::get(getter)
+  }
+
+  pub(crate) fn set_start_block(
+    txn: &mut impl DbTxn,
+    start_block: u64,
+    id: <S::Block as Block>::Id,
+  ) {
+    Self::set_block(txn, start_block, id);
+    LatestFinalizedBlock::set(txn, &start_block);
+    LatestScannableBlock::set(txn, &start_block);
+    NextToScanForOutputsBlock::set(txn, &start_block);
+    NextToCheckForEventualitiesBlock::set(txn, &start_block);
+  }
+
+  pub(crate) fn set_latest_finalized_block(txn: &mut impl DbTxn, latest_finalized_block: u64) {
+    LatestFinalizedBlock::set(txn, &latest_finalized_block);
+  }
+  pub(crate) fn latest_finalized_block(getter: &impl Get) -> Option<u64> {
+    LatestFinalizedBlock::get(getter)
+  }
+
+  pub(crate) fn set_latest_scannable_block(txn: &mut impl DbTxn, latest_scannable_block: u64) {
+    LatestScannableBlock::set(txn, &latest_scannable_block);
+  }
+  pub(crate) fn latest_scannable_block(getter: &impl Get) -> Option<u64> {
+    LatestScannableBlock::get(getter)
+  }
+
+  pub(crate) fn set_next_to_scan_for_outputs_block(
+    txn: &mut impl DbTxn,
+    next_to_scan_for_outputs_block: u64,
+  ) {
+    NextToScanForOutputsBlock::set(txn, &next_to_scan_for_outputs_block);
+  }
+  pub(crate) fn next_to_scan_for_outputs_block(getter: &impl Get) -> Option<u64> {
+    NextToScanForOutputsBlock::get(getter)
+  }
+
+  pub(crate) fn set_next_to_check_for_eventualities_block(
+    txn: &mut impl DbTxn,
+    next_to_check_for_eventualities_block: u64,
+  ) {
+    NextToCheckForEventualitiesBlock::set(txn, &next_to_check_for_eventualities_block);
+  }
+  pub(crate) fn next_to_check_for_eventualities_block(getter: &impl Get) -> Option<u64> {
+    NextToCheckForEventualitiesBlock::get(getter)
+  }
+}
diff --git a/processor/scanner/src/eventuality.rs b/processor/scanner/src/eventuality.rs
new file mode 100644
index 000000000..e69de29bb
diff --git a/processor/scanner/src/index.rs b/processor/scanner/src/index.rs
new file mode 100644
index 000000000..66477cdbe
--- /dev/null
+++ b/processor/scanner/src/index.rs
@@ -0,0 +1,72 @@
+use serai_db::{Db, DbTxn};
+
+use primitives::{Id, Block};
+
+// TODO: Localize to IndexDb?
+use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan};
+
+/*
+  This processor should build its own index of the blockchain, yet only for finalized blocks which
+  are safe to process. For Proof of Work blockchains, which only have probabilistic finality, these
+  are the set of sufficiently confirmed blocks. For blockchains with finality, these are the
+  finalized blocks.
+
+  This task finds the finalized blocks, verifies they're contiguous, and saves their IDs.
+*/
+struct IndexFinalizedTask<D: Db, S: ScannerFeed> {
+  db: D,
+  feed: S,
+}
+
+#[async_trait::async_trait]
+impl<D: Db, S: ScannerFeed> ContinuallyRan for IndexFinalizedTask<D, S> {
+  async fn run_instance(&mut self) -> Result<(), String> {
+    // Fetch the latest finalized block
+    let our_latest_finalized = ScannerDb::<S>::latest_finalized_block(&self.db)
+      .expect("IndexTask run before writing the start block");
+    let latest_finalized = match self.feed.latest_finalized_block_number().await {
+      Ok(latest_finalized) => latest_finalized,
+      Err(e) => Err(format!("couldn't fetch the latest finalized block number: {e:?}"))?,
+    };
+
+    // Index the hashes of all blocks until the latest finalized block
+    for b in (our_latest_finalized + 1) ..= latest_finalized {
+      let block = match self.feed.block_by_number(b).await {
+        Ok(block) => block,
+        Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?,
+      };
+
+      // Check this descends from our indexed chain
+      {
+        let expected_parent =
+          ScannerDb::<S>::block_id(&self.db, b - 1).expect("didn't have the ID of the prior block");
+        if block.parent() != expected_parent {
+          panic!(
+            "current finalized block (#{b}, {}) doesn't build off finalized block (#{}, {})",
+            hex::encode(block.parent()),
+            b - 1,
+            hex::encode(expected_parent)
+          );
+        }
+      }
+
+      // Update the latest finalized block
+      let mut txn = self.db.txn();
+      ScannerDb::<S>::set_block(&mut txn, b, block.id());
+      ScannerDb::<S>::set_latest_finalized_block(&mut txn, b);
+      txn.commit();
+    }
+
+    Ok(())
+  }
+}
+
+/*
+  The processor can't index the blockchain unilaterally. It needs to develop a totally ordered view
+  of the blockchain. That requires consensus with other validators on when certain keys are set to
+  activate (and retire). We solve this by only scanning `n` blocks ahead of the last agreed upon
+  block, then waiting for Serai to acknowledge the block. This lets us safely schedule events after
+  this `n` block window (as demonstrated/proven with `mini`).
+
+  TODO
+*/
diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs
index 1b25e1086..736a62b96 100644
--- a/processor/scanner/src/lib.rs
+++ b/processor/scanner/src/lib.rs
@@ -1,25 +1,91 @@
-use core::marker::PhantomData;
-use std::{
-  sync::Arc,
-  io::Read,
-  time::Duration,
-  collections::{VecDeque, HashSet, HashMap},
-};
-
-use ciphersuite::group::GroupEncoding;
-use frost::curve::Ciphersuite;
-
-use log::{info, debug, warn};
-use tokio::{
-  sync::{RwLockReadGuard, RwLockWriteGuard, RwLock, mpsc},
-  time::sleep,
-};
-
-use crate::{
-  Get, DbTxn, Db,
-  networks::{Output, Transaction, Eventuality, EventualitiesTracker, Block, Network},
-};
+use core::fmt::Debug;
+
+use primitives::{ReceivedOutput, Block};
+
+mod db;
+mod index;
+
+/// A feed usable to scan a blockchain.
+///
+/// This defines the primitive types used, along with various getters necessary for indexing.
+#[async_trait::async_trait]
+pub trait ScannerFeed: Send + Sync {
+  /// The type of the key used to receive coins on this blockchain.
+  type Key: group::Group + group::GroupEncoding;
+
+  /// The type of the address used to specify who to send coins to on this blockchain.
+  type Address;
+
+  /// The type representing a received (and spendable) output.
+  type Output: ReceivedOutput<Self::Key, Self::Address>;
+
+  /// The representation of a block for this blockchain.
+  ///
+  /// A block is defined as a consensus event associated with a set of transactions. It is not
+  /// necessary to literally define it as whatever the external network defines as a block. For
+  /// external networks which finalize block(s), this block type should be a representation of all
+  /// transactions within a finalization event.
+  type Block: Block;
+
+  /// An error encountered when fetching data from the blockchain.
+  ///
+  /// This MUST be an ephemeral error. Retrying fetching data from the blockchain MUST eventually
+  /// resolve without manual intervention.
+  type EphemeralError: Debug;
+
+  /// Fetch the number of the latest finalized block.
+  ///
+  /// The block number is its zero-indexed position within a linear view of the external network's
+  /// consensus. The genesis block accordingly has block number 0.
+  async fn latest_finalized_block_number(&self) -> Result<u64, Self::EphemeralError>;
+
+  /// Fetch a block by its number.
+  async fn block_by_number(&self, number: u64) -> Result<Self::Block, Self::EphemeralError>;
+
+  /// Scan a block for its outputs.
+  async fn scan_for_outputs(
+    &self,
+    block: &Self::Block,
+    key: Self::Key,
+  ) -> Result<Vec<Self::Output>, Self::EphemeralError>;
+}
+
+#[async_trait::async_trait]
+pub(crate) trait ContinuallyRan: Sized {
+  async fn run_instance(&mut self) -> Result<(), String>;
+
+  async fn continually_run(mut self) {
+    // The default number of seconds to sleep before running the task again
+    let default_sleep_before_next_task = 5;
+    // The current number of seconds to sleep before running the task again
+    // We increment this upon errors in order to not flood the logs with errors
+    let mut current_sleep_before_next_task = default_sleep_before_next_task;
+    let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| {
+      let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task;
+      // Set a limit of sleeping for two minutes
+      *current_sleep_before_next_task = new_sleep.min(120);
+    };
+
+    loop {
+      match self.run_instance().await {
+        Ok(()) => {
+          // Upon a successful (error-free) loop iteration, reset the amount of time we sleep
+          current_sleep_before_next_task = default_sleep_before_next_task;
+        }
+        Err(e) => {
+          log::debug!("{}", e);
+          increase_sleep_before_next_task(&mut current_sleep_before_next_task);
+        }
+      }
+
+      // Don't run the task again for another few seconds
+      // This is at the start of the loop so we can continue without skipping this delay
+      tokio::time::sleep(core::time::Duration::from_secs(current_sleep_before_next_task)).await;
+    }
+  }
+}
+
+/*
 #[derive(Clone, Debug)]
 pub enum ScannerEvent<N: Network> {
   // Block scanned
@@ -44,86 +110,6 @@ pub type ScannerEventChannel<N> = mpsc::UnboundedReceiver<ScannerEvent<N>>;
 #[derive(Clone, Debug)]
 struct ScannerDb<N: Network, D: Db>(PhantomData<N>, PhantomData<D>);
 impl<N: Network, D: Db> ScannerDb<N, D> {
-  fn scanner_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
-    D::key(b"SCANNER", dst, key)
-  }
-
-  fn block_key(number: usize) -> Vec<u8> {
-    Self::scanner_key(b"block_id", u64::try_from(number).unwrap().to_le_bytes())
-  }
-  fn block_number_key(id: &<N::Block as Block<N>>::Id) -> Vec<u8> {
-    Self::scanner_key(b"block_number", id)
-  }
-  fn save_block(txn: &mut D::Transaction<'_>, number: usize, id: &<N::Block as Block<N>>::Id) {
-    txn.put(Self::block_number_key(id), u64::try_from(number).unwrap().to_le_bytes());
-    txn.put(Self::block_key(number), id);
-  }
-  fn block<G: Get>(getter: &G, number: usize) -> Option<<N::Block as Block<N>>::Id> {
-    getter.get(Self::block_key(number)).map(|id| {
-      let mut res = <N::Block as Block<N>>::Id::default();
-      res.as_mut().copy_from_slice(&id);
-      res
-    })
-  }
-  fn block_number<G: Get>(getter: &G, id: &<N::Block as Block<N>>::Id) -> Option<usize> {
-    getter
-      .get(Self::block_number_key(id))
-      .map(|number| u64::from_le_bytes(number.try_into().unwrap()).try_into().unwrap())
-  }
-
-  fn keys_key() -> Vec<u8> {
-    Self::scanner_key(b"keys", b"")
-  }
-  fn register_key(
-    txn: &mut D::Transaction<'_>,
-    activation_number: usize,
-    key: <N::Curve as Ciphersuite>::G,
-  ) {
-    let mut keys = txn.get(Self::keys_key()).unwrap_or(vec![]);
-
-    let key_bytes = key.to_bytes();
-
-    let key_len = key_bytes.as_ref().len();
-    assert_eq!(keys.len() % (8 + key_len), 0);
-
-    // Sanity check this key isn't already present
-    let mut i = 0;
-    while i < keys.len() {
-      if &keys[(i + 8) .. ((i + 8) + key_len)] == key_bytes.as_ref() {
-        panic!("adding {} as a key yet it was already present", hex::encode(key_bytes));
-      }
-      i += 8 + key_len;
-    }
-
-    keys.extend(u64::try_from(activation_number).unwrap().to_le_bytes());
-    keys.extend(key_bytes.as_ref());
-    txn.put(Self::keys_key(), keys);
-  }
-  fn keys<G: Get>(getter: &G) -> Vec<(usize, <N::Curve as Ciphersuite>::G)> {
-    let bytes_vec = getter.get(Self::keys_key()).unwrap_or(vec![]);
-    let mut bytes: &[u8] = bytes_vec.as_ref();
-
-    // Assumes keys will be 32 bytes when calculating the capacity
-    // If keys are larger, this may allocate more memory than needed
-    // If keys are smaller, this may require additional allocations
-    // Either are fine
-    let mut res = Vec::with_capacity(bytes.len() / (8 + 32));
-    while !bytes.is_empty() {
-      let mut activation_number = [0; 8];
-      bytes.read_exact(&mut activation_number).unwrap();
-      let activation_number = u64::from_le_bytes(activation_number).try_into().unwrap();
-
-      res.push((activation_number, N::Curve::read_G(&mut bytes).unwrap()));
-    }
-    res
-  }
-  fn retire_key(txn: &mut D::Transaction<'_>) {
-    let keys = Self::keys(txn);
-    assert_eq!(keys.len(), 2);
-    txn.del(Self::keys_key());
-    Self::register_key(txn, keys[1].0, keys[1].1);
-  }
-
-  fn seen_key(id: &<N::Output as Output<N>>::Id) -> Vec<u8> {
-    Self::scanner_key(b"seen", id)
-  }
@@ -737,3 +723,4 @@ impl Scanner {
     }
   }
 }
+*/
diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs
new file mode 100644
index 000000000..6f784a7e5
--- /dev/null
+++ b/processor/scanner/src/scan.rs
@@ -0,0 +1,73 @@
+use serai_db::{Db, DbTxn};
+
+use primitives::{Id, Block};
+
+// TODO: Localize to ScanDb?
+use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan};
+
+struct ScanForOutputsTask<D: Db, S: ScannerFeed> {
+  db: D,
+  feed: S,
+}
+
+#[async_trait::async_trait]
+impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanForOutputsTask<D, S> {
+  async fn run_instance(&mut self) -> Result<(), String> {
+    // Fetch the safe to scan block
+    let latest_scannable = ScannerDb::<S>::latest_scannable_block(&self.db)
+      .expect("ScanForOutputsTask run before writing the start block");
+    // Fetch the next block to scan
+    let next_to_scan = ScannerDb::<S>::next_to_scan_for_outputs_block(&self.db)
+      .expect("ScanForOutputsTask run before writing the start block");
+
+    for b in next_to_scan ..= latest_scannable {
+      let block = match self.feed.block_by_number(b).await {
+        Ok(block) => block,
+        Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?,
+      };
+
+      // Check the ID of this block is the expected ID
+      {
+        let expected =
+          ScannerDb::<S>::block_id(&self.db, b).expect("scannable block didn't have its ID saved");
+        if block.id() != expected {
+          panic!(
+            "finalized chain reorganized from {} to {} at {}",
+            hex::encode(expected),
+            hex::encode(block.id()),
+            b
+          );
+        }
+      }
+
+      log::info!("scanning block: {} ({b})", hex::encode(block.id()));
+
+      let mut keys =
+        ScannerDb::<S>::keys(&self.db).expect("scanning for a blockchain without any keys set");
+      // Remove all the retired keys
+      while let Some(retire_at) = keys[0].retirement_block_number {
+        if retire_at <= b {
+          keys.remove(0);
+        } else {
+          break;
+        }
+      }
+      assert!(keys.len() <= 2);
+
+      // Scan for each key
+      for key in keys {
+        // If this key has yet to activate, skip it
+        if key.activation_block_number > b {
+          continue;
+        }
+
+        let mut outputs = vec![];
+        for output in match self.feed.scan_for_outputs(&block, key.key.0).await {
+          Ok(outputs) => outputs,
+          Err(e) => Err(format!("couldn't scan block {b}: {e:?}"))?,
+        } {
+          assert_eq!(output.key(), key.key.0);
+          // TODO: Check for dust
+          outputs.push(output);
+        }
+      }
+
+      let mut txn = self.db.txn();
+      // Update the latest scanned block
+      ScannerDb::<S>::set_next_to_scan_for_outputs_block(&mut txn, b + 1);
+      // TODO: If this had outputs, yield them and mark this block notable
+      /*
+        A block is notable if it's an activation, had outputs, or a retirement block.
+      */
+      txn.commit();
+    }
+
+    Ok(())
+  }
+}
diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs
index 12f017151..92ea0271a 100644
--- a/processor/src/multisigs/mod.rs
+++ b/processor/src/multisigs/mod.rs
@@ -18,10 +18,12 @@ use log::{info, error};
 
 use tokio::time::sleep;
 
+/* TODO
 #[cfg(not(test))]
 mod scanner;
 #[cfg(test)]
 pub mod scanner;
+*/
 
 use scanner::{ScannerEvent, ScannerHandle, Scanner};

From e19a12616199a60d333e7bec4716a879ab9e7a4f Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Tue, 20 Aug 2024 16:24:18 -0400
Subject: [PATCH 008/179] Flesh out new scanner a bit more

Adds the task to mark blocks safe to scan, and outlines the task to report
blocks.
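
To make the outlined "safe to scan" rule concrete: a minimal sketch of the bound the new task has to enforce, per the `n`-block window described in `index.rs` above. The `ACKNOWLEDGEMENT_WINDOW` constant and `latest_scannable_block` function are illustrative assumptions, not code from this commit:

// Hypothetical window size: how many blocks the scanner may run ahead of the
// highest block Serai has acknowledged (the real value is a protocol detail)
const ACKNOWLEDGEMENT_WINDOW: u64 = 10;

// The latest scannable block is the finalized tip, capped to the window past
// the highest acknowledged block
fn latest_scannable_block(latest_finalized: u64, highest_acknowledged: u64) -> u64 {
  latest_finalized.min(highest_acknowledged + ACKNOWLEDGEMENT_WINDOW)
}
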
--- processor/scanner/src/db.rs | 60 +++++++++++++++++++++-- processor/scanner/src/eventuality.rs | 1 + processor/scanner/src/index.rs | 27 +++++----- processor/scanner/src/lib.rs | 65 ++++++++++++++++++++++--- processor/scanner/src/report.rs | 50 +++++++++++++++++++ processor/scanner/src/safe.rs | 73 ++++++++++++++++++++++++++++ processor/scanner/src/scan.rs | 15 +++--- 7 files changed, 259 insertions(+), 32 deletions(-) create mode 100644 processor/scanner/src/report.rs create mode 100644 processor/scanner/src/safe.rs diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 8bd7d9447..073d5d420 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -1,11 +1,9 @@ use core::marker::PhantomData; -use group::GroupEncoding; - use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db}; -use primitives::{Id, Block, BorshG}; +use primitives::{Id, ReceivedOutput, Block, BorshG}; use crate::ScannerFeed; @@ -14,7 +12,7 @@ trait Borshy: BorshSerialize + BorshDeserialize {} impl Borshy for T {} #[derive(BorshSerialize, BorshDeserialize)] -struct SeraiKey { +pub(crate) struct SeraiKey { activation_block_number: u64, retirement_block_number: Option, key: K, @@ -35,6 +33,10 @@ create_db!( NextToScanForOutputsBlock: () -> u64, // The next block to check for resolving eventualities NextToCheckForEventualitiesBlock: () -> u64, + // The next block to potentially report + NextToPotentiallyReportBlock: () -> u64, + // The highest acknowledged block + HighestAcknowledgedBlock: () -> u64, // If a block was notable /* @@ -55,6 +57,8 @@ create_db!( */ // This collapses from `bool` to `()`, using if the value was set for true and false otherwise NotableBlock: (number: u64) -> (), + + SerializedOutputs: (block_number: u64) -> Vec, } ); @@ -74,6 +78,10 @@ impl ScannerDb { // activation_block_number is inclusive, so the key will be scanned for starting at the specified // block pub(crate) fn queue_key(txn: &mut impl DbTxn, activation_block_number: u64, key: S::Key) { + // Set this block as notable + NotableBlock::set(txn, activation_block_number, &()); + + // Push the key let mut keys: Vec>> = ActiveKeys::get(txn).unwrap_or(vec![]); for key_i in &keys { if key == key_i.key.0 { @@ -124,6 +132,7 @@ impl ScannerDb { LatestScannableBlock::set(txn, &start_block); NextToScanForOutputsBlock::set(txn, &start_block); NextToCheckForEventualitiesBlock::set(txn, &start_block); + NextToPotentiallyReportBlock::set(txn, &start_block); } pub(crate) fn set_latest_finalized_block(txn: &mut impl DbTxn, latest_finalized_block: u64) { @@ -159,4 +168,47 @@ impl ScannerDb { pub(crate) fn next_to_check_for_eventualities_block(getter: &impl Get) -> Option { NextToCheckForEventualitiesBlock::get(getter) } + + pub(crate) fn set_next_to_potentially_report_block( + txn: &mut impl DbTxn, + next_to_potentially_report_block: u64, + ) { + NextToPotentiallyReportBlock::set(txn, &next_to_potentially_report_block); + } + pub(crate) fn next_to_potentially_report_block(getter: &impl Get) -> Option { + NextToPotentiallyReportBlock::get(getter) + } + + pub(crate) fn set_highest_acknowledged_block( + txn: &mut impl DbTxn, + highest_acknowledged_block: u64, + ) { + HighestAcknowledgedBlock::set(txn, &highest_acknowledged_block); + } + pub(crate) fn highest_acknowledged_block(getter: &impl Get) -> Option { + HighestAcknowledgedBlock::get(getter) + } + + pub(crate) fn set_outputs( + txn: &mut impl DbTxn, + block_number: u64, + outputs: Vec>, + ) { + if outputs.is_empty() { + return; + } + + 
// Set this block as notable + NotableBlock::set(txn, block_number, &()); + + let mut buf = Vec::with_capacity(outputs.len() * 128); + for output in outputs { + output.write(&mut buf).unwrap(); + } + SerializedOutputs::set(txn, block_number, &buf); + } + + pub(crate) fn is_notable_block(getter: &impl Get, number: u64) -> bool { + NotableBlock::get(getter, number).is_some() + } } diff --git a/processor/scanner/src/eventuality.rs b/processor/scanner/src/eventuality.rs index e69de29bb..70b786d12 100644 --- a/processor/scanner/src/eventuality.rs +++ b/processor/scanner/src/eventuality.rs @@ -0,0 +1 @@ +// TODO diff --git a/processor/scanner/src/index.rs b/processor/scanner/src/index.rs index 66477cdbe..7967d5df3 100644 --- a/processor/scanner/src/index.rs +++ b/processor/scanner/src/index.rs @@ -20,7 +20,7 @@ struct IndexFinalizedTask { #[async_trait::async_trait] impl ContinuallyRan for IndexFinalizedTask { - async fn run_instance(&mut self) -> Result<(), String> { + async fn run_iteration(&mut self) -> Result { // Fetch the latest finalized block let our_latest_finalized = ScannerDb::::latest_finalized_block(&self.db) .expect("IndexTask run before writing the start block"); @@ -29,6 +29,18 @@ impl ContinuallyRan for IndexFinalizedTask { Err(e) => Err(format!("couldn't fetch the latest finalized block number: {e:?}"))?, }; + if latest_finalized < our_latest_finalized { + // Explicitly log this as an error as returned ephemeral errors are logged with debug + // This doesn't panic as the node should sync along our indexed chain, and if it doesn't, + // we'll panic at that point in time + log::error!( + "node is out of sync, latest finalized {} is behind our indexed {}", + latest_finalized, + our_latest_finalized + ); + Err("node is out of sync".to_string())?; + } + // Index the hashes of all blocks until the latest finalized block for b in (our_latest_finalized + 1) ..= latest_finalized { let block = match self.feed.block_by_number(b).await { @@ -57,16 +69,7 @@ impl ContinuallyRan for IndexFinalizedTask { txn.commit(); } - Ok(()) + // Have dependents run if we updated the latest finalized block + Ok(our_latest_finalized != latest_finalized) } } - -/* - The processor can't index the blockchain unilaterally. It needs to develop a totally ordered view - of the blockchain. That requires consensus with other validators on when certain keys are set to - activate (and retire). We solve this by only scanning `n` blocks ahead of the last agreed upon - block, then waiting for Serai to acknowledge the block. This lets us safely schedule events after - this `n` block window (as demonstrated/proven with `mini`). - - TODO -*/ diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 736a62b96..04dcf8249 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -1,4 +1,6 @@ -use core::fmt::Debug; +use core::{fmt::Debug, time::Duration}; + +use tokio::sync::mpsc; use primitives::{ReceivedOutput, Block}; @@ -50,11 +52,50 @@ pub trait ScannerFeed: Send + Sync { ) -> Result; } +/// A handle to immediately run an iteration of a task. +#[derive(Clone)] +pub(crate) struct RunNowHandle(mpsc::Sender<()>); +/// An instruction recipient to immediately run an iteration of a task. +pub(crate) struct RunNowRecipient(mpsc::Receiver<()>); + +impl RunNowHandle { + /// Create a new run-now handle to be assigned to a task. 
+  pub(crate) fn new() -> (Self, RunNowRecipient) {
+    // Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as
+    // soon as possible
+    let (send, recv) = mpsc::channel(1);
+    (Self(send), RunNowRecipient(recv))
+  }
+
+  /// Tell the task to run now (and not whenever its next iteration on a timer is).
+  ///
+  /// Panics if the task has been dropped.
+  pub(crate) fn run_now(&self) {
+    #[allow(clippy::match_same_arms)]
+    match self.0.try_send(()) {
+      Ok(()) => {}
+      // NOP on full, as this task will already be run as soon as possible
+      Err(mpsc::error::TrySendError::Full(())) => {}
+      Err(mpsc::error::TrySendError::Closed(())) => {
+        panic!("task was unexpectedly closed when calling run_now")
+      }
+    }
+  }
+}
+
 #[async_trait::async_trait]
 pub(crate) trait ContinuallyRan: Sized {
-  async fn run_instance(&mut self) -> Result<(), String>;
+  /// Run an iteration of the task.
+  ///
+  /// If this returns `true`, all dependents of the task will immediately have a new iteration run
+  /// (without waiting for whatever timer they were already on).
+  async fn run_iteration(&mut self) -> Result<bool, String>;
 
-  async fn continually_run(mut self) {
+  /// Continually run the task.
+  ///
+  /// The provided `run_now` channel can be sent a message to immediately trigger a new
+  /// iteration.
+  async fn continually_run(mut self, mut run_now: RunNowRecipient, dependents: Vec<RunNowHandle>) {
     // The default number of seconds to sleep before running the task again
     let default_sleep_before_next_task = 5;
     // The current number of seconds to sleep before running the task again
@@ -67,10 +108,16 @@ pub(crate) trait ContinuallyRan: Sized {
     };
 
     loop {
-      match self.run_instance().await {
-        Ok(()) => {
+      match self.run_iteration().await {
+        Ok(run_dependents) => {
           // Upon a successful (error-free) loop iteration, reset the amount of time we sleep
           current_sleep_before_next_task = default_sleep_before_next_task;
+
+          if run_dependents {
+            for dependent in &dependents {
+              dependent.run_now();
+            }
+          }
         }
         Err(e) => {
           log::debug!("{}", e);
@@ -78,9 +125,11 @@ pub(crate) trait ContinuallyRan: Sized {
         }
       }
 
-      // Don't run the task again for another few seconds
-      // This is at the start of the loop so we can continue without skipping this delay
-      tokio::time::sleep(core::time::Duration::from_secs(current_sleep_before_next_task)).await;
+      // Don't run the task again for another few seconds UNLESS told to run now
+      tokio::select! {
+        () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
+        msg = run_now.0.recv() => assert_eq!(msg, Some(()), "run now handle was dropped"),
+      }
    }
  }
}
diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs
new file mode 100644
index 000000000..4d378b9c0
--- /dev/null
+++ b/processor/scanner/src/report.rs
@@ -0,0 +1,50 @@
+/*
+  We only report blocks once both tasks, scanning for received outputs and eventualities, have
+  processed the block. This ensures we've performed all necessary operations.
+*/
+
+use serai_db::{Db, DbTxn};
+
+use primitives::{Id, Block};
+
+// TODO: Localize to ReportDb?
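To make the wiring of the run-now machinery above concrete, a sketch (the `index_task` and `scan_task` names are hypothetical): each task consumes its own `RunNowRecipient`, while upstream tasks hold the matching `RunNowHandle`s as dependents, waking them early whenever an iteration reports progress.

  let (scan_handle, scan_recipient) = RunNowHandle::new();
  let (_index_handle, index_recipient) = RunNowHandle::new();

  // When the index task's run_iteration returns Ok(true), continually_run calls
  // scan_handle.run_now(), cutting the scan task's sleep short
  tokio::spawn(index_task.continually_run(index_recipient, vec![scan_handle]));
  tokio::spawn(scan_task.continually_run(scan_recipient, vec![]));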
+use crate::{db::ScannerDb, ScannerFeed};
+
+struct ReportTask<D: Db, S: ScannerFeed> {
+  db: D,
+  feed: S,
+}
+
+#[async_trait::async_trait]
+impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
+  async fn run_iteration(&mut self) -> Result<bool, String> {
+    let highest_reportable = {
+      // Fetch the latest scanned and latest checked block
+      let next_to_scan = ScannerDb::<S>::next_to_scan_for_outputs_block(&self.db).expect("ReportTask run before writing the start block");
+      let next_to_check = ScannerDb::<S>::next_to_check_for_eventualities_block(&self.db).expect("ReportTask run before writing the start block");
+      // If we haven't done any work, return
+      if (next_to_scan == 0) || (next_to_check == 0) {
+        return Ok(false);
+      }
+      let last_scanned = next_to_scan - 1;
+      let last_checked = next_to_check - 1;
+      last_scanned.min(last_checked)
+    };
+
+    let next_to_potentially_report = ScannerDb::<S>::next_block_to_potentially_report(&self.db).expect("ReportTask run before writing the start block");
+
+    for b in next_to_potentially_report ..= highest_reportable {
+      if ScannerDb::<S>::is_block_notable(b) {
+        todo!("TODO: Make Batches, which requires handling Forwarded within this crate");
+      }
+
+      let mut txn = self.db.txn();
+      // Update the next to potentially report block
+      ScannerDb::<S>::set_next_to_potentially_report_block(&mut txn, b + 1);
+      txn.commit();
+    }
+
+    // Run dependents if we decided to report any blocks
+    Ok(next_to_potentially_report <= highest_reportable)
+  }
+}
diff --git a/processor/scanner/src/safe.rs b/processor/scanner/src/safe.rs
new file mode 100644
index 000000000..a5de448d4
--- /dev/null
+++ b/processor/scanner/src/safe.rs
@@ -0,0 +1,73 @@
+use core::marker::PhantomData;
+
+use serai_db::{Db, DbTxn};
+
+use primitives::{Id, Block};
+
+// TODO: Localize to SafeDb?
+use crate::{db::ScannerDb, ScannerFeed};
+
+/*
+  We mark blocks safe to scan when they're no more than `(CONFIRMATIONS - 1)` blocks after the
+  oldest notable block still pending acknowledgement (creating a window of length `CONFIRMATIONS`
+  when including the block pending acknowledgement). This means that if all known notable blocks
+  have been acknowledged, and a stretch of non-notable blocks occurs, they'll automatically be
+  marked safe to scan (since they come before the next oldest notable block still pending
+  acknowledgement).
+
+  This design lets Serai safely schedule events `CONFIRMATIONS` blocks after the latest
+  acknowledged block. For an exhaustive proof of this, please see `mini`.
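  A worked example with illustrative numbers: take `CONFIRMATIONS = 10` and the oldest notable
  block pending acknowledgement at height 100. Blocks up to `100 + (CONFIRMATIONS - 1) = 109` are
  then safe to scan: a window of exactly `CONFIRMATIONS` blocks, counting the pending block
  itself. Once block 100 is acknowledged, the window slides to end at the next oldest pending
  notable block plus 9, or further still if no notable block is pending.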
+*/
+struct SafeToScanTask<D: Db, S: ScannerFeed> {
+  db: D,
+  _S: PhantomData<S>,
+}
+
+#[async_trait::async_trait]
+impl<D: Db, S: ScannerFeed> ContinuallyRan for SafeToScanTask<D, S> {
+  async fn run_iteration(&mut self) -> Result<bool, String> {
+    // First, we fetch the highest acknowledged block
+    let Some(highest_acknowledged_block) = ScannerDb::<S>::highest_acknowledged_block(&self.db) else {
+      // If no blocks have been acknowledged, we don't mark any safe
+      // Once the start block (implicitly safe) has been acknowledged, we proceed from there
+      return Ok(false);
+    };
+
+    let latest_block_known_if_pending_acknowledgement = {
+      // The next block to potentially report comes after all blocks we've decided to report or not
+      // If we've decided to report (or not report) a block, we know if it needs acknowledgement
+      // (and accordingly is pending acknowledgement)
+      // Accordingly, the block immediately before this is the latest block with a known status
+      ScannerDb::<S>::next_block_to_potentially_report(&self.db).expect("SafeToScanTask run before writing the start block") - 1
+    };
+
+    let mut oldest_pending_acknowledgement = None;
+    for b in (highest_acknowledged_block + 1) ..= latest_block_known_if_pending_acknowledgement {
+      // If the block isn't notable, immediately flag it as acknowledged
+      if !ScannerDb::<S>::is_block_notable(b) {
+        let mut txn = self.db.txn();
+        ScannerDb::<S>::set_highest_acknowledged_block(&mut txn, b);
+        txn.commit();
+        continue;
+      }
+
+      oldest_pending_acknowledgement = Some(b);
+      break;
+    }
+
+    // `oldest_pending_acknowledgement` is now the oldest block pending acknowledgement or `None`
+    // If it's `None`, then we were able to implicitly acknowledge all blocks within this span
+    // Since the safe block is `(CONFIRMATIONS - 1)` blocks after the oldest block still pending
+    // acknowledgement, and the oldest block still pending acknowledgement is in the future,
+    // we know the safe block to scan to is
+    // `>= latest_block_known_if_pending_acknowledgement + (CONFIRMATIONS - 1)`
+    let oldest_pending_acknowledgement = oldest_pending_acknowledgement.unwrap_or(latest_block_known_if_pending_acknowledgement);
+
+    // Update the latest scannable block
+    let mut txn = self.db.txn();
+    ScannerDb::<S>::set_latest_scannable_block(oldest_pending_acknowledgement + (CONFIRMATIONS - 1));
+    txn.commit();
+
+    Ok(next_to_potentially_report <= highest_reportable)
+  }
+}
diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs
index 6f784a7e5..b96486d42 100644
--- a/processor/scanner/src/scan.rs
+++ b/processor/scanner/src/scan.rs
@@ -12,7 +12,7 @@ struct ScanForOutputsTask<D: Db, S: ScannerFeed> {
 
 #[async_trait::async_trait]
 impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanForOutputsTask<D, S> {
-  async fn run_instance(&mut self) -> Result<(), String> {
+  async fn run_iteration(&mut self) -> Result<bool, String> {
     // Fetch the safe to scan block
     let latest_scannable = ScannerDb::<S>::latest_scannable_block(&self.db).expect("ScanForOutputsTask run before writing the start block");
     // Fetch the next block to scan
@@ -43,6 +43,7 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanForOutputsTask<D, S> {
       }
       assert!(keys.len() <= 2);
 
+      let mut outputs = vec![];
       // Scan for each key
       for key in keys {
         // If this key has yet to activate, skip it
@@ -50,7 +51,6 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanForOutputsTask<D, S> {
           continue;
         }
 
-        let mut outputs = vec![];
         for output in network.scan_for_outputs(&block, key).awaits {
           assert_eq!(output.key(), key);
           // TODO: Check for dust
@@ -59,15 +59,14 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanForOutputsTask<D, S> {
       }
 
       let mut txn = self.db.txn();
-      // Update the latest scanned block
+      // Save the outputs
+      ScannerDb::<S>::set_outputs(&mut txn,
b, outputs); + // Update the next to scan block ScannerDb::::set_next_to_scan_for_outputs_block(&mut txn, b + 1); - // TODO: If this had outputs, yield them and mark this block notable - /* - A block is notable if it's an activation, had outputs, or a retirement block. - */ txn.commit(); } - Ok(()) + // Run dependents if we successfully scanned any blocks + Ok(next_to_scan <= latest_scannable) } } From 26a5475b33623da9316022d6bc5227e3151a1eb2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 20 Aug 2024 16:39:03 -0400 Subject: [PATCH 009/179] Correct misc compilation errors --- processor/scanner/src/db.rs | 8 ++++---- processor/scanner/src/lib.rs | 21 +++++++++++++++++++-- processor/scanner/src/report.rs | 13 ++++++++----- processor/scanner/src/safe.rs | 23 ++++++++++++++++------- processor/scanner/src/scan.rs | 32 +++++++++++++++++++++++--------- 5 files changed, 70 insertions(+), 27 deletions(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 073d5d420..c7cbd2538 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -13,9 +13,9 @@ impl Borshy for T {} #[derive(BorshSerialize, BorshDeserialize)] pub(crate) struct SeraiKey { - activation_block_number: u64, - retirement_block_number: Option, - key: K, + pub(crate) activation_block_number: u64, + pub(crate) retirement_block_number: Option, + pub(crate) key: K, } create_db!( @@ -208,7 +208,7 @@ impl ScannerDb { SerializedOutputs::set(txn, block_number, &buf); } - pub(crate) fn is_notable_block(getter: &impl Get, number: u64) -> bool { + pub(crate) fn is_block_notable(getter: &impl Get, number: u64) -> bool { NotableBlock::get(getter, number).is_some() } } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 04dcf8249..a6f3e899f 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -6,12 +6,21 @@ use primitives::{ReceivedOutput, Block}; mod db; mod index; +mod scan; +mod eventuality; +mod report; +mod safe; /// A feed usable to scan a blockchain. /// /// This defines the primitive types used, along with various getters necessary for indexing. #[async_trait::async_trait] pub trait ScannerFeed: Send + Sync { + /// The amount of confirmations required for a block to be finalized. + /// + /// This value must be at least `1`. + const CONFIRMATIONS: u64; + /// The type of the key used to receive coins on this blockchain. type Key: group::Group + group::GroupEncoding; @@ -35,11 +44,19 @@ pub trait ScannerFeed: Send + Sync { /// resolve without manual intervention. type EphemeralError: Debug; + /// Fetch the number of the latest block. + /// + /// The block number is its zero-indexed position within a linear view of the external network's + /// consensus. The genesis block accordingly has block number 0. + async fn latest_block_number(&self) -> Result; + /// Fetch the number of the latest finalized block. /// /// The block number is its zero-indexed position within a linear view of the external network's /// consensus. The genesis block accordingly has block number 0. - async fn latest_finalized_block_number(&self) -> Result; + async fn latest_finalized_block_number(&self) -> Result { + Ok(self.latest_block_number().await? - Self::CONFIRMATIONS) + } /// Fetch a block by its number. async fn block_by_number(&self, number: u64) -> Result; @@ -49,7 +66,7 @@ pub trait ScannerFeed: Send + Sync { &self, block: &Self::Block, key: Self::Key, - ) -> Result; + ) -> Result, Self::EphemeralError>; } /// A handle to immediately run an iteration of a task. 
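One edge of the new default method is worth noting: `self.latest_block_number().await? - Self::CONFIRMATIONS` is plain `u64` subtraction, which underflows on a chain younger than `CONFIRMATIONS` blocks. A hypothetical implementor wanting to tolerate young chains could override the default:

  async fn latest_finalized_block_number(&self) -> Result<u64, Self::EphemeralError> {
    // saturating_sub treats a chain shorter than CONFIRMATIONS blocks as having
    // finalized nothing past genesis, rather than underflowing
    Ok(self.latest_block_number().await?.saturating_sub(Self::CONFIRMATIONS))
  }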
diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs index 4d378b9c0..5c57a3f5d 100644 --- a/processor/scanner/src/report.rs +++ b/processor/scanner/src/report.rs @@ -8,7 +8,7 @@ use serai_db::{Db, DbTxn}; use primitives::{Id, Block}; // TODO: Localize to ReportDb? -use crate::{db::ScannerDb, ScannerFeed}; +use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; struct ReportTask { db: D, @@ -20,8 +20,10 @@ impl ContinuallyRan for ReportTask { async fn run_iteration(&mut self) -> Result { let highest_reportable = { // Fetch the latest scanned and latest checked block - let next_to_scan = ScannerDb::::next_to_scan_for_outputs_block(&self.db).expect("ReportTask run before writing the start block"); - let next_to_check = ScannerDb::::next_to_check_for_eventualities_block(&self.db).expect("ReportTask run before writing the start block"); + let next_to_scan = ScannerDb::::next_to_scan_for_outputs_block(&self.db) + .expect("ReportTask run before writing the start block"); + let next_to_check = ScannerDb::::next_to_check_for_eventualities_block(&self.db) + .expect("ReportTask run before writing the start block"); // If we haven't done any work, return if (next_to_scan == 0) || (next_to_check == 0) { return Ok(false); @@ -31,10 +33,11 @@ impl ContinuallyRan for ReportTask { last_scanned.min(last_checked) }; - let next_to_potentially_report = ScannerDb::::next_block_to_potentially_report(&self.db).expect("ReportTask run before writing the start block"); + let next_to_potentially_report = ScannerDb::::next_to_potentially_report_block(&self.db) + .expect("ReportTask run before writing the start block"); for b in next_to_potentially_report ..= highest_reportable { - if ScannerDb::::is_block_notable(b) { + if ScannerDb::::is_block_notable(&self.db, b) { todo!("TODO: Make Batches, which requires handling Forwarded within this crate"); } diff --git a/processor/scanner/src/safe.rs b/processor/scanner/src/safe.rs index a5de448d4..a0b4f5471 100644 --- a/processor/scanner/src/safe.rs +++ b/processor/scanner/src/safe.rs @@ -5,7 +5,7 @@ use serai_db::{Db, DbTxn}; use primitives::{Id, Block}; // TODO: Localize to SafeDb? 
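For concreteness on the `highest_reportable` computation corrected above, with illustrative numbers: if `next_to_scan = 11` and `next_to_check = 8`, then `last_scanned = 10`, `last_checked = 7`, and `highest_reportable = min(10, 7) = 7`. Reporting deliberately trails whichever of the two tasks is further behind.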
-use crate::{db::ScannerDb, ScannerFeed}; +use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; /* We mark blocks safe to scan when they're no more than `(CONFIRMATIONS - 1)` blocks after the @@ -27,7 +27,8 @@ struct SafeToScanTask { impl ContinuallyRan for SafeToScanTask { async fn run_iteration(&mut self) -> Result { // First, we fetch the highest acknowledged block - let Some(highest_acknowledged_block) = ScannerDb::::highest_acknowledged_block(&self.db) else { + let Some(highest_acknowledged_block) = ScannerDb::::highest_acknowledged_block(&self.db) + else { // If no blocks have been acknowledged, we don't mark any safe // Once the start block (implicitly safe) has been acknowledged, we proceed from there return Ok(false); @@ -38,13 +39,15 @@ impl ContinuallyRan for SafeToScanTask { // If we've decided to report (or not report) a block, we know if it needs acknowledgement // (and accordingly is pending acknowledgement) // Accordingly, the block immediately before this is the latest block with a known status - ScannerDb::::next_block_to_potentially_report(&self.db).expect("SafeToScanTask run before writing the start block") - 1 + ScannerDb::::next_to_potentially_report_block(&self.db) + .expect("SafeToScanTask run before writing the start block") - + 1 }; let mut oldest_pending_acknowledgement = None; for b in (highest_acknowledged_block + 1) ..= latest_block_known_if_pending_acknowledgement { // If the block isn't notable, immediately flag it as acknowledged - if !ScannerDb::::is_block_notable(b) { + if !ScannerDb::::is_block_notable(&self.db, b) { let mut txn = self.db.txn(); ScannerDb::::set_highest_acknowledged_block(&mut txn, b); txn.commit(); @@ -61,13 +64,19 @@ impl ContinuallyRan for SafeToScanTask { // acknowledgement, and the oldest block still pending acknowledgement is in the future, // we know the safe block to scan to is // `>= latest_block_known_if_pending_acknowledgement + (CONFIRMATIONS - 1)` - let oldest_pending_acknowledgement = oldest_pending_acknowledgement.unwrap_or(latest_block_known_if_pending_acknowledgement); + let oldest_pending_acknowledgement = + oldest_pending_acknowledgement.unwrap_or(latest_block_known_if_pending_acknowledgement); + + let old_safe_block = ScannerDb::::latest_scannable_block(&self.db) + .expect("SafeToScanTask run before writing the start block"); + let new_safe_block = oldest_pending_acknowledgement + + (S::CONFIRMATIONS.checked_sub(1).expect("CONFIRMATIONS wasn't at least 1")); // Update the latest scannable block let mut txn = self.db.txn(); - ScannerDb::::set_latest_scannable_block(oldest_pending_acknowledgement + (CONFIRMATIONS - 1)); + ScannerDb::::set_latest_scannable_block(&mut txn, new_safe_block); txn.commit(); - Ok(next_to_potentially_report <= highest_reportable) + Ok(old_safe_block != new_safe_block) } } diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index b96486d42..921650027 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -1,9 +1,9 @@ use serai_db::{Db, DbTxn}; -use primitives::{Id, Block}; +use primitives::{Id, ReceivedOutput, Block}; // TODO: Localize to ScanDb? 
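A note on the `checked_sub` introduced above: `CONFIRMATIONS` being at least 1 is only a documented requirement, so rather than a bare `S::CONFIRMATIONS - 1` (whose failure mode under an all-zeroes misconfiguration depends on overflow-check settings), `checked_sub(1).expect("CONFIRMATIONS wasn't at least 1")` turns the invariant into an explicit, loudly-failing check at the point of use.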
-use crate::{db::ScannerDb, ScannerFeed}; +use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; struct ScanForOutputsTask { db: D, @@ -14,9 +14,11 @@ struct ScanForOutputsTask { impl ContinuallyRan for ScanForOutputsTask { async fn run_iteration(&mut self) -> Result { // Fetch the safe to scan block - let latest_scannable = ScannerDb::::latest_scannable_block(&self.db).expect("ScanForOutputsTask run before writing the start block"); + let latest_scannable = ScannerDb::::latest_scannable_block(&self.db) + .expect("ScanForOutputsTask run before writing the start block"); // Fetch the next block to scan - let next_to_scan = ScannerDb::::next_to_scan_for_outputs_block(&self.db).expect("ScanForOutputsTask run before writing the start block"); + let next_to_scan = ScannerDb::::next_to_scan_for_outputs_block(&self.db) + .expect("ScanForOutputsTask run before writing the start block"); for b in next_to_scan ..= latest_scannable { let block = match self.feed.block_by_number(b).await { @@ -26,15 +28,22 @@ impl ContinuallyRan for ScanForOutputsTask { // Check the ID of this block is the expected ID { - let expected = ScannerDb::::block_id(b).expect("scannable block didn't have its ID saved"); + let expected = + ScannerDb::::block_id(&self.db, b).expect("scannable block didn't have its ID saved"); if block.id() != expected { - panic!("finalized chain reorganized from {} to {} at {}", hex::encode(expected), hex::encode(block.id()), b); + panic!( + "finalized chain reorganized from {} to {} at {}", + hex::encode(expected), + hex::encode(block.id()), + b + ); } } log::info!("scanning block: {} ({b})", hex::encode(block.id())); - let keys = ScannerDb::::keys(&self.db).expect("scanning for a blockchain without any keys set"); + let mut keys = + ScannerDb::::keys(&self.db).expect("scanning for a blockchain without any keys set"); // Remove all the retired keys while let Some(retire_at) = keys[0].retirement_block_number { if retire_at <= b { @@ -51,8 +60,13 @@ impl ContinuallyRan for ScanForOutputsTask { continue; } - for output in network.scan_for_outputs(&block, key).awaits { - assert_eq!(output.key(), key); + for output in self + .feed + .scan_for_outputs(&block, key.key.0) + .await + .map_err(|e| format!("failed to scan block {b}: {e:?}"))? + { + assert_eq!(output.key(), key.key.0); // TODO: Check for dust outputs.push(output); } From a8cc368718eca476e642a3f5a354c1f832058eef Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 20 Aug 2024 16:51:58 -0400 Subject: [PATCH 010/179] Differentiate BlockHeader from Block --- processor/primitives/src/lib.rs | 32 ++++++++++++++++++++++++++++---- processor/scanner/src/db.rs | 30 +++++++++++------------------- processor/scanner/src/index.rs | 4 ++-- processor/scanner/src/lib.rs | 28 +++++++++++----------------- processor/scanner/src/scan.rs | 7 +------ 5 files changed, 53 insertions(+), 48 deletions(-) diff --git a/processor/primitives/src/lib.rs b/processor/primitives/src/lib.rs index 535dd14f7..45f02571f 100644 --- a/processor/primitives/src/lib.rs +++ b/processor/primitives/src/lib.rs @@ -5,7 +5,7 @@ use core::fmt::Debug; use std::io; -use group::GroupEncoding; +use group::{Group, GroupEncoding}; use serai_primitives::Balance; @@ -137,9 +137,8 @@ pub trait ReceivedOutput: fn read(reader: &mut R) -> io::Result; } -/// A block from an external network. -#[async_trait::async_trait] -pub trait Block: Send + Sync + Sized + Clone + Debug { +/// A block header from an external network. 
+pub trait BlockHeader: Send + Sync + Sized + Clone + Debug { /// The type used to identify blocks. type Id: 'static + Id; /// The ID of this block. @@ -148,6 +147,31 @@ pub trait Block: Send + Sync + Sized + Clone + Debug { fn parent(&self) -> Self::Id; } +/// A block from an external network. +/// +/// A block is defined as a consensus event associated with a set of transactions. It is not +/// necessary to literally define it as whatever the external network defines as a block. For +/// external networks which finalize block(s), this block type should be a representation of all +/// transactions within a period finalization (whether block or epoch). +#[async_trait::async_trait] +pub trait Block: Send + Sync + Sized + Clone + Debug { + /// The type used for this block's header. + type Header: BlockHeader; + + /// The type used to represent keys on this external network. + type Key: Group + GroupEncoding; + /// The type used to represent addresses on this external network. + type Address; + /// The type used to represent received outputs on this external network. + type Output: ReceivedOutput; + + /// The ID of this block. + fn id(&self) -> ::Id; + + /// Scan all outputs within this block to find the outputs spendable by this key. + fn scan_for_outputs(&self, key: Self::Key) -> Vec; +} + /// A wrapper for a group element which implements the borsh traits. #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct BorshG(pub G); diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index c7cbd2538..0edfad97d 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -5,7 +5,7 @@ use serai_db::{Get, DbTxn, create_db}; use primitives::{Id, ReceivedOutput, Block, BorshG}; -use crate::ScannerFeed; +use crate::{ScannerFeed, BlockIdFor, KeyFor, OutputFor}; // The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. 
trait Borshy: BorshSerialize + BorshDeserialize {} @@ -64,25 +64,25 @@ create_db!( pub(crate) struct ScannerDb(PhantomData); impl ScannerDb { - pub(crate) fn set_block(txn: &mut impl DbTxn, number: u64, id: ::Id) { + pub(crate) fn set_block(txn: &mut impl DbTxn, number: u64, id: BlockIdFor) { BlockId::set(txn, number, &id); BlockNumber::set(txn, id, &number); } - pub(crate) fn block_id(getter: &impl Get, number: u64) -> Option<::Id> { + pub(crate) fn block_id(getter: &impl Get, number: u64) -> Option> { BlockId::get(getter, number) } - pub(crate) fn block_number(getter: &impl Get, id: ::Id) -> Option { + pub(crate) fn block_number(getter: &impl Get, id: BlockIdFor) -> Option { BlockNumber::get(getter, id) } // activation_block_number is inclusive, so the key will be scanned for starting at the specified // block - pub(crate) fn queue_key(txn: &mut impl DbTxn, activation_block_number: u64, key: S::Key) { + pub(crate) fn queue_key(txn: &mut impl DbTxn, activation_block_number: u64, key: KeyFor) { // Set this block as notable NotableBlock::set(txn, activation_block_number, &()); // Push the key - let mut keys: Vec>> = ActiveKeys::get(txn).unwrap_or(vec![]); + let mut keys: Vec>>> = ActiveKeys::get(txn).unwrap_or(vec![]); for key_i in &keys { if key == key_i.key.0 { panic!("queueing a key prior queued"); @@ -97,8 +97,8 @@ impl ScannerDb { } // retirement_block_number is inclusive, so the key will no longer be scanned for as of the // specified block - pub(crate) fn retire_key(txn: &mut impl DbTxn, retirement_block_number: u64, key: S::Key) { - let mut keys: Vec>> = + pub(crate) fn retire_key(txn: &mut impl DbTxn, retirement_block_number: u64, key: KeyFor) { + let mut keys: Vec>>> = ActiveKeys::get(txn).expect("retiring key yet no active keys"); assert!(keys.len() > 1, "retiring our only key"); @@ -118,15 +118,11 @@ impl ScannerDb { } panic!("retiring key yet not present in keys") } - pub(crate) fn keys(getter: &impl Get) -> Option>>> { + pub(crate) fn keys(getter: &impl Get) -> Option>>>> { ActiveKeys::get(getter) } - pub(crate) fn set_start_block( - txn: &mut impl DbTxn, - start_block: u64, - id: ::Id, - ) { + pub(crate) fn set_start_block(txn: &mut impl DbTxn, start_block: u64, id: BlockIdFor) { Self::set_block(txn, start_block, id); LatestFinalizedBlock::set(txn, &start_block); LatestScannableBlock::set(txn, &start_block); @@ -189,11 +185,7 @@ impl ScannerDb { HighestAcknowledgedBlock::get(getter) } - pub(crate) fn set_outputs( - txn: &mut impl DbTxn, - block_number: u64, - outputs: Vec>, - ) { + pub(crate) fn set_outputs(txn: &mut impl DbTxn, block_number: u64, outputs: Vec>) { if outputs.is_empty() { return; } diff --git a/processor/scanner/src/index.rs b/processor/scanner/src/index.rs index 7967d5df3..de68522ed 100644 --- a/processor/scanner/src/index.rs +++ b/processor/scanner/src/index.rs @@ -1,6 +1,6 @@ use serai_db::{Db, DbTxn}; -use primitives::{Id, Block}; +use primitives::{Id, BlockHeader}; // TODO: Localize to IndexDb? 
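The `BlockIdFor`/`KeyFor`/`OutputFor` signatures above come from aliases this same patch adds to `lib.rs`; written out in full (with `S` bound by `ScannerFeed`), they expand to the underlying associated-type projections:

  type BlockIdFor<S: ScannerFeed> = <<<S as ScannerFeed>::Block as Block>::Header as BlockHeader>::Id;
  type KeyFor<S: ScannerFeed> = <<S as ScannerFeed>::Block as Block>::Key;
  type OutputFor<S: ScannerFeed> = <<S as ScannerFeed>::Block as Block>::Output;

The aliases let `db.rs` name a feed's block-ID, key, and output types without repeating these multi-level projections in every signature.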
use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; @@ -43,7 +43,7 @@ impl ContinuallyRan for IndexFinalizedTask { // Index the hashes of all blocks until the latest finalized block for b in (our_latest_finalized + 1) ..= latest_finalized { - let block = match self.feed.block_by_number(b).await { + let block = match self.feed.block_header_by_number(b).await { Ok(block) => block, Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?, }; diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index a6f3e899f..addebb608 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -2,7 +2,7 @@ use core::{fmt::Debug, time::Duration}; use tokio::sync::mpsc; -use primitives::{ReceivedOutput, Block}; +use primitives::{ReceivedOutput, BlockHeader, Block}; mod db; mod index; @@ -21,15 +21,6 @@ pub trait ScannerFeed: Send + Sync { /// This value must be at least `1`. const CONFIRMATIONS: u64; - /// The type of the key used to receive coins on this blockchain. - type Key: group::Group + group::GroupEncoding; - - /// The type of the address used to specify who to send coins to on this blockchain. - type Address; - - /// The type representing a received (and spendable) output. - type Output: ReceivedOutput; - /// The representation of a block for this blockchain. /// /// A block is defined as a consensus event associated with a set of transactions. It is not @@ -58,17 +49,20 @@ pub trait ScannerFeed: Send + Sync { Ok(self.latest_block_number().await? - Self::CONFIRMATIONS) } + /// Fetch a block header by its number. + async fn block_header_by_number( + &self, + number: u64, + ) -> Result<::Header, Self::EphemeralError>; + /// Fetch a block by its number. async fn block_by_number(&self, number: u64) -> Result; - - /// Scan a block for its outputs. - async fn scan_for_outputs( - &self, - block: &Self::Block, - key: Self::Key, - ) -> Result, Self::EphemeralError>; } +type BlockIdFor = <<::Block as Block>::Header as BlockHeader>::Id; +type KeyFor = <::Block as Block>::Key; +type OutputFor = <::Block as Block>::Output; + /// A handle to immediately run an iteration of a task. #[derive(Clone)] pub(crate) struct RunNowHandle(mpsc::Sender<()>); diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index 921650027..6743d9507 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -60,12 +60,7 @@ impl ContinuallyRan for ScanForOutputsTask { continue; } - for output in self - .feed - .scan_for_outputs(&block, key.key.0) - .await - .map_err(|e| format!("failed to scan block {b}: {e:?}"))? 
- { + for output in block.scan_for_outputs(key.key.0) { assert_eq!(output.key(), key.key.0); // TODO: Check for dust outputs.push(output); From 0bc03c39545b4ec274d046f6dfc310c4b540ad89 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 20 Aug 2024 18:20:28 -0400 Subject: [PATCH 011/179] Handle dust --- Cargo.lock | 1 + processor/primitives/src/lib.rs | 36 ++++++++++++++++----------------- processor/scanner/Cargo.toml | 2 ++ processor/scanner/src/lib.rs | 15 ++++++++++++++ processor/scanner/src/scan.rs | 20 +++++++++++++++++- 5 files changed, 55 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 230ed22fb..e3e6f3782 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8669,6 +8669,7 @@ dependencies = [ "log", "parity-scale-codec", "serai-db", + "serai-primitives", "serai-processor-messages", "serai-processor-primitives", "thiserror", diff --git a/processor/primitives/src/lib.rs b/processor/primitives/src/lib.rs index 45f02571f..744aae470 100644 --- a/processor/primitives/src/lib.rs +++ b/processor/primitives/src/lib.rs @@ -34,6 +34,24 @@ pub trait Id: } impl Id for [u8; N] where [u8; N]: Default {} +/// A wrapper for a group element which implements the borsh traits. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct BorshG(pub G); +impl BorshSerialize for BorshG { + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + writer.write_all(self.0.to_bytes().as_ref()) + } +} +impl BorshDeserialize for BorshG { + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let mut repr = G::Repr::default(); + reader.read_exact(repr.as_mut())?; + Ok(Self( + Option::::from(G::from_bytes(&repr)).ok_or(borsh::io::Error::other("invalid point"))?, + )) + } +} + /// The type of the output. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum OutputType { @@ -171,21 +189,3 @@ pub trait Block: Send + Sync + Sized + Clone + Debug { /// Scan all outputs within this block to find the outputs spendable by this key. fn scan_for_outputs(&self, key: Self::Key) -> Vec; } - -/// A wrapper for a group element which implements the borsh traits. 
-#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct BorshG(pub G); -impl BorshSerialize for BorshG { - fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { - writer.write_all(self.0.to_bytes().as_ref()) - } -} -impl BorshDeserialize for BorshG { - fn deserialize_reader(reader: &mut R) -> borsh::io::Result { - let mut repr = G::Repr::default(); - reader.read_exact(repr.as_mut())?; - Ok(Self( - Option::::from(G::from_bytes(&repr)).ok_or(borsh::io::Error::other("invalid point"))?, - )) - } -} diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml index 670581d93..82de4de1b 100644 --- a/processor/scanner/Cargo.toml +++ b/processor/scanner/Cargo.toml @@ -35,5 +35,7 @@ tokio = { version = "1", default-features = false, features = ["rt-multi-thread" serai-db = { path = "../../common/db" } +serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } + messages = { package = "serai-processor-messages", path = "../messages" } primitives = { package = "serai-processor-primitives", path = "../primitives" } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index addebb608..02c885992 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -2,6 +2,7 @@ use core::{fmt::Debug, time::Duration}; use tokio::sync::mpsc; +use serai_primitives::{Coin, Amount}; use primitives::{ReceivedOutput, BlockHeader, Block}; mod db; @@ -57,6 +58,20 @@ pub trait ScannerFeed: Send + Sync { /// Fetch a block by its number. async fn block_by_number(&self, number: u64) -> Result; + + /// The cost to aggregate an input as of the specified block. + /// + /// This is defined as the transaction fee for a 2-input, 1-output transaction. + async fn cost_to_aggregate( + &self, + coin: Coin, + block_number: u64, + ) -> Result; + + /// The dust threshold for the specified coin. + /// + /// This should be a value worth handling at a human level. 
+ fn dust(&self, coin: Coin) -> Amount; } type BlockIdFor = <<::Block as Block>::Header as BlockHeader>::Id; diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index 6743d9507..6058c7dac 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -62,7 +62,25 @@ impl ContinuallyRan for ScanForOutputsTask { for output in block.scan_for_outputs(key.key.0) { assert_eq!(output.key(), key.key.0); - // TODO: Check for dust + + // Check this isn't dust + { + let mut balance = output.balance(); + // First, subtract 2 * the cost to aggregate, as detailed in + // `spec/processor/UTXO Management.md` + // TODO: Cache this + let cost_to_aggregate = + self.feed.cost_to_aggregate(balance.coin, b).await.map_err(|e| { + format!("couldn't fetch cost to aggregate {:?} at {b}: {e:?}", balance.coin) + })?; + balance.amount.0 -= 2 * cost_to_aggregate.0; + + // Now, check it's still past the dust threshold + if balance.amount.0 < self.feed.dust(balance.coin).0 { + continue; + } + } + outputs.push(output); } } From 2578ba61944a8d136d4412c56099d96343c90ead Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 20 Aug 2024 19:37:47 -0400 Subject: [PATCH 012/179] Document expectations on Eventuality task and correct code determining the block safe to scan/report --- processor/scanner/src/db.rs | 22 +--- processor/scanner/src/eventuality.rs | 49 ++++++++ processor/scanner/src/lib.rs | 176 +-------------------------- processor/scanner/src/report.rs | 14 ++- processor/scanner/src/safe.rs | 82 ------------- 5 files changed, 65 insertions(+), 278 deletions(-) delete mode 100644 processor/scanner/src/safe.rs diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 0edfad97d..e92435bcf 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -27,16 +27,12 @@ create_db!( // The latest finalized block to appear of a blockchain LatestFinalizedBlock: () -> u64, - // The latest block which it's safe to scan (dependent on what Serai has acknowledged scanning) - LatestScannableBlock: () -> u64, // The next block to scan for received outputs NextToScanForOutputsBlock: () -> u64, // The next block to check for resolving eventualities NextToCheckForEventualitiesBlock: () -> u64, // The next block to potentially report NextToPotentiallyReportBlock: () -> u64, - // The highest acknowledged block - HighestAcknowledgedBlock: () -> u64, // If a block was notable /* @@ -125,7 +121,6 @@ impl ScannerDb { pub(crate) fn set_start_block(txn: &mut impl DbTxn, start_block: u64, id: BlockIdFor) { Self::set_block(txn, start_block, id); LatestFinalizedBlock::set(txn, &start_block); - LatestScannableBlock::set(txn, &start_block); NextToScanForOutputsBlock::set(txn, &start_block); NextToCheckForEventualitiesBlock::set(txn, &start_block); NextToPotentiallyReportBlock::set(txn, &start_block); @@ -138,11 +133,10 @@ impl ScannerDb { LatestFinalizedBlock::get(getter) } - pub(crate) fn set_latest_scannable_block(txn: &mut impl DbTxn, latest_scannable_block: u64) { - LatestScannableBlock::set(txn, &latest_scannable_block); - } pub(crate) fn latest_scannable_block(getter: &impl Get) -> Option { - LatestScannableBlock::get(getter) + // This is whatever block we've checked the Eventualities of, plus the window length + // See `eventuality.rs` for more info + NextToCheckForEventualitiesBlock::get(getter).map(|b| b + S::WINDOW_LENGTH) } pub(crate) fn set_next_to_scan_for_outputs_block( @@ -175,16 +169,6 @@ impl ScannerDb { NextToPotentiallyReportBlock::get(getter) } - 
pub(crate) fn set_highest_acknowledged_block(
-    txn: &mut impl DbTxn,
-    highest_acknowledged_block: u64,
-  ) {
-    HighestAcknowledgedBlock::set(txn, &highest_acknowledged_block);
-  }
-  pub(crate) fn highest_acknowledged_block(getter: &impl Get) -> Option<u64> {
-    HighestAcknowledgedBlock::get(getter)
-  }
-
   pub(crate) fn set_outputs(txn: &mut impl DbTxn, block_number: u64, outputs: Vec<OutputFor<S>>) {
     if outputs.is_empty() {
       return;
diff --git a/processor/scanner/src/eventuality.rs b/processor/scanner/src/eventuality.rs
index 70b786d12..38f1d1124 100644
--- a/processor/scanner/src/eventuality.rs
+++ b/processor/scanner/src/eventuality.rs
@@ -1 +1,50 @@
 // TODO
+
+/*
+  Note: The following assumes there's some value, `CONFIRMATIONS`, and the finalized block we
+  operate on is `CONFIRMATIONS` blocks deep. This is true for Proof-of-Work chains yet not the API
+  actively used here.
+
+  When we scan a block, we receive outputs. When this block is acknowledged, we accumulate those
+  outputs into some scheduler, potentially causing certain transactions to begin their signing
+  protocol.
+
+  Despite only scanning blocks with `CONFIRMATIONS` confirmations, we cannot assume that these
+  transactions (in their signed form) will only appear after `CONFIRMATIONS`. For
+  `CONFIRMATIONS = 10`, the scanned block's number being `1`, the blockchain will have blocks with
+  numbers `0 ..= 10`. While this implies the earliest the transaction will appear is when the
+  block number is `11`, which is `1 + CONFIRMATIONS` (the number of the scanned block, plus the
+  confirmations), this isn't guaranteed.
+
+  A reorganization could occur which causes all unconfirmed blocks to be replaced, with the new
+  blockchain having the signed transaction present immediately.
+
+  This means that in order to detect Eventuality completions, we can only check block `b+1` once
+  we've acknowledged block `b`, accumulated its outputs, triggered any transactions, and prepared
+  for their Eventualities. This is important as both the completion of Eventualities, and the scan
+  process, may cause a block to be considered notable (where notable blocks must be perfectly
+  ordered).
+
+  We do not want to fully serialize the scan flow solely because the Eventuality flow must be. If
+  the time to scan, acknowledge, and intake a block ever exceeded the block time, we'd form a
+  backlog.
+
+  The solution is to form a window of blocks we can scan/acknowledge/intake, safely, such that we
+  only form a backlog if the latency for a block exceeds the duration of the entire window (the
+  number of blocks in the window * the block time).
+
+  By considering the block in which an Eventuality resolves to be not the block in which it
+  actually does, yet the block one window later, we enable the following flow:
+
+  - The scanner scans within its window, submitting blocks for acknowledgement.
+  - We have the blocks acknowledged (the consensus protocol handling this in parallel).
+  - The scanner checks for Eventualities completed following acknowledged blocks.
+  - If all Eventualities for a retiring multisig have been cleared, the notable block is one window
+    later.
+  - The start of the window shifts to the last block we've checked for Eventualities. This means
+    the end of the window is the block we just set as notable, and yes, once that's scanned we can
+    successfully publish a batch for it in a canonical fashion.
+
+  This forms a backlog only if the latency of scanning, acknowledgement, and intake (including
+  checking Eventualities) exceeds the window duration (the desired property).
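  A worked example of the window with illustrative numbers: let `WINDOW_LENGTH = 10` and say
  Eventualities have been checked through block 99, making block 100 the next to check. Per
  `latest_scannable_block` in `db.rs`, scanning may proceed through `100 + WINDOW_LENGTH = 110`,
  with acknowledgement happening in parallel. Only once block 100's Eventualities are checked does
  the window advance, allowing block 111 to be scanned.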
+*/ diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 02c885992..5f51e7d09 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -10,17 +10,17 @@ mod index; mod scan; mod eventuality; mod report; -mod safe; /// A feed usable to scan a blockchain. /// /// This defines the primitive types used, along with various getters necessary for indexing. #[async_trait::async_trait] pub trait ScannerFeed: Send + Sync { - /// The amount of confirmations required for a block to be finalized. + /// The amount of blocks to process in parallel. /// - /// This value must be at least `1`. - const CONFIRMATIONS: u64; + /// This value must be at least `1`. This value should be the worst-case latency to handle a + /// block divided by the expected block time. + const WINDOW_LENGTH: u64; /// The representation of a block for this blockchain. /// @@ -36,19 +36,11 @@ pub trait ScannerFeed: Send + Sync { /// resolve without manual intervention. type EphemeralError: Debug; - /// Fetch the number of the latest block. - /// - /// The block number is its zero-indexed position within a linear view of the external network's - /// consensus. The genesis block accordingly has block number 0. - async fn latest_block_number(&self) -> Result; - /// Fetch the number of the latest finalized block. /// /// The block number is its zero-indexed position within a linear view of the external network's /// consensus. The genesis block accordingly has block number 0. - async fn latest_finalized_block_number(&self) -> Result { - Ok(self.latest_block_number().await? - Self::CONFIRMATIONS) - } + async fn latest_finalized_block_number(&self) -> Result; /// Fetch a block header by its number. async fn block_header_by_number( @@ -262,77 +254,7 @@ impl ScannerDb { } } -/// The Scanner emits events relating to the blockchain, notably received outputs. -/// -/// It WILL NOT fail to emit an event, even if it reboots at selected moments. -/// -/// It MAY fire the same event multiple times. -#[derive(Debug)] -pub struct Scanner { - _db: PhantomData, - - keys: Vec<(usize, ::G)>, - - eventualities: HashMap, EventualitiesTracker>, - - ram_scanned: Option, - ram_outputs: HashSet>, - - need_ack: VecDeque, - - events: mpsc::UnboundedSender>, -} - -#[derive(Clone, Debug)] -struct ScannerHold { - scanner: Arc>>>, -} -impl ScannerHold { - async fn read(&self) -> RwLockReadGuard<'_, Option>> { - loop { - let lock = self.scanner.read().await; - if lock.is_none() { - drop(lock); - tokio::task::yield_now().await; - continue; - } - return lock; - } - } - async fn write(&self) -> RwLockWriteGuard<'_, Option>> { - loop { - let lock = self.scanner.write().await; - if lock.is_none() { - drop(lock); - tokio::task::yield_now().await; - continue; - } - return lock; - } - } - // This is safe to not check if something else already acquired the Scanner as the only caller is - // sequential. - async fn long_term_acquire(&self) -> Scanner { - self.scanner.write().await.take().unwrap() - } - async fn restore(&self, scanner: Scanner) { - let _ = self.scanner.write().await.insert(scanner); - } -} - -#[derive(Debug)] -pub struct ScannerHandle { - scanner: ScannerHold, - held_scanner: Option>, - pub events: ScannerEventChannel, - pub multisig_completed: mpsc::UnboundedSender, -} - impl ScannerHandle { - pub async fn ram_scanned(&self) -> usize { - self.scanner.read().await.as_ref().unwrap().ram_scanned.unwrap_or(0) - } - /// Register a key to scan for. 
pub async fn register_key( &mut self, @@ -363,17 +285,6 @@ impl ScannerHandle { scanner.eventualities.insert(key.to_bytes().as_ref().to_vec(), EventualitiesTracker::new()); } - pub fn db_scanned(getter: &G) -> Option { - ScannerDb::::latest_scanned_block(getter) - } - - // This perform a database read which isn't safe with regards to if the value is set or not - // It may be set, when it isn't expected to be set, or not set, when it is expected to be set - // Since the value is static, if it's set, it's correctly set - pub fn block_number(getter: &G, id: &>::Id) -> Option { - ScannerDb::::block_number(getter, id) - } - /// Acknowledge having handled a block. /// /// Creates a lock over the Scanner, preventing its independent scanning operations until @@ -447,7 +358,6 @@ impl Scanner { network: N, db: D, ) -> (ScannerHandle, Vec<(usize, ::G)>) { - let (events_send, events_recv) = mpsc::unbounded_channel(); let (multisig_completed_send, multisig_completed_recv) = mpsc::unbounded_channel(); let keys = ScannerDb::::keys(&db); @@ -455,44 +365,6 @@ impl Scanner { for key in &keys { eventualities.insert(key.1.to_bytes().as_ref().to_vec(), EventualitiesTracker::new()); } - - let ram_scanned = ScannerDb::::latest_scanned_block(&db); - - let scanner = ScannerHold { - scanner: Arc::new(RwLock::new(Some(Scanner { - _db: PhantomData, - - keys: keys.clone(), - - eventualities, - - ram_scanned, - ram_outputs: HashSet::new(), - - need_ack: VecDeque::new(), - - events: events_send, - }))), - }; - tokio::spawn(Scanner::run(db, network, scanner.clone(), multisig_completed_recv)); - - ( - ScannerHandle { - scanner, - held_scanner: None, - events: events_recv, - multisig_completed: multisig_completed_send, - }, - keys, - ) - } - - fn emit(&mut self, event: ScannerEvent) -> bool { - if self.events.send(event).is_err() { - info!("Scanner handler was dropped. Shutting down?"); - return false; - } - true } // An async function, to be spawned on a task, to discover and report outputs @@ -576,30 +448,6 @@ impl Scanner { info!("scanning block: {} ({block_being_scanned})", hex::encode(&block_id)); - // These DB calls are safe, despite not having a txn, since they're static values - // There's no issue if they're written in advance of expected (such as on reboot) - // They're also only expected here - if let Some(id) = ScannerDb::::block(&db, block_being_scanned) { - if id != block_id { - panic!("reorg'd from finalized {} to {}", hex::encode(id), hex::encode(block_id)); - } - } else { - // TODO: Move this to an unwrap - if let Some(id) = ScannerDb::::block(&db, block_being_scanned.saturating_sub(1)) { - if id != block.parent() { - panic!( - "block {} doesn't build off expected parent {}", - hex::encode(block_id), - hex::encode(id), - ); - } - } - - let mut txn = db.txn(); - ScannerDb::::save_block(&mut txn, block_being_scanned, &block_id); - txn.commit(); - } - // Scan new blocks // TODO: This lock acquisition may be long-lived... 
let mut scanner_lock = scanner_hold.write().await; @@ -617,16 +465,6 @@ impl Scanner { has_activation = true; } - let key_vec = key.to_bytes().as_ref().to_vec(); - - // TODO: These lines are the ones which will cause a really long-lived lock acquisition - for output in network.get_outputs(&block, key).await { - assert_eq!(output.key(), key); - if output.balance().amount.0 >= N::DUST { - outputs.push(output); - } - } - for (id, (block_number, tx, completion)) in network .get_eventuality_completions(scanner.eventualities.get_mut(&key_vec).unwrap(), &block) .await @@ -778,10 +616,6 @@ impl Scanner { let retired = scanner.keys.remove(0).1; scanner.eventualities.remove(retired.to_bytes().as_ref()); } - - // Update ram_scanned - scanner.ram_scanned = Some(block_being_scanned); - drop(scanner_lock); // If we sent a Block event, once again check multisig_completed if sent_block && diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs index 5c57a3f5d..34a596175 100644 --- a/processor/scanner/src/report.rs +++ b/processor/scanner/src/report.rs @@ -19,18 +19,20 @@ struct ReportTask { impl ContinuallyRan for ReportTask { async fn run_iteration(&mut self) -> Result { let highest_reportable = { - // Fetch the latest scanned and latest checked block + // Fetch the next to scan block let next_to_scan = ScannerDb::::next_to_scan_for_outputs_block(&self.db) .expect("ReportTask run before writing the start block"); - let next_to_check = ScannerDb::::next_to_check_for_eventualities_block(&self.db) - .expect("ReportTask run before writing the start block"); // If we haven't done any work, return - if (next_to_scan == 0) || (next_to_check == 0) { + if next_to_scan == 0 { return Ok(false); } + // The last scanned block is the block prior to this + #[allow(clippy::let_and_return)] let last_scanned = next_to_scan - 1; - let last_checked = next_to_check - 1; - last_scanned.min(last_checked) + // The last scanned block is the highest reportable block as we only scan blocks within a + // window where it's safe to immediately report the block + // See `eventuality.rs` for more info + last_scanned }; let next_to_potentially_report = ScannerDb::::next_to_potentially_report_block(&self.db) diff --git a/processor/scanner/src/safe.rs b/processor/scanner/src/safe.rs deleted file mode 100644 index a0b4f5471..000000000 --- a/processor/scanner/src/safe.rs +++ /dev/null @@ -1,82 +0,0 @@ -use core::marker::PhantomData; - -use serai_db::{Db, DbTxn}; - -use primitives::{Id, Block}; - -// TODO: Localize to SafeDb? -use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; - -/* - We mark blocks safe to scan when they're no more than `(CONFIRMATIONS - 1)` blocks after the - oldest notable block still pending acknowledgement (creating a window of length `CONFIRMATIONS` - when including the block pending acknowledgement). This means that if all known notable blocks - have been acknowledged, and a stretch of non-notable blocks occurs, they'll automatically be - marked safe to scan (since they come before the next oldest notable block still pending - acknowledgement). - - This design lets Serai safely schedule events `CONFIRMATIONS` blocks after the latest - acknowledged block. For an exhaustive proof of this, please see `mini`. 
-*/ -struct SafeToScanTask { - db: D, - _S: PhantomData, -} - -#[async_trait::async_trait] -impl ContinuallyRan for SafeToScanTask { - async fn run_iteration(&mut self) -> Result { - // First, we fetch the highest acknowledged block - let Some(highest_acknowledged_block) = ScannerDb::::highest_acknowledged_block(&self.db) - else { - // If no blocks have been acknowledged, we don't mark any safe - // Once the start block (implicitly safe) has been acknowledged, we proceed from there - return Ok(false); - }; - - let latest_block_known_if_pending_acknowledgement = { - // The next block to potentially report comes after all blocks we've decided to report or not - // If we've decided to report (or not report) a block, we know if it needs acknowledgement - // (and accordingly is pending acknowledgement) - // Accordingly, the block immediately before this is the latest block with a known status - ScannerDb::::next_to_potentially_report_block(&self.db) - .expect("SafeToScanTask run before writing the start block") - - 1 - }; - - let mut oldest_pending_acknowledgement = None; - for b in (highest_acknowledged_block + 1) ..= latest_block_known_if_pending_acknowledgement { - // If the block isn't notable, immediately flag it as acknowledged - if !ScannerDb::::is_block_notable(&self.db, b) { - let mut txn = self.db.txn(); - ScannerDb::::set_highest_acknowledged_block(&mut txn, b); - txn.commit(); - continue; - } - - oldest_pending_acknowledgement = Some(b); - break; - } - - // `oldest_pending_acknowledgement` is now the oldest block pending acknowledgement or `None` - // If it's `None`, then we were able to implicitly acknowledge all blocks within this span - // Since the safe block is `(CONFIRMATIONS - 1)` blocks after the oldest block still pending - // acknowledgement, and the oldest block still pending acknowledgement is in the future, - // we know the safe block to scan to is - // `>= latest_block_known_if_pending_acknowledgement + (CONFIRMATIONS - 1)` - let oldest_pending_acknowledgement = - oldest_pending_acknowledgement.unwrap_or(latest_block_known_if_pending_acknowledgement); - - let old_safe_block = ScannerDb::::latest_scannable_block(&self.db) - .expect("SafeToScanTask run before writing the start block"); - let new_safe_block = oldest_pending_acknowledgement + - (S::CONFIRMATIONS.checked_sub(1).expect("CONFIRMATIONS wasn't at least 1")); - - // Update the latest scannable block - let mut txn = self.db.txn(); - ScannerDb::::set_latest_scannable_block(&mut txn, new_safe_block); - txn.commit(); - - Ok(old_safe_block != new_safe_block) - } -} From cdcfe4a3d792ab6f1376a4aaf908be72919e4ae5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 21 Aug 2024 18:41:51 -0400 Subject: [PATCH 013/179] Add bounds for the eventuality task --- processor/scanner/src/db.rs | 20 +++++++- processor/scanner/src/eventuality.rs | 75 +++++++++++++++++++++++++++- 2 files changed, 93 insertions(+), 2 deletions(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index e92435bcf..70e34315f 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -33,6 +33,8 @@ create_db!( NextToCheckForEventualitiesBlock: () -> u64, // The next block to potentially report NextToPotentiallyReportBlock: () -> u64, + // Highest acknowledged block + HighestAcknowledgedBlock: () -> u64, // If a block was notable /* @@ -122,7 +124,9 @@ impl ScannerDb { Self::set_block(txn, start_block, id); LatestFinalizedBlock::set(txn, &start_block); NextToScanForOutputsBlock::set(txn, &start_block); - 
NextToCheckForEventualitiesBlock::set(txn, &start_block);
+    // We can receive outputs in this block, but any descendant transactions will be in the next
+    // block. This, together with the assertion when this value is set, bounds the value in the DB
+    // to be non-zero.
+    NextToCheckForEventualitiesBlock::set(txn, &(start_block + 1));
     NextToPotentiallyReportBlock::set(txn, &start_block);
   }
 
@@ -153,6 +157,10 @@ impl ScannerDb {
     txn: &mut impl DbTxn,
     next_to_check_for_eventualities_block: u64,
   ) {
+    assert!(
+      next_to_check_for_eventualities_block != 0,
+      "next to check for eventualities block was 0 when it's bound non-zero"
+    );
     NextToCheckForEventualitiesBlock::set(txn, &next_to_check_for_eventualities_block);
   }
   pub(crate) fn next_to_check_for_eventualities_block(getter: &impl Get) -> Option<u64> {
@@ -169,6 +177,16 @@ impl ScannerDb {
     NextToPotentiallyReportBlock::get(getter)
   }
 
+  pub(crate) fn set_highest_acknowledged_block(
+    txn: &mut impl DbTxn,
+    highest_acknowledged_block: u64,
+  ) {
+    HighestAcknowledgedBlock::set(txn, &highest_acknowledged_block);
+  }
+  pub(crate) fn highest_acknowledged_block(getter: &impl Get) -> Option<u64> {
+    HighestAcknowledgedBlock::get(getter)
+  }
+
   pub(crate) fn set_outputs(txn: &mut impl DbTxn, block_number: u64, outputs: Vec<OutputFor<S>>) {
     if outputs.is_empty() {
       return;
diff --git a/processor/scanner/src/eventuality.rs b/processor/scanner/src/eventuality.rs
index 38f1d1124..37892aa87 100644
--- a/processor/scanner/src/eventuality.rs
+++ b/processor/scanner/src/eventuality.rs
@@ -1,4 +1,9 @@
-// TODO
+use serai_db::{Db, DbTxn};
+
+use primitives::{Id, ReceivedOutput, Block};
+
+// TODO: Localize to EventualityDb?
+use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan};
 
 /*
   Note: The following assumes there's some value, `CONFIRMATIONS`, and the finalized block we
@@ -48,3 +53,71 @@
   This forms a backlog only if the latency of scanning, acknowledgement, and intake (including
   checking Eventualities) exceeds the window duration (the desired property).
 */
+struct EventualityTask<D: Db, S: ScannerFeed> {
+  db: D,
+  feed: S,
+}
+
+#[async_trait::async_trait]
+impl<D: Db, S: ScannerFeed> ContinuallyRan for EventualityTask<D, S> {
+  async fn run_iteration(&mut self) -> Result<bool, String> {
+    /*
+      The set of Eventualities only increases when a block is acknowledged. Accordingly, we can only
+      iterate up to (and including) the block currently pending acknowledgement. "including" is
+      because even if block `b` causes new Eventualities, they'll only potentially resolve in block
+      `b + 1`.
+
+      We only know blocks will need acknowledgement *for sure* if they were scanned. The only other
+      causes are key activation and retirement (both scheduled outside the scan window). This makes
+      the exclusive upper bound the *next block to scan*.
+    */
+    let exclusive_upper_bound = {
+      // Fetch the next to scan block
+      let next_to_scan = ScannerDb::<S>::next_to_scan_for_outputs_block(&self.db)
+        .expect("EventualityTask run before writing the start block");
+      // If we haven't done any work, return
+      if next_to_scan == 0 {
+        return Ok(false);
+      }
+      next_to_scan
+    };
+
+    // Fetch the highest acknowledged block
+    let highest_acknowledged = ScannerDb::<S>::highest_acknowledged_block(&self.db)
+      .expect("EventualityTask run before writing the start block");
+
+    // Fetch the next block to check
+    let next_to_check = ScannerDb::<S>::next_to_check_for_eventualities_block(&self.db)
+      .expect("EventualityTask run before writing the start block");
+
+    // Check all blocks
+    let mut iterated = false;
+    for b in next_to_check ..
exclusive_upper_bound { + // If the prior block was notable *and* not acknowledged, break + // This is so if it caused any Eventualities (which may resolve this block), we have them + { + // This `- 1` is safe as next to check is bound to be non-zero + // This is possible since even if we receive coins in block 0, any transactions we'd make + // would resolve in block 1 (the first block we'll check under this non-zero rule) + let prior_block = b - 1; + if ScannerDb::::is_block_notable(&self.db, prior_block) && + (prior_block > highest_acknowledged) + { + break; + } + } + + iterated = true; + + todo!("TODO"); + + let mut txn = self.db.txn(); + // Update the next to check block + ScannerDb::::set_next_to_check_for_eventualities_block(&mut txn, next_to_check); + txn.commit(); + } + + // Run dependents if we successfully checked any blocks + Ok(iterated) + } +} From ceb1921f27335c8ecb66d7dcd6cf4cfa17f2d5fb Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 22 Aug 2024 01:27:57 -0400 Subject: [PATCH 014/179] Add Eventuality back to processor primitives Also splits crate into modules. --- processor/primitives/src/block.rs | 40 +++++++ processor/primitives/src/eventuality.rs | 31 +++++ processor/primitives/src/lib.rs | 152 ++---------------------- processor/primitives/src/output.rs | 113 ++++++++++++++++++ 4 files changed, 194 insertions(+), 142 deletions(-) create mode 100644 processor/primitives/src/block.rs create mode 100644 processor/primitives/src/eventuality.rs create mode 100644 processor/primitives/src/output.rs diff --git a/processor/primitives/src/block.rs b/processor/primitives/src/block.rs new file mode 100644 index 000000000..22f0b9984 --- /dev/null +++ b/processor/primitives/src/block.rs @@ -0,0 +1,40 @@ +use core::fmt::Debug; + +use group::{Group, GroupEncoding}; + +use crate::{Id, ReceivedOutput}; + +/// A block header from an external network. +pub trait BlockHeader: Send + Sync + Sized + Clone + Debug { + /// The type used to identify blocks. + type Id: 'static + Id; + /// The ID of this block. + fn id(&self) -> Self::Id; + /// The ID of the parent block. + fn parent(&self) -> Self::Id; +} + +/// A block from an external network. +/// +/// A block is defined as a consensus event associated with a set of transactions. It is not +/// necessary to literally define it as whatever the external network defines as a block. For +/// external networks which finalize block(s), this block type should be a representation of all +/// transactions within a period finalization (whether block or epoch). +#[async_trait::async_trait] +pub trait Block: Send + Sync + Sized + Clone + Debug { + /// The type used for this block's header. + type Header: BlockHeader; + + /// The type used to represent keys on this external network. + type Key: Group + GroupEncoding; + /// The type used to represent addresses on this external network. + type Address; + /// The type used to represent received outputs on this external network. + type Output: ReceivedOutput; + + /// The ID of this block. + fn id(&self) -> ::Id; + + /// Scan all outputs within this block to find the outputs spendable by this key. + fn scan_for_outputs(&self, key: Self::Key) -> Vec; +} diff --git a/processor/primitives/src/eventuality.rs b/processor/primitives/src/eventuality.rs new file mode 100644 index 000000000..6e16637df --- /dev/null +++ b/processor/primitives/src/eventuality.rs @@ -0,0 +1,31 @@ +use std::collections::HashMap; +use std::io; + +/// A description of a transaction which will eventually happen. 
+pub trait Eventuality: Sized + Send + Sync { + /// A unique byte sequence which can be used to identify potentially resolving transactions. + /// + /// Both a transaction and an Eventuality are expected to be able to yield lookup sequences. + /// Lookup sequences MUST be unique to the Eventuality and identical to any transaction's which + /// satisfies this Eventuality. Transactions which don't satisfy this Eventuality MAY also have + /// an identical lookup sequence. + /// + /// This is used to find the Eventuality a transaction MAY resolve so we don't have to check all + /// transactions against all Eventualities. Once the potential resolved Eventuality is + /// identified, the full check is performed. + fn lookup(&self) -> Vec; + + /// Read an Eventuality. + fn read(reader: &mut R) -> io::Result; + /// Serialize an Eventuality to a `Vec`. + fn serialize(&self) -> Vec; +} + +/// A tracker of unresolved Eventualities. +#[derive(Debug)] +pub struct EventualityTracker { + /// The active Eventualities. + /// + /// These are keyed by their lookups. + pub active_eventualities: HashMap, E>, +} diff --git a/processor/primitives/src/lib.rs b/processor/primitives/src/lib.rs index 744aae470..dc64facf4 100644 --- a/processor/primitives/src/lib.rs +++ b/processor/primitives/src/lib.rs @@ -3,15 +3,21 @@ #![deny(missing_docs)] use core::fmt::Debug; -use std::io; -use group::{Group, GroupEncoding}; - -use serai_primitives::Balance; +use group::GroupEncoding; use scale::{Encode, Decode}; use borsh::{BorshSerialize, BorshDeserialize}; +mod output; +pub use output::*; + +mod eventuality; +pub use eventuality::*; + +mod block; +pub use block::*; + /// An ID for an output/transaction/block/etc. /// /// IDs don't need to implement `Copy`, enabling `[u8; 33]`, `[u8; 64]` to be used. IDs are still @@ -51,141 +57,3 @@ impl BorshDeserialize for BorshG { )) } } - -/// The type of the output. -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum OutputType { - /// An output received to the address external payments use. - /// - /// This is reported to Substrate in a `Batch`. - External, - - /// A branch output. - /// - /// Given a known output set, and a known series of outbound transactions, we should be able to - /// form a completely deterministic schedule S. The issue is when S has TXs which spend prior TXs - /// in S (which is needed for our logarithmic scheduling). In order to have the descendant TX, - /// say S[1], build off S[0], we need to observe when S[0] is included on-chain. - /// - /// We cannot. - /// - /// Monero (and other privacy coins) do not expose their UTXO graphs. Even if we know how to - /// create S[0], and the actual payment info behind it, we cannot observe it on the blockchain - /// unless we participated in creating it. Locking the entire schedule, when we cannot sign for - /// the entire schedule at once, to a single signing set isn't feasible. - /// - /// While any member of the active signing set can provide data enabling other signers to - /// participate, it's several KB of data which we then have to code communication for. - /// The other option is to simply not observe S[0]. Instead, observe a TX with an identical - /// output to the one in S[0] we intended to use for S[1]. It's either from S[0], or Eve, a - /// malicious actor, has sent us a forged TX which is... equally as usable? So who cares? - /// - /// The only issue is if we have multiple outputs on-chain with identical amounts and purposes. 
- /// Accordingly, when the scheduler makes a plan for when a specific output is available, it - /// shouldn't set that plan. It should *push* that plan to a queue of plans to perform when - /// instances of that output occur. - Branch, - - /// A change output. - /// - /// This should be added to the available UTXO pool with no further action taken. It does not - /// need to be reported (though we do still need synchrony on the block it's in). There's no - /// explicit expectation for the usage of this output at time of recipience. - Change, - - /// A forwarded output from the prior multisig. - /// - /// This is distinguished for technical reasons around detecting when a multisig should be - /// retired. - Forwarded, -} - -impl OutputType { - fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&[match self { - OutputType::External => 0, - OutputType::Branch => 1, - OutputType::Change => 2, - OutputType::Forwarded => 3, - }]) - } - - fn read(reader: &mut R) -> io::Result { - let mut byte = [0; 1]; - reader.read_exact(&mut byte)?; - Ok(match byte[0] { - 0 => OutputType::External, - 1 => OutputType::Branch, - 2 => OutputType::Change, - 3 => OutputType::Forwarded, - _ => Err(io::Error::other("invalid OutputType"))?, - }) - } -} - -/// A received output. -pub trait ReceivedOutput: - Send + Sync + Sized + Clone + PartialEq + Eq + Debug -{ - /// The type used to identify this output. - type Id: 'static + Id; - - /// The type of this output. - fn kind(&self) -> OutputType; - - /// The ID of this output. - fn id(&self) -> Self::Id; - /// The key this output was received by. - fn key(&self) -> K; - - /// The presumed origin for this output. - /// - /// This is used as the address to refund coins to if we can't handle the output as desired - /// (unless overridden). - fn presumed_origin(&self) -> Option; - - /// The balance associated with this output. - fn balance(&self) -> Balance; - /// The arbitrary data (presumably an InInstruction) associated with this output. - fn data(&self) -> &[u8]; - - /// Write this output. - fn write(&self, writer: &mut W) -> io::Result<()>; - /// Read an output. - fn read(reader: &mut R) -> io::Result; -} - -/// A block header from an external network. -pub trait BlockHeader: Send + Sync + Sized + Clone + Debug { - /// The type used to identify blocks. - type Id: 'static + Id; - /// The ID of this block. - fn id(&self) -> Self::Id; - /// The ID of the parent block. - fn parent(&self) -> Self::Id; -} - -/// A block from an external network. -/// -/// A block is defined as a consensus event associated with a set of transactions. It is not -/// necessary to literally define it as whatever the external network defines as a block. For -/// external networks which finalize block(s), this block type should be a representation of all -/// transactions within a period finalization (whether block or epoch). -#[async_trait::async_trait] -pub trait Block: Send + Sync + Sized + Clone + Debug { - /// The type used for this block's header. - type Header: BlockHeader; - - /// The type used to represent keys on this external network. - type Key: Group + GroupEncoding; - /// The type used to represent addresses on this external network. - type Address; - /// The type used to represent received outputs on this external network. - type Output: ReceivedOutput; - - /// The ID of this block. - fn id(&self) -> ::Id; - - /// Scan all outputs within this block to find the outputs spendable by this key. 
- fn scan_for_outputs(&self, key: Self::Key) -> Vec; -} diff --git a/processor/primitives/src/output.rs b/processor/primitives/src/output.rs new file mode 100644 index 000000000..1dd186aac --- /dev/null +++ b/processor/primitives/src/output.rs @@ -0,0 +1,113 @@ +use core::fmt::Debug; +use std::io; + +use group::GroupEncoding; + +use serai_primitives::Balance; + +use crate::Id; + +/// The type of the output. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum OutputType { + /// An output received to the address external payments use. + /// + /// This is reported to Substrate in a `Batch`. + External, + + /// A branch output. + /// + /// Given a known output set, and a known series of outbound transactions, we should be able to + /// form a completely deterministic schedule S. The issue is when S has TXs which spend prior TXs + /// in S (which is needed for our logarithmic scheduling). In order to have the descendant TX, + /// say S[1], build off S[0], we need to observe when S[0] is included on-chain. + /// + /// We cannot. + /// + /// Monero (and other privacy coins) do not expose their UTXO graphs. Even if we know how to + /// create S[0], and the actual payment info behind it, we cannot observe it on the blockchain + /// unless we participated in creating it. Locking the entire schedule, when we cannot sign for + /// the entire schedule at once, to a single signing set isn't feasible. + /// + /// While any member of the active signing set can provide data enabling other signers to + /// participate, it's several KB of data which we then have to code communication for. + /// The other option is to simply not observe S[0]. Instead, observe a TX with an identical + /// output to the one in S[0] we intended to use for S[1]. It's either from S[0], or Eve, a + /// malicious actor, has sent us a forged TX which is... equally as usable? So who cares? + /// + /// The only issue is if we have multiple outputs on-chain with identical amounts and purposes. + /// Accordingly, when the scheduler makes a plan for when a specific output is available, it + /// shouldn't set that plan. It should *push* that plan to a queue of plans to perform when + /// instances of that output occur. + Branch, + + /// A change output. + /// + /// This should be added to the available UTXO pool with no further action taken. It does not + /// need to be reported (though we do still need synchrony on the block it's in). There's no + /// explicit expectation for the usage of this output at time of recipience. + Change, + + /// A forwarded output from the prior multisig. + /// + /// This is distinguished for technical reasons around detecting when a multisig should be + /// retired. + Forwarded, +} + +impl OutputType { + /// Write the OutputType. + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&[match self { + OutputType::External => 0, + OutputType::Branch => 1, + OutputType::Change => 2, + OutputType::Forwarded => 3, + }]) + } + + /// Read an OutputType. + pub fn read(reader: &mut R) -> io::Result { + let mut byte = [0; 1]; + reader.read_exact(&mut byte)?; + Ok(match byte[0] { + 0 => OutputType::External, + 1 => OutputType::Branch, + 2 => OutputType::Change, + 3 => OutputType::Forwarded, + _ => Err(io::Error::other("invalid OutputType"))?, + }) + } +} + +/// A received output. +pub trait ReceivedOutput: + Send + Sync + Sized + Clone + PartialEq + Eq + Debug +{ + /// The type used to identify this output. + type Id: 'static + Id; + + /// The type of this output. 
+  fn kind(&self) -> OutputType;
+
+  /// The ID of this output.
+  fn id(&self) -> Self::Id;
+  /// The key this output was received by.
+  fn key(&self) -> K;
+
+  /// The presumed origin for this output.
+  ///
+  /// This is used as the address to refund coins to if we can't handle the output as desired
+  /// (unless overridden).
+  fn presumed_origin(&self) -> Option<A>;
+
+  /// The balance associated with this output.
+  fn balance(&self) -> Balance;
+  /// The arbitrary data (presumably an InInstruction) associated with this output.
+  fn data(&self) -> &[u8];
+
+  /// Write this output.
+  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
+  /// Read an output.
+  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
+}
From 9566fd31baacaae4059a0c01be1dd76a5e4a609b Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Fri, 23 Aug 2024 20:30:06 -0400
Subject: [PATCH 015/179] Decide flow between scan/eventuality/report

Scan now only handles External outputs, with an associated essay going
over why. Scan directly creates the InInstruction (previously planned to
be done in Report), and Eventuality is declared to end up yielding the
outputs. That will require making the Eventuality flow two-stage: one
stage to evaluate existing Eventualities and yield outputs, and one
stage to incorporate new Eventualities before advancing the scan window.
---
 processor/scanner/src/db.rs          | 100 +++++++++++------
 processor/scanner/src/eventuality.rs |   2 +
 processor/scanner/src/lib.rs         | 155 ++++++++------------------
 processor/scanner/src/lifetime.rs    |  96 +++++++++++++++++
 processor/scanner/src/report.rs      |  14 +++
 processor/scanner/src/scan.rs        | 148 +++++++++++++++++++++----
 6 files changed, 349 insertions(+), 166 deletions(-)
 create mode 100644 processor/scanner/src/lifetime.rs

diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs
index 70e34315f..2e462712d 100644
--- a/processor/scanner/src/db.rs
+++ b/processor/scanner/src/db.rs
@@ -5,25 +5,44 @@ use serai_db::{Get, DbTxn, create_db};
 
 use primitives::{Id, ReceivedOutput, Block, BorshG};
 
-use crate::{ScannerFeed, BlockIdFor, KeyFor, OutputFor};
+use crate::{lifetime::LifetimeStage, ScannerFeed, BlockIdFor, KeyFor, OutputFor};
 
 // The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this.
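+// (A blanket implementation: any type which is `BorshSerialize + BorshDeserialize` implements
+// the local `Borshy` trait, letting DB entries bound their generics on this single trait.)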
trait Borshy: BorshSerialize + BorshDeserialize {} impl Borshy for T {} #[derive(BorshSerialize, BorshDeserialize)] -pub(crate) struct SeraiKey { - pub(crate) activation_block_number: u64, - pub(crate) retirement_block_number: Option, +struct SeraiKeyDbEntry { + activation_block_number: u64, + key: K, +} + +pub(crate) struct SeraiKey { + pub(crate) stage: LifetimeStage, pub(crate) key: K, } +pub(crate) struct OutputWithInInstruction> { + output: O, + refund_address: A, + in_instruction: InInstructionWithBalance, +} + +impl> OutputWithInInstruction { + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.output.write(writer)?; + // TODO self.refund_address.write(writer)?; + self.in_instruction.encode_to(writer); + Ok(()) + } +} + create_db!( Scanner { BlockId: (number: u64) -> I, BlockNumber: (id: I) -> u64, - ActiveKeys: () -> Vec>, + ActiveKeys: () -> Vec>, // The latest finalized block to appear of a blockchain LatestFinalizedBlock: () -> u64, @@ -80,48 +99,60 @@ impl ScannerDb { NotableBlock::set(txn, activation_block_number, &()); // Push the key - let mut keys: Vec>>> = ActiveKeys::get(txn).unwrap_or(vec![]); + let mut keys: Vec>>> = ActiveKeys::get(txn).unwrap_or(vec![]); for key_i in &keys { if key == key_i.key.0 { panic!("queueing a key prior queued"); } } - keys.push(SeraiKey { - activation_block_number, - retirement_block_number: None, - key: BorshG(key), - }); + keys.push(SeraiKeyDbEntry { activation_block_number, key: BorshG(key) }); ActiveKeys::set(txn, &keys); } - // retirement_block_number is inclusive, so the key will no longer be scanned for as of the - // specified block - pub(crate) fn retire_key(txn: &mut impl DbTxn, retirement_block_number: u64, key: KeyFor) { - let mut keys: Vec>>> = + // TODO: This will be called from the Eventuality task yet this field is read by the scan task + // We need to write the argument for its safety + pub(crate) fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { + let mut keys: Vec>>> = ActiveKeys::get(txn).expect("retiring key yet no active keys"); assert!(keys.len() > 1, "retiring our only key"); - for i in 0 .. keys.len() { - if key == keys[i].key.0 { - keys[i].retirement_block_number = Some(retirement_block_number); - ActiveKeys::set(txn, &keys); - return; + assert_eq!(keys[0].key.0, key, "not retiring the oldest key"); + keys.remove(0); + ActiveKeys::set(txn, &keys); + } + pub(crate) fn active_keys_as_of_next_to_scan_for_outputs_block( + getter: &impl Get, + ) -> Option>>> { + // We don't take this as an argument as we don't keep all historical keys in memory + // If we've scanned block 1,000,000, we can't answer the active keys as of block 0 + let block_number = Self::next_to_scan_for_outputs_block(getter)?; + + let raw_keys: Vec>>> = ActiveKeys::get(getter)?; + let mut keys = Vec::with_capacity(2); + for i in 0 .. 
raw_keys.len() { + if block_number < raw_keys[i].activation_block_number { + continue; } - - // This is not the key in question, but since it's older, it already should've been queued - // for retirement - assert!( - keys[i].retirement_block_number.is_some(), - "older key wasn't retired before newer key" - ); + keys.push(SeraiKey { + key: raw_keys[i].key.0, + stage: LifetimeStage::calculate::( + block_number, + raw_keys[i].activation_block_number, + raw_keys.get(i + 1).map(|key| key.activation_block_number), + ), + }); } - panic!("retiring key yet not present in keys") - } - pub(crate) fn keys(getter: &impl Get) -> Option>>>> { - ActiveKeys::get(getter) + assert!(keys.len() <= 2); + Some(keys) } pub(crate) fn set_start_block(txn: &mut impl DbTxn, start_block: u64, id: BlockIdFor) { + assert!( + LatestFinalizedBlock::get(txn).is_none(), + "setting start block but prior set start block" + ); + Self::set_block(txn, start_block, id); + LatestFinalizedBlock::set(txn, &start_block); NextToScanForOutputsBlock::set(txn, &start_block); // We can receive outputs in this block, but any descending transactions will be in the next @@ -138,9 +169,10 @@ impl ScannerDb { } pub(crate) fn latest_scannable_block(getter: &impl Get) -> Option { - // This is whatever block we've checked the Eventualities of, plus the window length + // We can only scan up to whatever block we've checked the Eventualities of, plus the window + // length. Since this returns an inclusive bound, we need to subtract 1 // See `eventuality.rs` for more info - NextToCheckForEventualitiesBlock::get(getter).map(|b| b + S::WINDOW_LENGTH) + NextToCheckForEventualitiesBlock::get(getter).map(|b| b + S::WINDOW_LENGTH - 1) } pub(crate) fn set_next_to_scan_for_outputs_block( @@ -187,7 +219,7 @@ impl ScannerDb { HighestAcknowledgedBlock::get(getter) } - pub(crate) fn set_outputs(txn: &mut impl DbTxn, block_number: u64, outputs: Vec>) { + pub(crate) fn set_in_instructions(txn: &mut impl DbTxn, block_number: u64, outputs: Vec, AddressFor, OutputFor>>) { if outputs.is_empty() { return; } diff --git a/processor/scanner/src/eventuality.rs b/processor/scanner/src/eventuality.rs index 37892aa87..cb91ca42c 100644 --- a/processor/scanner/src/eventuality.rs +++ b/processor/scanner/src/eventuality.rs @@ -109,6 +109,8 @@ impl ContinuallyRan for EventualityTask { iterated = true; + // TODO: Not only check/clear eventualities, if this eventuality forwarded an output, queue + // it to be reported in however many blocks todo!("TODO"); let mut txn = self.db.txn(); diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 5f51e7d09..04366dfff 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -5,10 +5,18 @@ use tokio::sync::mpsc; use serai_primitives::{Coin, Amount}; use primitives::{ReceivedOutput, BlockHeader, Block}; +// Logic for deciding where in its lifetime a multisig is. +mod lifetime; + +// Database schema definition and associated functions. mod db; +// Task to index the blockchain, ensuring we don't reorganize finalized blocks. mod index; +// Scans blocks for received coins. mod scan; +/// Check blocks for transactions expected to eventually occur. mod eventuality; +/// Task which reports `Batch`s to Substrate. mod report; /// A feed usable to scan a blockchain. @@ -16,12 +24,22 @@ mod report; /// This defines the primitive types used, along with various getters necessary for indexing. 
#[async_trait::async_trait] pub trait ScannerFeed: Send + Sync { + /// The amount of confirmations a block must have to be considered finalized. + /// + /// This value must be at least `1`. + const CONFIRMATIONS: u64; + /// The amount of blocks to process in parallel. /// /// This value must be at least `1`. This value should be the worst-case latency to handle a /// block divided by the expected block time. const WINDOW_LENGTH: u64; + /// The amount of blocks which will occur in 10 minutes (approximate). + /// + /// This value must be at least `1`. + const TEN_MINUTES: u64; + /// The representation of a block for this blockchain. /// /// A block is defined as a consensus event associated with a set of transactions. It is not @@ -152,6 +170,32 @@ pub(crate) trait ContinuallyRan: Sized { } } +/// A representation of a scanner. +pub struct Scanner; +impl Scanner { + /// Create a new scanner. + /// + /// This will begin its execution, spawning several asynchronous tasks. + // TODO: Take start_time and binary search here? + pub fn new(start_block: u64) -> Self { + todo!("TODO") + } + + /// Acknowledge a block. + /// + /// This means this block was ordered on Serai in relation to `Burn` events, and all validators + /// have achieved synchrony on it. + pub fn acknowledge_block( + &mut self, + block_number: u64, + key_to_activate: Option<()>, + forwarded_outputs: Vec<()>, + eventualities_created: Vec<()>, + ) { + todo!("TODO") + } +} + /* #[derive(Clone, Debug)] pub enum ScannerEvent { @@ -172,8 +216,6 @@ pub enum ScannerEvent { ), } -pub type ScannerEventChannel = mpsc::UnboundedReceiver>; - #[derive(Clone, Debug)] struct ScannerDb(PhantomData, PhantomData); impl ScannerDb { @@ -184,38 +226,6 @@ impl ScannerDb { getter.get(Self::seen_key(id)).is_some() } - fn outputs_key(block: &>::Id) -> Vec { - Self::scanner_key(b"outputs", block.as_ref()) - } - fn save_outputs( - txn: &mut D::Transaction<'_>, - block: &>::Id, - outputs: &[N::Output], - ) { - let mut bytes = Vec::with_capacity(outputs.len() * 64); - for output in outputs { - output.write(&mut bytes).unwrap(); - } - txn.put(Self::outputs_key(block), bytes); - } - fn outputs( - txn: &D::Transaction<'_>, - block: &>::Id, - ) -> Option> { - let bytes_vec = txn.get(Self::outputs_key(block))?; - let mut bytes: &[u8] = bytes_vec.as_ref(); - - let mut res = vec![]; - while !bytes.is_empty() { - res.push(N::Output::read(&mut bytes).unwrap()); - } - Some(res) - } - - fn scanned_block_key() -> Vec { - Self::scanner_key(b"scanned_block", []) - } - fn save_scanned_block(txn: &mut D::Transaction<'_>, block: usize) -> Vec { let id = Self::block(txn, block); // It may be None for the first key rotated to let outputs = @@ -255,36 +265,6 @@ impl ScannerDb { } impl ScannerHandle { - /// Register a key to scan for. 
- pub async fn register_key( - &mut self, - txn: &mut D::Transaction<'_>, - activation_number: usize, - key: ::G, - ) { - info!("Registering key {} in scanner at {activation_number}", hex::encode(key.to_bytes())); - - let mut scanner_lock = self.scanner.write().await; - let scanner = scanner_lock.as_mut().unwrap(); - assert!( - activation_number > scanner.ram_scanned.unwrap_or(0), - "activation block of new keys was already scanned", - ); - - if scanner.keys.is_empty() { - assert!(scanner.ram_scanned.is_none()); - scanner.ram_scanned = Some(activation_number); - assert!(ScannerDb::::save_scanned_block(txn, activation_number).is_empty()); - } - - ScannerDb::::register_key(txn, activation_number, key); - scanner.keys.push((activation_number, key)); - #[cfg(not(test))] // TODO: A test violates this. Improve the test with a better flow - assert!(scanner.keys.len() <= 2); - - scanner.eventualities.insert(key.to_bytes().as_ref().to_vec(), EventualitiesTracker::new()); - } - /// Acknowledge having handled a block. /// /// Creates a lock over the Scanner, preventing its independent scanning operations until @@ -375,53 +355,6 @@ impl Scanner { mut multisig_completed: mpsc::UnboundedReceiver, ) { loop { - let (ram_scanned, latest_block_to_scan) = { - // Sleep 5 seconds to prevent hammering the node/scanner lock - sleep(Duration::from_secs(5)).await; - - let ram_scanned = { - let scanner_lock = scanner_hold.read().await; - let scanner = scanner_lock.as_ref().unwrap(); - - // If we're not scanning for keys yet, wait until we are - if scanner.keys.is_empty() { - continue; - } - - let ram_scanned = scanner.ram_scanned.unwrap(); - // If a Batch has taken too long to be published, start waiting until it is before - // continuing scanning - // Solves a race condition around multisig rotation, documented in the relevant doc - // and demonstrated with mini - if let Some(needing_ack) = scanner.need_ack.front() { - let next = ram_scanned + 1; - let limit = needing_ack + N::CONFIRMATIONS; - assert!(next <= limit); - if next == limit { - continue; - } - }; - - ram_scanned - }; - - ( - ram_scanned, - loop { - break match network.get_latest_block_number().await { - // Only scan confirmed blocks, which we consider effectively finalized - // CONFIRMATIONS - 1 as whatever's in the latest block already has 1 confirm - Ok(latest) => latest.saturating_sub(N::CONFIRMATIONS.saturating_sub(1)), - Err(_) => { - warn!("couldn't get latest block number"); - sleep(Duration::from_secs(60)).await; - continue; - } - }; - }, - ) - }; - for block_being_scanned in (ram_scanned + 1) ..= latest_block_to_scan { // Redo the checks for if we're too far ahead { diff --git a/processor/scanner/src/lifetime.rs b/processor/scanner/src/lifetime.rs new file mode 100644 index 000000000..62ee91c3e --- /dev/null +++ b/processor/scanner/src/lifetime.rs @@ -0,0 +1,96 @@ +use crate::ScannerFeed; + +/// An enum representing the stage of a multisig within its lifetime. +/// +/// This corresponds to `spec/processor/Multisig Rotation.md`, which details steps 1-8 of the +/// rotation process. Steps 7-8 regard a multisig which isn't retiring yet retired, and +/// accordingly, no longer exists, so they are not modelled here (as this only models active +/// multisigs. Inactive multisigs aren't represented in the first place). +pub(crate) enum LifetimeStage { + /// A new multisig, once active, shouldn't actually start receiving coins until several blocks + /// later. 
If any UI is premature in sending to this multisig, we delay reporting the outputs to
+  /// prevent some DoS concerns.
+  ///
+  /// This represents steps 1-3 for a new multisig.
+  ActiveYetNotReporting,
+  /// Active with all outputs being reported on-chain.
+  ///
+  /// This represents step 4 onwards for a new multisig.
+  Active,
+  /// Retiring with all outputs being reported on-chain.
+  ///
+  /// This represents step 4 for a retiring multisig.
+  UsingNewForChange,
+  /// Retiring with outputs being forwarded, reported on-chain once forwarded.
+  ///
+  /// This represents step 5 for a retiring multisig.
+  Forwarding,
+  /// Retiring with only existing obligations being handled.
+  ///
+  /// This represents step 6 for a retiring multisig.
+  ///
+  /// Steps 7 and 8 are represented by the retiring multisig no longer existing, and these states
+  /// are only for multisigs which actively exist.
+  Finishing,
+}
+
+impl LifetimeStage {
+  /// Get the stage of its lifetime this multisig is in based on when the next multisig's key
+  /// activates.
+  ///
+  /// Panics if the multisig being calculated for isn't actually active and a variety of other
+  /// insane cases.
+  pub(crate) fn calculate<S: ScannerFeed>(
+    block_number: u64,
+    activation_block_number: u64,
+    next_keys_activation_block_number: Option<u64>,
+  ) -> Self {
+    assert!(
+      block_number >= activation_block_number,
+      "calculating lifetime stage for an inactive multisig"
+    );
+    // This is exclusive, not inclusive, since we want a CONFIRMATIONS + 10 minutes window and the
+    // activation block itself is the first block within this window
+    let active_yet_not_reporting_end_block =
+      activation_block_number + S::CONFIRMATIONS + S::TEN_MINUTES;
+    if block_number < active_yet_not_reporting_end_block {
+      return LifetimeStage::ActiveYetNotReporting;
+    }
+
+    let Some(next_keys_activation_block_number) = next_keys_activation_block_number else {
+      // If there is no next multisig, this is the active multisig
+      return LifetimeStage::Active;
+    };
+
+    assert!(
+      next_keys_activation_block_number > active_yet_not_reporting_end_block,
+      "next set of keys activated before this multisig activated"
+    );
+
+    // If the new multisig is still having its activation block finalized on-chain, this multisig
+    // is still active (step 3)
+    let new_active_yet_not_reporting_end_block =
+      next_keys_activation_block_number + S::CONFIRMATIONS + S::TEN_MINUTES;
+    if block_number < new_active_yet_not_reporting_end_block {
+      return LifetimeStage::Active;
+    }
+
+    // Step 4 details a further CONFIRMATIONS
+    let new_active_and_used_for_change_end_block =
+      new_active_yet_not_reporting_end_block + S::CONFIRMATIONS;
+    if block_number < new_active_and_used_for_change_end_block {
+      return LifetimeStage::UsingNewForChange;
+    }
+
+    // Step 5 details a further 6 hours
+    // 6 hours = 6 * 60 minutes = 6 * 6 * 10 minutes
+    let new_active_and_forwarded_to_end_block =
+      new_active_and_used_for_change_end_block + (6 * 6 * S::TEN_MINUTES);
+    if block_number < new_active_and_forwarded_to_end_block {
+      return LifetimeStage::Forwarding;
+    }
+
+    // Step 6
+    LifetimeStage::Finishing
+  }
+}
diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs
index 34a596175..b22208950 100644
--- a/processor/scanner/src/report.rs
+++ b/processor/scanner/src/report.rs
@@ -40,6 +40,20 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
 
     for b in next_to_potentially_report ..= highest_reportable {
       if ScannerDb::<S>::is_block_notable(&self.db, b) {
+        let outputs = todo!("TODO");
+        let in_instructions_to_report = vec![];
+        for output
in outputs { + match output.kind() { + // These do get reported since the scanner eliminates any which shouldn't be reported + OutputType::External => todo!("TODO"), + // These do not get reported in Batches + OutputType::Branch | OutputType::Change => {} + // These now get reported if they're legitimately forwarded + OutputType::Forwarded => { + todo!("TODO") + } + } + } todo!("TODO: Make Batches, which requires handling Forwarded within this crate"); } diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index 6058c7dac..137f708a4 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -5,6 +5,51 @@ use primitives::{Id, ReceivedOutput, Block}; // TODO: Localize to ScanDb? use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; +// Construct an InInstruction from an external output. +// +// Also returns the address to refund the coins to upon error. +fn in_instruction_from_output( + output: &impl ReceivedOutput, +) -> (Option, Option) { + assert_eq!(output.kind(), OutputType::External); + + let presumed_origin = output.presumed_origin(); + + let mut data = output.data(); + let max_data_len = usize::try_from(MAX_DATA_LEN).unwrap(); + if data.len() > max_data_len { + error!( + "data in output {} exceeded MAX_DATA_LEN ({MAX_DATA_LEN}): {}. skipping", + hex::encode(output.id()), + data.len(), + ); + return (presumed_origin, None); + } + + let shorthand = match Shorthand::decode(&mut data) { + Ok(shorthand) => shorthand, + Err(e) => { + info!("data in output {} wasn't valid shorthand: {e:?}", hex::encode(output.id())); + return (presumed_origin, None); + } + }; + let instruction = match RefundableInInstruction::try_from(shorthand) { + Ok(instruction) => instruction, + Err(e) => { + info!( + "shorthand in output {} wasn't convertible to a RefundableInInstruction: {e:?}", + hex::encode(output.id()) + ); + return (presumed_origin, None); + } + }; + + ( + instruction.origin.and_then(|addr| A::try_from(addr).ok()).or(presumed_origin), + Some(instruction.instruction), + ) +} + struct ScanForOutputsTask { db: D, feed: S, @@ -42,29 +87,79 @@ impl ContinuallyRan for ScanForOutputsTask { log::info!("scanning block: {} ({b})", hex::encode(block.id())); - let mut keys = - ScannerDb::::keys(&self.db).expect("scanning for a blockchain without any keys set"); - // Remove all the retired keys - while let Some(retire_at) = keys[0].retirement_block_number { - if retire_at <= b { - keys.remove(0); - } - } - assert!(keys.len() <= 2); + assert_eq!(ScannerDb::::next_to_scan_for_outputs_block(&self.db).unwrap(), b); + let mut keys = ScannerDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db) + .expect("scanning for a blockchain without any keys set"); - let mut outputs = vec![]; + let mut in_instructions = vec![]; // Scan for each key for key in keys { - // If this key has yet to active, skip it - if key.activation_block_number > b { - continue; - } + for output in block.scan_for_outputs(key.key) { + assert_eq!(output.key(), key.key); + + /* + The scan task runs ahead of time, obtaining ordering on the external network's blocks + with relation to events on the Serai network. This is done via publishing a Batch which + contains the InInstructions from External outputs. Accordingly, the scan process only + has to yield External outputs. - for output in block.scan_for_outputs(key.key.0) { - assert_eq!(output.key(), key.key.0); + It'd appear to make sense to scan for all outputs, and after scanning for all outputs, + yield all outputs. 
The issue is we can't identify outputs we created here. We can only
+          identify the outputs we receive and their *declared intention*.
+
+          We only want to handle Change/Branch/Forwarded outputs we made ourselves. For
+          Forwarded, the reasoning is obvious (retiring multisigs should only downsize, yet
+          accepting new outputs solely because they claim to be Forwarded would increase the size
+          of the multisig). For Change/Branch, it's because such outputs which aren't ours are
+          pointless. They wouldn't hurt to accumulate though.
+
+          The issue is they would hurt to accumulate. We want to filter out outputs which are
+          worth less than their cost to aggregate, a cost which itself varies with the current
+          state of the blockchain. We can filter such outputs here, yet if we drop a Change
+          output, we create an insolvency. We'd need to track the loss and offset it later. That
+          means we can't filter such outputs, as we expect any Change output we make.
+
+          The issue is the Change outputs we don't make. Someone can create an output declaring
+          itself to be Change, yet not actually Change. If we don't filter it, it'd be queued for
+          accumulation, yet it may cost more to accumulate than it's worth.
+
+          The solution is to let the Eventuality task, which does know if we made an output or
+          not (or rather, if a transaction is identical to a transaction which should exist
+          regarding effects), decide to keep/yield the outputs which we should only keep if we
+          made them (as Serai itself should not make worthless outputs, so we can assume they're
+          worthwhile, and even if they're not economically, they are technically).
+
+          The alternative, dropping outputs here under a generic filter rule and then reporting
+          back the insolvency created, still doesn't work, as we'd only be creating an insolvency
+          if the output was actually made by us (and not simply someone else sending in). We can
+          have the Eventuality task report the insolvency, yet that requires the scanner be
+          responsible for such filter logic. It's more flexible, and has a cleaner API,
+          to do so at a higher level.
+ */ + if output.kind() != OutputType::External { + continue; + } + + // Drop External outputs if they're to a multisig which won't report them + // This means we should report any External output we save to disk here + #[allow(clippy::match_same_arms)] + match key.stage { + // TODO: Delay External outputs + LifetimeStage::ActiveYetNotReporting => todo!("TODO"), + // We should report External outputs in these cases + LifetimeStage::Active | LifetimeStage::UsingNewForChange => {} + // We should report External outputs only once forwarded, where they'll appear as + // OutputType::Forwarded + LifetimeStage::Forwarding => todo!("TODO"), + // We should drop these as we should not be handling new External outputs at this + // time + LifetimeStage::Finishing => { + continue; + } + } // Check this isn't dust - { + let balance_to_use = { let mut balance = output.balance(); // First, subtract 2 * the cost to aggregate, as detailed in // `spec/processor/UTXO Management.md` @@ -79,15 +174,26 @@ impl ContinuallyRan for ScanForOutputsTask { if balance.amount.0 < self.feed.dust(balance.coin).0 { continue; } - } + }; - outputs.push(output); + // Decode and save the InInstruction/refund addr for this output + match in_instruction_from_output::(output) { + (refund_addr, Some(instruction)) => { + let instruction = InInstructionWithBalance { instruction, balance: balance_to_use }; + // TODO: Make a proper struct out of this + in_instructions.push((output.id(), refund_addr, instruction)); + todo!("TODO: Save to be reported") + } + (Some(refund_addr), None) => todo!("TODO: Queue refund"), + // Since we didn't receive an instruction nor can we refund this, accumulate it + (None, None) => {} + } } } let mut txn = self.db.txn(); - // Save the outputs - ScannerDb::::set_outputs(&mut txn, b, outputs); + // Save the in instructions + ScannerDb::::set_in_instructions(&mut txn, b, in_instructions); // Update the next to scan block ScannerDb::::set_next_to_scan_for_outputs_block(&mut txn, b + 1); txn.commit(); From e8dd4deff009dd6403b1016dad1db10be509cbc7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 23 Aug 2024 21:21:02 -0400 Subject: [PATCH 016/179] Correct compilation errors --- Cargo.lock | 1 + processor/primitives/src/block.rs | 4 +-- processor/primitives/src/output.rs | 7 ++-- processor/scanner/Cargo.toml | 1 + processor/scanner/src/db.rs | 27 ++++++++++----- processor/scanner/src/lib.rs | 16 +++++---- processor/scanner/src/report.rs | 18 ++-------- processor/scanner/src/scan.rs | 53 ++++++++++++++++++++---------- 8 files changed, 77 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e3e6f3782..f887bd8ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8669,6 +8669,7 @@ dependencies = [ "log", "parity-scale-codec", "serai-db", + "serai-in-instructions-primitives", "serai-primitives", "serai-processor-messages", "serai-processor-primitives", diff --git a/processor/primitives/src/block.rs b/processor/primitives/src/block.rs index 22f0b9984..1fc92c3a9 100644 --- a/processor/primitives/src/block.rs +++ b/processor/primitives/src/block.rs @@ -2,7 +2,7 @@ use core::fmt::Debug; use group::{Group, GroupEncoding}; -use crate::{Id, ReceivedOutput}; +use crate::{Id, Address, ReceivedOutput}; /// A block header from an external network. pub trait BlockHeader: Send + Sync + Sized + Clone + Debug { @@ -28,7 +28,7 @@ pub trait Block: Send + Sync + Sized + Clone + Debug { /// The type used to represent keys on this external network. 
type Key: Group + GroupEncoding; /// The type used to represent addresses on this external network. - type Address; + type Address: Address; /// The type used to represent received outputs on this external network. type Output: ReceivedOutput; diff --git a/processor/primitives/src/output.rs b/processor/primitives/src/output.rs index 1dd186aac..2b96d2299 100644 --- a/processor/primitives/src/output.rs +++ b/processor/primitives/src/output.rs @@ -3,10 +3,13 @@ use std::io; use group::GroupEncoding; -use serai_primitives::Balance; +use serai_primitives::{ExternalAddress, Balance}; use crate::Id; +/// An address on the external network. +pub trait Address: Send + Sync + TryFrom {} + /// The type of the output. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum OutputType { @@ -81,7 +84,7 @@ impl OutputType { } /// A received output. -pub trait ReceivedOutput: +pub trait ReceivedOutput: Send + Sync + Sized + Clone + PartialEq + Eq + Debug { /// The type used to identify this output. diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml index 82de4de1b..a16b55f2c 100644 --- a/processor/scanner/Cargo.toml +++ b/processor/scanner/Cargo.toml @@ -36,6 +36,7 @@ tokio = { version = "1", default-features = false, features = ["rt-multi-thread" serai-db = { path = "../../common/db" } serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } +serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std"] } messages = { package = "serai-processor-messages", path = "../messages" } primitives = { package = "serai-processor-primitives", path = "../primitives" } diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 2e462712d..7eb276ce2 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -1,11 +1,17 @@ use core::marker::PhantomData; +use std::io; +use group::GroupEncoding; + +use scale::{Encode, Decode}; use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db}; +use serai_in_instructions_primitives::InInstructionWithBalance; + use primitives::{Id, ReceivedOutput, Block, BorshG}; -use crate::{lifetime::LifetimeStage, ScannerFeed, BlockIdFor, KeyFor, OutputFor}; +use crate::{lifetime::LifetimeStage, ScannerFeed, BlockIdFor, KeyFor, AddressFor, OutputFor}; // The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. trait Borshy: BorshSerialize + BorshDeserialize {} @@ -22,16 +28,16 @@ pub(crate) struct SeraiKey { pub(crate) key: K, } -pub(crate) struct OutputWithInInstruction> { - output: O, - refund_address: A, - in_instruction: InInstructionWithBalance, +pub(crate) struct OutputWithInInstruction { + pub(crate) output: OutputFor, + pub(crate) return_address: Option>, + pub(crate) in_instruction: InInstructionWithBalance, } -impl> OutputWithInInstruction { +impl OutputWithInInstruction { fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { self.output.write(writer)?; - // TODO self.refund_address.write(writer)?; + // TODO self.return_address.write(writer)?; self.in_instruction.encode_to(writer); Ok(()) } @@ -172,6 +178,7 @@ impl ScannerDb { // We can only scan up to whatever block we've checked the Eventualities of, plus the window // length. 
Since this returns an inclusive bound, we need to subtract 1 // See `eventuality.rs` for more info + // TODO: Adjust based on register eventualities NextToCheckForEventualitiesBlock::get(getter).map(|b| b + S::WINDOW_LENGTH - 1) } @@ -219,7 +226,11 @@ impl ScannerDb { HighestAcknowledgedBlock::get(getter) } - pub(crate) fn set_in_instructions(txn: &mut impl DbTxn, block_number: u64, outputs: Vec, AddressFor, OutputFor>>) { + pub(crate) fn set_in_instructions( + txn: &mut impl DbTxn, + block_number: u64, + outputs: Vec>, + ) { if outputs.is_empty() { return; } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 04366dfff..7bd8cc2ec 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -1,4 +1,4 @@ -use core::{fmt::Debug, time::Duration}; +use core::{marker::PhantomData, fmt::Debug, time::Duration}; use tokio::sync::mpsc; @@ -86,6 +86,7 @@ pub trait ScannerFeed: Send + Sync { type BlockIdFor = <<::Block as Block>::Header as BlockHeader>::Id; type KeyFor = <::Block as Block>::Key; +type AddressFor = <::Block as Block>::Address; type OutputFor = <::Block as Block>::Output; /// A handle to immediately run an iteration of a task. @@ -171,8 +172,8 @@ pub(crate) trait ContinuallyRan: Sized { } /// A representation of a scanner. -pub struct Scanner; -impl Scanner { +pub struct Scanner(PhantomData); +impl Scanner { /// Create a new scanner. /// /// This will begin its execution, spawning several asynchronous tasks. @@ -189,9 +190,12 @@ impl Scanner { &mut self, block_number: u64, key_to_activate: Option<()>, - forwarded_outputs: Vec<()>, - eventualities_created: Vec<()>, - ) { + ) -> Vec> { + todo!("TODO") + } + + /// Register the Eventualities caused by a block. + pub fn register_eventualities(&mut self, block_number: u64, eventualities: Vec<()>) { todo!("TODO") } } diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs index b22208950..3c22556c2 100644 --- a/processor/scanner/src/report.rs +++ b/processor/scanner/src/report.rs @@ -5,7 +5,7 @@ use serai_db::{Db, DbTxn}; -use primitives::{Id, Block}; +use primitives::{Id, OutputType, Block}; // TODO: Localize to ReportDb? 
use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; @@ -40,20 +40,8 @@ impl ContinuallyRan for ReportTask { for b in next_to_potentially_report ..= highest_reportable { if ScannerDb::::is_block_notable(&self.db, b) { - let outputs = todo!("TODO"); - let in_instructions_to_report = vec![]; - for output in outputs { - match output.kind() { - // These do get reported since the scanner eliminates any which shouldn't be reported - OutputType::External => todo!("TODO"), - // These do not get reported in Batches - OutputType::Branch | OutputType::Change => {} - // These now get reported if they're legitimately forwarded - OutputType::Forwarded => { - todo!("TODO") - } - } - } + let in_instructions = todo!("TODO"); + // TODO: Also pull the InInstructions from forwarding todo!("TODO: Make Batches, which requires handling Forwarded within this crate"); } diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index 137f708a4..133325866 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -1,16 +1,28 @@ +use group::GroupEncoding; + +use scale::{Encode, Decode}; use serai_db::{Db, DbTxn}; -use primitives::{Id, ReceivedOutput, Block}; +use serai_primitives::{MAX_DATA_LEN, ExternalAddress}; +use serai_in_instructions_primitives::{ + Shorthand, RefundableInInstruction, InInstruction, InInstructionWithBalance, +}; + +use primitives::{Id, OutputType, ReceivedOutput, Block}; // TODO: Localize to ScanDb? -use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; +use crate::{ + lifetime::LifetimeStage, + db::{OutputWithInInstruction, ScannerDb}, + ScannerFeed, AddressFor, OutputFor, ContinuallyRan, +}; // Construct an InInstruction from an external output. // -// Also returns the address to refund the coins to upon error. -fn in_instruction_from_output( - output: &impl ReceivedOutput, -) -> (Option, Option) { +// Also returns the address to return the coins to upon error. +fn in_instruction_from_output( + output: &OutputFor, +) -> (Option>, Option) { assert_eq!(output.kind(), OutputType::External); let presumed_origin = output.presumed_origin(); @@ -18,7 +30,7 @@ fn in_instruction_from_output( let mut data = output.data(); let max_data_len = usize::try_from(MAX_DATA_LEN).unwrap(); if data.len() > max_data_len { - error!( + log::info!( "data in output {} exceeded MAX_DATA_LEN ({MAX_DATA_LEN}): {}. 
skipping", hex::encode(output.id()), data.len(), @@ -29,14 +41,14 @@ fn in_instruction_from_output( let shorthand = match Shorthand::decode(&mut data) { Ok(shorthand) => shorthand, Err(e) => { - info!("data in output {} wasn't valid shorthand: {e:?}", hex::encode(output.id())); + log::info!("data in output {} wasn't valid shorthand: {e:?}", hex::encode(output.id())); return (presumed_origin, None); } }; let instruction = match RefundableInInstruction::try_from(shorthand) { Ok(instruction) => instruction, Err(e) => { - info!( + log::info!( "shorthand in output {} wasn't convertible to a RefundableInInstruction: {e:?}", hex::encode(output.id()) ); @@ -45,7 +57,7 @@ fn in_instruction_from_output( }; ( - instruction.origin.and_then(|addr| A::try_from(addr).ok()).or(presumed_origin), + instruction.origin.and_then(|addr| AddressFor::::try_from(addr).ok()).or(presumed_origin), Some(instruction.instruction), ) } @@ -174,18 +186,25 @@ impl ContinuallyRan for ScanForOutputsTask { if balance.amount.0 < self.feed.dust(balance.coin).0 { continue; } + + balance }; - // Decode and save the InInstruction/refund addr for this output - match in_instruction_from_output::(output) { - (refund_addr, Some(instruction)) => { - let instruction = InInstructionWithBalance { instruction, balance: balance_to_use }; + // Decode and save the InInstruction/return addr for this output + match in_instruction_from_output::(&output) { + (return_address, Some(instruction)) => { + let in_instruction = + InInstructionWithBalance { instruction, balance: balance_to_use }; // TODO: Make a proper struct out of this - in_instructions.push((output.id(), refund_addr, instruction)); + in_instructions.push(OutputWithInInstruction { + output, + return_address, + in_instruction, + }); todo!("TODO: Save to be reported") } - (Some(refund_addr), None) => todo!("TODO: Queue refund"), - // Since we didn't receive an instruction nor can we refund this, accumulate it + (Some(return_addr), None) => todo!("TODO: Queue return"), + // Since we didn't receive an instruction nor can we return this, accumulate it (None, None) => {} } } From e1e7432e480cbaa26e75aa1489539d33c299f678 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 23 Aug 2024 22:09:54 -0400 Subject: [PATCH 017/179] Finish scan task --- processor/scanner/src/db.rs | 62 +++++++++++++++++++--- processor/scanner/src/lib.rs | 2 + processor/scanner/src/lifetime.rs | 22 ++++---- processor/scanner/src/scan.rs | 87 ++++++++++++++++++------------- 4 files changed, 122 insertions(+), 51 deletions(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 7eb276ce2..fa2db7812 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -24,8 +24,9 @@ struct SeraiKeyDbEntry { } pub(crate) struct SeraiKey { - pub(crate) stage: LifetimeStage, pub(crate) key: K, + pub(crate) stage: LifetimeStage, + pub(crate) block_at_which_reporting_starts: u64, } pub(crate) struct OutputWithInInstruction { @@ -81,6 +82,9 @@ create_db!( // This collapses from `bool` to `()`, using if the value was set for true and false otherwise NotableBlock: (number: u64) -> (), + SerializedQueuedOutputs: (block_number: u64) -> Vec, + SerializedForwardedOutputsIndex: (block_number: u64) -> Vec, + SerializedForwardedOutput: (output_id: &[u8]) -> Vec, SerializedOutputs: (block_number: u64) -> Vec, } ); @@ -138,14 +142,13 @@ impl ScannerDb { if block_number < raw_keys[i].activation_block_number { continue; } - keys.push(SeraiKey { - key: raw_keys[i].key.0, - stage: LifetimeStage::calculate::( + 
let (stage, block_at_which_reporting_starts) = + LifetimeStage::calculate_stage_and_reporting_start_block::( block_number, raw_keys[i].activation_block_number, raw_keys.get(i + 1).map(|key| key.activation_block_number), - ), - }); + ); + keys.push(SeraiKey { key: raw_keys[i].key.0, stage, block_at_which_reporting_starts }); } assert!(keys.len() <= 2); Some(keys) @@ -226,6 +229,53 @@ impl ScannerDb { HighestAcknowledgedBlock::get(getter) } + pub(crate) fn take_queued_outputs( + txn: &mut impl DbTxn, + block_number: u64, + ) -> Vec> { + todo!("TODO") + } + + pub(crate) fn queue_return( + txn: &mut impl DbTxn, + block_queued_from: u64, + return_addr: AddressFor, + output: OutputFor, + ) { + todo!("TODO") + } + + pub(crate) fn queue_output_until_block( + txn: &mut impl DbTxn, + queue_for_block: u64, + output: &OutputWithInInstruction, + ) { + let mut outputs = + SerializedQueuedOutputs::get(txn, queue_for_block).unwrap_or(Vec::with_capacity(128)); + output.write(&mut outputs).unwrap(); + SerializedQueuedOutputs::set(txn, queue_for_block, &outputs); + } + + pub(crate) fn save_output_being_forwarded( + txn: &mut impl DbTxn, + block_forwarded_from: u64, + output: &OutputWithInInstruction, + ) { + let mut buf = Vec::with_capacity(128); + output.write(&mut buf).unwrap(); + + let id = output.output.id(); + + // Save this to an index so we can later fetch all outputs to forward + let mut forwarded_outputs = SerializedForwardedOutputsIndex::get(txn, block_forwarded_from) + .unwrap_or(Vec::with_capacity(32)); + forwarded_outputs.extend(id.as_ref()); + SerializedForwardedOutputsIndex::set(txn, block_forwarded_from, &forwarded_outputs); + + // Save the output itself + SerializedForwardedOutput::set(txn, id.as_ref(), &buf); + } + pub(crate) fn set_in_instructions( txn: &mut impl DbTxn, block_number: u64, diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 7bd8cc2ec..0a26f1773 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -195,6 +195,8 @@ impl Scanner { } /// Register the Eventualities caused by a block. + // TODO: Replace this with a callback returned by acknowledge_block which panics if it's not + // called yet dropped pub fn register_eventualities(&mut self, block_number: u64, eventualities: Vec<()>) { todo!("TODO") } diff --git a/processor/scanner/src/lifetime.rs b/processor/scanner/src/lifetime.rs index 62ee91c3e..6d189bca7 100644 --- a/processor/scanner/src/lifetime.rs +++ b/processor/scanner/src/lifetime.rs @@ -35,16 +35,16 @@ pub(crate) enum LifetimeStage { } impl LifetimeStage { - /// Get the stage of its lifetime this multisig is in based on when the next multisig's key - /// activates. + /// Get the stage of its lifetime this multisig is in, and the block at which we start reporting + /// outputs to it. /// /// Panics if the multisig being calculated for isn't actually active and a variety of other /// insane cases. 
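+  ///
+  /// As a worked example (hypothetical values: `CONFIRMATIONS = 6`, `TEN_MINUTES = 1`, this key
+  /// activating at block 100, the next key activating at block 200): reporting starts at block
+  /// 107 (100 + 6 + 1), with `ActiveYetNotReporting` through block 106, `Active` through block
+  /// 206, `UsingNewForChange` through block 212, `Forwarding` through block 248, and `Finishing`
+  /// from block 249 onwards.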
- pub(crate) fn calculate( + pub(crate) fn calculate_stage_and_reporting_start_block( block_number: u64, activation_block_number: u64, next_keys_activation_block_number: Option, - ) -> Self { + ) -> (Self, u64) { assert!( activation_block_number >= block_number, "calculating lifetime stage for an inactive multisig" @@ -53,13 +53,15 @@ impl LifetimeStage { // activation block itself is the first block within this window let active_yet_not_reporting_end_block = activation_block_number + S::CONFIRMATIONS + S::TEN_MINUTES; + // The exclusive end block is the inclusive start block + let reporting_start_block = active_yet_not_reporting_end_block; if block_number < active_yet_not_reporting_end_block { - return LifetimeStage::ActiveYetNotReporting; + return (LifetimeStage::ActiveYetNotReporting, reporting_start_block); } let Some(next_keys_activation_block_number) = next_keys_activation_block_number else { // If there is no next multisig, this is the active multisig - return LifetimeStage::Active; + return (LifetimeStage::Active, reporting_start_block); }; assert!( @@ -72,14 +74,14 @@ impl LifetimeStage { let new_active_yet_not_reporting_end_block = next_keys_activation_block_number + S::CONFIRMATIONS + S::TEN_MINUTES; if block_number < new_active_yet_not_reporting_end_block { - return LifetimeStage::Active; + return (LifetimeStage::Active, reporting_start_block); } // Step 4 details a further CONFIRMATIONS let new_active_and_used_for_change_end_block = new_active_yet_not_reporting_end_block + S::CONFIRMATIONS; if block_number < new_active_and_used_for_change_end_block { - return LifetimeStage::UsingNewForChange; + return (LifetimeStage::UsingNewForChange, reporting_start_block); } // Step 5 details a further 6 hours @@ -87,10 +89,10 @@ impl LifetimeStage { let new_active_and_forwarded_to_end_block = new_active_and_used_for_change_end_block + (6 * 6 * S::TEN_MINUTES); if block_number < new_active_and_forwarded_to_end_block { - return LifetimeStage::Forwarding; + return (LifetimeStage::Forwarding, reporting_start_block); } // Step 6 - LifetimeStage::Finishing + (LifetimeStage::Finishing, reporting_start_block) } } diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index 133325866..e35eb7494 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -103,7 +103,10 @@ impl ContinuallyRan for ScanForOutputsTask { let mut keys = ScannerDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db) .expect("scanning for a blockchain without any keys set"); - let mut in_instructions = vec![]; + let mut txn = self.db.txn(); + + let mut in_instructions = ScannerDb::::take_queued_outputs(&mut txn, b); + // Scan for each key for key in keys { for output in block.scan_for_outputs(key.key) { @@ -152,24 +155,6 @@ impl ContinuallyRan for ScanForOutputsTask { continue; } - // Drop External outputs if they're to a multisig which won't report them - // This means we should report any External output we save to disk here - #[allow(clippy::match_same_arms)] - match key.stage { - // TODO: Delay External outputs - LifetimeStage::ActiveYetNotReporting => todo!("TODO"), - // We should report External outputs in these cases - LifetimeStage::Active | LifetimeStage::UsingNewForChange => {} - // We should report External outputs only once forwarded, where they'll appear as - // OutputType::Forwarded - LifetimeStage::Forwarding => todo!("TODO"), - // We should drop these as we should not be handling new External outputs at this - // time - LifetimeStage::Finishing => { - 
continue; - } - } - // Check this isn't dust let balance_to_use = { let mut balance = output.balance(); @@ -190,27 +175,59 @@ impl ContinuallyRan for ScanForOutputsTask { balance }; - // Decode and save the InInstruction/return addr for this output - match in_instruction_from_output::(&output) { - (return_address, Some(instruction)) => { - let in_instruction = - InInstructionWithBalance { instruction, balance: balance_to_use }; - // TODO: Make a proper struct out of this - in_instructions.push(OutputWithInInstruction { - output, - return_address, - in_instruction, - }); - todo!("TODO: Save to be reported") + // Fetch the InInstruction/return addr for this output + let output_with_in_instruction = match in_instruction_from_output::(&output) { + (return_address, Some(instruction)) => OutputWithInInstruction { + output, + return_address, + in_instruction: InInstructionWithBalance { instruction, balance: balance_to_use }, + }, + (Some(return_addr), None) => { + // Since there was no instruction here, return this since we parsed a return address + ScannerDb::::queue_return(&mut txn, b, return_addr, output); + continue; + } + // Since we didn't receive an instruction nor can we return this, move on + (None, None) => continue, + }; + + // Drop External outputs if they're to a multisig which won't report them + // This means we should report any External output we save to disk here + #[allow(clippy::match_same_arms)] + match key.stage { + // This multisig isn't yet reporting its External outputs to avoid a DoS + // Queue the output to be reported when this multisig starts reporting + LifetimeStage::ActiveYetNotReporting => { + ScannerDb::::queue_output_until_block( + &mut txn, + key.block_at_which_reporting_starts, + &output_with_in_instruction, + ); + continue; + } + // We should report External outputs in these cases + LifetimeStage::Active | LifetimeStage::UsingNewForChange => {} + // We should report External outputs only once forwarded, where they'll appear as + // OutputType::Forwarded. 
We save them now for when they appear + LifetimeStage::Forwarding => { + // When the forwarded output appears, we can see which Plan it's associated with and + // from there recover this output + ScannerDb::::save_output_being_forwarded(&mut txn, &output_with_in_instruction); + continue; + } + // We should drop these as we should not be handling new External outputs at this + // time + LifetimeStage::Finishing => { + continue; } - (Some(return_addr), None) => todo!("TODO: Queue return"), - // Since we didn't receive an instruction nor can we return this, accumulate it - (None, None) => {} } + // Ensures we didn't miss a `continue` above + assert!(matches!(key.stage, LifetimeStage::Active | LifetimeStage::UsingNewForChange)); + + in_instructions.push(output_with_in_instruction); } } - let mut txn = self.db.txn(); // Save the in instructions ScannerDb::::set_in_instructions(&mut txn, b, in_instructions); // Update the next to scan block From 2f5dc4bc579c1b3c3a13792d7ec2da7d77fb2d02 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 23 Aug 2024 22:29:15 -0400 Subject: [PATCH 018/179] Flesh out report task --- processor/primitives/src/block.rs | 11 +++--- processor/scanner/src/db.rs | 31 +++++++++------- processor/scanner/src/lib.rs | 6 ++-- processor/scanner/src/report.rs | 59 +++++++++++++++++++++++++------ processor/scanner/src/scan.rs | 2 +- 5 files changed, 79 insertions(+), 30 deletions(-) diff --git a/processor/primitives/src/block.rs b/processor/primitives/src/block.rs index 1fc92c3a9..77e7e8160 100644 --- a/processor/primitives/src/block.rs +++ b/processor/primitives/src/block.rs @@ -6,12 +6,13 @@ use crate::{Id, Address, ReceivedOutput}; /// A block header from an external network. pub trait BlockHeader: Send + Sync + Sized + Clone + Debug { - /// The type used to identify blocks. - type Id: 'static + Id; /// The ID of this block. - fn id(&self) -> Self::Id; + /// + /// This is fixed to 32-bytes and is expected to be cryptographically binding with 128-bit + /// security. This is not required to be the ID used natively by the external network. + fn id(&self) -> [u8; 32]; /// The ID of the parent block. - fn parent(&self) -> Self::Id; + fn parent(&self) -> [u8; 32]; } /// A block from an external network. @@ -33,7 +34,7 @@ pub trait Block: Send + Sync + Sized + Clone + Debug { type Output: ReceivedOutput; /// The ID of this block. - fn id(&self) -> ::Id; + fn id(&self) -> [u8; 32]; /// Scan all outputs within this block to find the outputs spendable by this key. fn scan_for_outputs(&self, key: Self::Key) -> Vec; diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index fa2db7812..cccbe5f6d 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -11,7 +11,7 @@ use serai_in_instructions_primitives::InInstructionWithBalance; use primitives::{Id, ReceivedOutput, Block, BorshG}; -use crate::{lifetime::LifetimeStage, ScannerFeed, BlockIdFor, KeyFor, AddressFor, OutputFor}; +use crate::{lifetime::LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor}; // The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. 
trait Borshy: BorshSerialize + BorshDeserialize {} @@ -46,8 +46,8 @@ impl OutputWithInInstruction { create_db!( Scanner { - BlockId: (number: u64) -> I, - BlockNumber: (id: I) -> u64, + BlockId: (number: u64) -> [u8; 32], + BlockNumber: (id: [u8; 32]) -> u64, ActiveKeys: () -> Vec>, @@ -91,14 +91,14 @@ create_db!( pub(crate) struct ScannerDb(PhantomData); impl ScannerDb { - pub(crate) fn set_block(txn: &mut impl DbTxn, number: u64, id: BlockIdFor) { + pub(crate) fn set_block(txn: &mut impl DbTxn, number: u64, id: [u8; 32]) { BlockId::set(txn, number, &id); BlockNumber::set(txn, id, &number); } - pub(crate) fn block_id(getter: &impl Get, number: u64) -> Option> { + pub(crate) fn block_id(getter: &impl Get, number: u64) -> Option<[u8; 32]> { BlockId::get(getter, number) } - pub(crate) fn block_number(getter: &impl Get, id: BlockIdFor) -> Option { + pub(crate) fn block_number(getter: &impl Get, id: [u8; 32]) -> Option { BlockNumber::get(getter, id) } @@ -154,7 +154,7 @@ impl ScannerDb { Some(keys) } - pub(crate) fn set_start_block(txn: &mut impl DbTxn, start_block: u64, id: BlockIdFor) { + pub(crate) fn set_start_block(txn: &mut impl DbTxn, start_block: u64, id: [u8; 32]) { assert!( LatestFinalizedBlock::get(txn).is_none(), "setting start block but prior set start block" @@ -276,18 +276,18 @@ impl ScannerDb { SerializedForwardedOutput::set(txn, id.as_ref(), &buf); } + // TODO: Use a DbChannel here, and send the instructions to the report task and the outputs to + // the eventuality task? That way this cleans up after itself pub(crate) fn set_in_instructions( txn: &mut impl DbTxn, block_number: u64, outputs: Vec>, ) { - if outputs.is_empty() { - return; + if !outputs.is_empty() { + // Set this block as notable + NotableBlock::set(txn, block_number, &()); } - // Set this block as notable - NotableBlock::set(txn, block_number, &()); - let mut buf = Vec::with_capacity(outputs.len() * 128); for output in outputs { output.write(&mut buf).unwrap(); @@ -295,6 +295,13 @@ impl ScannerDb { SerializedOutputs::set(txn, block_number, &buf); } + pub(crate) fn in_instructions( + getter: &impl Get, + block_number: u64, + ) -> Option>> { + todo!("TODO") + } + pub(crate) fn is_block_notable(getter: &impl Get, number: u64) -> bool { NotableBlock::get(getter, number).is_some() } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 0a26f1773..5b5f6fe25 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -2,7 +2,7 @@ use core::{marker::PhantomData, fmt::Debug, time::Duration}; use tokio::sync::mpsc; -use serai_primitives::{Coin, Amount}; +use serai_primitives::{NetworkId, Coin, Amount}; use primitives::{ReceivedOutput, BlockHeader, Block}; // Logic for deciding where in its lifetime a multisig is. @@ -24,6 +24,9 @@ mod report; /// This defines the primitive types used, along with various getters necessary for indexing. #[async_trait::async_trait] pub trait ScannerFeed: Send + Sync { + /// The ID of the network being scanned for. + const NETWORK: NetworkId; + /// The amount of confirmations a block must have to be considered finalized. /// /// This value must be at least `1`. 
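For orientation, a hedged sketch of how these new `ScannerFeed` constants might be filled in. The type and every value below are illustrative assumptions, not any real network's parameters, and `NetworkId` is simplified to a plain integer:

// Hypothetical feed; a real implementation would use serai_primitives::NetworkId
// and network-appropriate values.
struct ExampleFeed;

impl ExampleFeed {
  // Stand-in for `const NETWORK: NetworkId` (e.g. a Bitcoin variant)
  const NETWORK: u8 = 0;
  // A block with six confirmations is treated as finalized
  const CONFIRMATIONS: u64 = 6;
  // Scan a few blocks in parallel to hide per-block handling latency
  const WINDOW_LENGTH: u64 = 3;
}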
@@ -84,7 +87,6 @@ pub trait ScannerFeed: Send + Sync { fn dust(&self, coin: Coin) -> Amount; } -type BlockIdFor = <<::Block as Block>::Header as BlockHeader>::Id; type KeyFor = <::Block as Block>::Key; type AddressFor = <::Block as Block>::Address; type OutputFor = <::Block as Block>::Output; diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs index 3c22556c2..17cdca35a 100644 --- a/processor/scanner/src/report.rs +++ b/processor/scanner/src/report.rs @@ -1,15 +1,20 @@ -/* - We only report blocks once both tasks, scanning for received ouputs and eventualities, have - processed the block. This ensures we've performed all ncessary options. -*/ - +use scale::Encode; use serai_db::{Db, DbTxn}; +use serai_primitives::BlockHash; +use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; use primitives::{Id, OutputType, Block}; // TODO: Localize to ReportDb? use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; +/* + This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion. + + We only report blocks once both tasks, scanning for received outputs and checking for resolved + Eventualities, have processed the block. This ensures we know if this block is notable, and have + the InInstructions for it. +*/ struct ReportTask { db: D, feed: S, @@ -39,15 +44,49 @@ impl ContinuallyRan for ReportTask { .expect("ReportTask run before writing the start block"); for b in next_to_potentially_report ..= highest_reportable { - if ScannerDb::::is_block_notable(&self.db, b) { - let in_instructions = todo!("TODO"); - // TODO: Also pull the InInstructions from forwarding - todo!("TODO: Make Batches, which requires handling Forwarded within this crate"); + let mut txn = self.db.txn(); + + if ScannerDb::::is_block_notable(&txn, b) { + let in_instructions = ScannerDb::::in_instructions(&txn, b) + .expect("reporting block which didn't set its InInstructions"); + + let network = S::NETWORK; + let block_hash = + ScannerDb::::block_id(&txn, b).expect("reporting block we didn't save the ID for"); + let mut batch_id = ScannerDb::::acquire_batch_id(txn); + + // start with empty batch + let mut batches = + vec![Batch { network, id: batch_id, block: BlockHash(block_hash), instructions: vec![] }]; + + for instruction in in_instructions { + let batch = batches.last_mut().unwrap(); + batch.instructions.push(instruction.in_instruction); + + // check if batch is over-size + if batch.encode().len() > MAX_BATCH_SIZE { + // pop the last instruction so it's back in size + let instruction = batch.instructions.pop().unwrap(); + + // bump the id for the new batch + batch_id = ScannerDb::::acquire_batch_id(txn); + + // make a new batch with this instruction included + batches.push(Batch { + network, + id: batch_id, + block: BlockHash(block_hash), + instructions: vec![instruction], + }); + } + } + + todo!("TODO: Set/emit batches"); } - let mut txn = self.db.txn(); // Update the next to potentially report block ScannerDb::::set_next_to_potentially_report_block(&mut txn, b + 1); + txn.commit(); } diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index e35eb7494..365f0f144 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -212,7 +212,7 @@ impl ContinuallyRan for ScanForOutputsTask { LifetimeStage::Forwarding => { // When the forwarded output appears, we can see which Plan it's associated with and // from there recover this output - ScannerDb::::save_output_being_forwarded(&mut txn, &output_with_in_instruction); + 
ScannerDb::::save_output_being_forwarded(&mut txn, b, &output_with_in_instruction); continue; } // We should drop these as we should not be handling new External outputs at this From 6873331794a4179565ebfa75566831b1548cb35b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 24 Aug 2024 17:30:02 -0400 Subject: [PATCH 019/179] Have the scan flag blocks with change/branch/forwarded as notable --- processor/scanner/src/db.rs | 5 +++++ processor/scanner/src/report.rs | 1 + processor/scanner/src/scan.rs | 8 ++++++++ 3 files changed, 14 insertions(+) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index cccbe5f6d..0710ae309 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -120,6 +120,7 @@ impl ScannerDb { } // TODO: This will be called from the Eventuality task yet this field is read by the scan task // We need to write the argument for its safety + // TODO: retire_key needs to set the notable block pub(crate) fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { let mut keys: Vec>>> = ActiveKeys::get(txn).expect("retiring key yet no active keys"); @@ -276,6 +277,10 @@ impl ScannerDb { SerializedForwardedOutput::set(txn, id.as_ref(), &buf); } + pub(crate) fn flag_notable(txn: &mut impl DbTxn, block_number: u64) { + NotableBlock::set(txn, block_number, &()); + } + // TODO: Use a DbChannel here, and send the instructions to the report task and the outputs to // the eventuality task? That way this cleans up after itself pub(crate) fn set_in_instructions( diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs index 17cdca35a..37ef8874e 100644 --- a/processor/scanner/src/report.rs +++ b/processor/scanner/src/report.rs @@ -46,6 +46,7 @@ impl ContinuallyRan for ReportTask { for b in next_to_potentially_report ..= highest_reportable { let mut txn = self.db.txn(); + // If this block is notable, create the Batch(s) for it if ScannerDb::::is_block_notable(&txn, b) { let in_instructions = ScannerDb::::in_instructions(&txn, b) .expect("reporting block which didn't set its InInstructions"); diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index 365f0f144..8c8e07b35 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -152,6 +152,14 @@ impl ContinuallyRan for ScanForOutputsTask { to do so at a higher level. 
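To make the rule these changes implement concrete, a minimal hedged sketch of the dust gate, with raw u64s standing in as assumptions for `Amount`'s inner value and `ScannerFeed::dust`:

// Sketch of the dust gate used when deciding if an output merits any work
fn over_dust_limit(amount: u64, dust: u64) -> bool {
  // An output only warrants consensus/signing work when it exceeds the dust
  // limit, so 1-satoshi sends can't force protocol invocations
  amount >= dust
}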
           */
           if output.kind() != OutputType::External {
+            // While we don't report these outputs, we still need consensus on this block and
+            // accordingly still need to set it as notable
+            let balance = outputs.balance();
+            // We ensure it's over the dust limit to prevent people sending 1 satoshi from causing
+            // an invocation of a consensus/signing protocol
+            if balance.amount.0 >= self.feed.dust(balance.coin).0 {
+              ScannerDb::<S>::flag_notable(&mut txn, b);
+            }
             continue;
           }

From ebf09904b2c600db2007845f5adada2e6803d8ce Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Sat, 24 Aug 2024 23:43:31 -0400
Subject: [PATCH 020/179] Flesh out eventuality task

---
 processor/primitives/src/block.rs       |  30 ++++++-
 processor/primitives/src/eventuality.rs |  11 ++-
 processor/primitives/src/lib.rs         |   4 +-
 processor/primitives/src/output.rs      |   4 +
 processor/scanner/src/db.rs             |   6 +-
 processor/scanner/src/eventuality.rs    | 103 ++++++++++++++++++++++--
 processor/scanner/src/index.rs          |   2 +-
 processor/scanner/src/lib.rs            |  11 ++-
 processor/scanner/src/lifetime.rs       |   1 +
 processor/scanner/src/report.rs         |  19 ++++-
 processor/scanner/src/scan.rs           |  12 ++-
 11 files changed, 174 insertions(+), 29 deletions(-)

diff --git a/processor/primitives/src/block.rs b/processor/primitives/src/block.rs
index 77e7e8160..5ca2acec9 100644
--- a/processor/primitives/src/block.rs
+++ b/processor/primitives/src/block.rs
@@ -1,8 +1,9 @@
 use core::fmt::Debug;
+use std::collections::HashMap;

 use group::{Group, GroupEncoding};

-use crate::{Id, Address, ReceivedOutput};
+use crate::{Id, Address, ReceivedOutput, Eventuality, EventualityTracker};

 /// A block header from an external network.
 pub trait BlockHeader: Send + Sync + Sized + Clone + Debug {
@@ -15,6 +16,12 @@ pub trait BlockHeader: Send + Sync + Sized + Clone + Debug {
   fn parent(&self) -> [u8; 32];
 }

+/// A transaction from an external network.
+pub trait Transaction: Send + Sync + Sized {
+  /// The type used to identify transactions on this external network.
+  type Id: Id;
+}
+
 /// A block from an external network.
 ///
 /// A block is defined as a consensus event associated with a set of transactions. It is not
@@ -30,12 +37,31 @@ pub trait Block: Send + Sync + Sized + Clone + Debug {
   type Key: Group + GroupEncoding;
   /// The type used to represent addresses on this external network.
   type Address: Address;
+  /// The type used to represent transactions on this external network.
+  type Transaction: Transaction;
   /// The type used to represent received outputs on this external network.
-  type Output: ReceivedOutput<Self::Key, Self::Address>;
+  type Output: ReceivedOutput<
+    Self::Key,
+    Self::Address,
+    TransactionId = <Self::Transaction as Transaction>::Id,
+  >;
+  /// The type used to represent an Eventuality for a transaction on this external network.
+  type Eventuality: Eventuality<
+    OutputId = <Self::Output as ReceivedOutput<Self::Key, Self::Address>>::Id,
+  >;

   /// The ID of this block.
   fn id(&self) -> [u8; 32];

   /// Scan all outputs within this block to find the outputs spendable by this key.
   fn scan_for_outputs(&self, key: Self::Key) -> Vec<Self::Output>;
+
+  /// Check if this block resolved any Eventualities.
+  ///
+  /// Returns the resolved Eventualities, indexed by the ID of the transactions which resolved
+  /// them.
+ fn check_for_eventuality_resolutions( + &self, + eventualities: &mut EventualityTracker, + ) -> HashMap<::Id, Self::Eventuality>; } diff --git a/processor/primitives/src/eventuality.rs b/processor/primitives/src/eventuality.rs index 6e16637df..7203031b2 100644 --- a/processor/primitives/src/eventuality.rs +++ b/processor/primitives/src/eventuality.rs @@ -1,8 +1,12 @@ -use std::collections::HashMap; -use std::io; +use std::{io, collections::HashMap}; + +use crate::Id; /// A description of a transaction which will eventually happen. pub trait Eventuality: Sized + Send + Sync { + /// The type used to identify a received output. + type OutputId: Id; + /// A unique byte sequence which can be used to identify potentially resolving transactions. /// /// Both a transaction and an Eventuality are expected to be able to yield lookup sequences. @@ -15,6 +19,9 @@ pub trait Eventuality: Sized + Send + Sync { /// identified, the full check is performed. fn lookup(&self) -> Vec; + /// The output this plan forwarded. + fn forwarded_output(&self) -> Option; + /// Read an Eventuality. fn read(reader: &mut R) -> io::Result; /// Serialize an Eventuality to a `Vec`. diff --git a/processor/primitives/src/lib.rs b/processor/primitives/src/lib.rs index dc64facf4..f796a13a6 100644 --- a/processor/primitives/src/lib.rs +++ b/processor/primitives/src/lib.rs @@ -2,7 +2,7 @@ #![doc = include_str!("../README.md")] #![deny(missing_docs)] -use core::fmt::Debug; +use core::{hash::Hash, fmt::Debug}; use group::GroupEncoding; @@ -29,6 +29,8 @@ pub trait Id: + Clone + Default + PartialEq + + Eq + + Hash + AsRef<[u8]> + AsMut<[u8]> + Debug diff --git a/processor/primitives/src/output.rs b/processor/primitives/src/output.rs index 2b96d2299..152a59e08 100644 --- a/processor/primitives/src/output.rs +++ b/processor/primitives/src/output.rs @@ -89,12 +89,16 @@ pub trait ReceivedOutput: { /// The type used to identify this output. type Id: 'static + Id; + /// The type used to identify the transaction which created this output. + type TransactionId: 'static + Id; /// The type of this output. fn kind(&self) -> OutputType; /// The ID of this output. fn id(&self) -> Self::Id; + /// The ID of the transaction which created this output. + fn transaction_id(&self) -> Self::TransactionId; /// The key this output was received by. fn key(&self) -> K; diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 0710ae309..09807a09c 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -1,15 +1,13 @@ use core::marker::PhantomData; use std::io; -use group::GroupEncoding; - -use scale::{Encode, Decode}; +use scale::Encode; use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db}; use serai_in_instructions_primitives::InInstructionWithBalance; -use primitives::{Id, ReceivedOutput, Block, BorshG}; +use primitives::{ReceivedOutput, BorshG}; use crate::{lifetime::LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor}; diff --git a/processor/scanner/src/eventuality.rs b/processor/scanner/src/eventuality.rs index cb91ca42c..b223fd799 100644 --- a/processor/scanner/src/eventuality.rs +++ b/processor/scanner/src/eventuality.rs @@ -1,9 +1,9 @@ use serai_db::{Db, DbTxn}; -use primitives::{Id, ReceivedOutput, Block}; +use primitives::{OutputType, ReceivedOutput, Block}; // TODO: Localize to EventualityDb? 
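To illustrate the lookup mechanism the `Eventuality` trait describes, a hedged sketch of resolution matching. The `active_eventualities` field mirrors its usage later in this series; the simplified transaction type, and eliding the full check a real implementation performs after a lookup hit, are assumptions:

use std::collections::HashMap;

// Simplified tracker mirroring EventualityTracker's active_eventualities map
struct EventualityTracker<E> {
  active_eventualities: HashMap<Vec<u8>, E>,
}

// A transaction reduced to its ID and its lookup sequence
struct Transaction {
  id: [u8; 32],
  lookup: Vec<u8>,
}

fn check_for_eventuality_resolutions<E>(
  transactions: &[Transaction],
  tracker: &mut EventualityTracker<E>,
) -> HashMap<[u8; 32], E> {
  let mut resolved = HashMap::new();
  for transaction in transactions {
    // An Eventuality and any transaction potentially resolving it share a
    // lookup sequence, letting a cheap map access filter candidates before
    // the full check a real implementation would perform here
    if let Some(eventuality) = tracker.active_eventualities.remove(&transaction.lookup) {
      resolved.insert(transaction.id, eventuality);
    }
  }
  resolved
}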
-use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan};
+use crate::{lifetime::LifetimeStage, db::ScannerDb, ScannerFeed, ContinuallyRan};

 /*
   Note: The following assumes there's some value, `CONFIRMATIONS`, and the finalized block we
@@ -109,12 +109,105 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for EventualityTask<D, S> {

       iterated = true;

-      // TODO: Not only check/clear eventualities, if this eventuality forwarded an output, queue
-      // it to be reported in however many blocks
-      todo!("TODO");
+      // TODO: Add a helper to fetch an indexed block, de-duplicate with scan
+      let block = match self.feed.block_by_number(b).await {
+        Ok(block) => block,
+        Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?,
+      };
+
+      // Check the ID of this block is the expected ID
+      {
+        let expected =
+          ScannerDb::<S>::block_id(&self.db, b).expect("scannable block didn't have its ID saved");
+        if block.id() != expected {
+          panic!(
+            "finalized chain reorganized from {} to {} at {}",
+            hex::encode(expected),
+            hex::encode(block.id()),
+            b
+          );
+        }
+      }
+
+      log::info!("checking eventuality completions in block: {} ({b})", hex::encode(block.id()));
+
+      /*
+        This is proper as the keys for the next to scan block (at most `WINDOW_LENGTH` ahead,
+        which is `<= CONFIRMATIONS`) will be the keys to use here.
+
+        If we had added a new key (which hasn't actually activated by the block we're currently
+        working on), it won't have any Eventualities for at least `CONFIRMATIONS` blocks (so it'd
+        have no impact here).
+
+        As for retiring a key, that's done on this task's timeline. We ensure we don't bork the
+        scanner by officially retiring the key `WINDOW_LENGTH` blocks in the future (ensuring the
+        scanner never has a malleable view of the keys).
+      */
+      // TODO: Ensure the add key/remove key DB fns are called by the same task to prevent issues
+      // there
+      // TODO: On register eventuality, assert the above timeline assumptions
+      let mut keys = ScannerDb::<S>::active_keys_as_of_next_to_scan_for_outputs_block(&self.db)
+        .expect("scanning for a blockchain without any keys set");

       let mut txn = self.db.txn();
+
+      // Fetch the External outputs we reported, and therefore should yield after handling this
+      // block
+      let mut outputs = ScannerDb::<S>::in_instructions(&txn, b)
+        .expect("handling eventualities/outputs for block which didn't set its InInstructions")
+        .into_iter()
+        .map(|output| output.output)
+        .collect::<Vec<_>>();
+
+      for key in keys {
+        let completed_eventualities = {
+          let mut eventualities = ScannerDb::<S>::eventualities(&txn, key.key);
+          let completed_eventualities = block.check_for_eventuality_resolutions(&mut eventualities);
+          ScannerDb::<S>::set_eventualities(&mut txn, eventualities);
+          completed_eventualities
+        };
+
+        // Fetch all non-External outputs
+        let mut non_external_outputs = block.scan_for_outputs(key.key);
+        non_external_outputs.retain(|output| output.kind() != OutputType::External);
+        // Drop any outputs less than the dust limit
+        non_external_outputs.retain(|output| {
+          let balance = output.balance();
+          balance.amount.0 >= self.feed.dust(balance.coin).0
+        });
+
+        /*
+          Now that we have all non-External outputs, we filter them to be only the outputs which
+          are from transactions which resolve our own Eventualities *if* the multisig is retiring.
+          This implements step 6 of `spec/processor/Multisig Rotation.md`.
+
+          We may receive a Change output. The only issue with accumulating this would be if it
+          extends the multisig's lifetime (by increasing the amount of outputs yet to be
+          forwarded). By checking it's one we made, either:
+          1) It's a legitimate Change output to be forwarded
+          2) It's a Change output created by a user burning coins (specifying the Change address),
+             which can only be created while the multisig is actively handling `Burn`s (therefore
+             ensuring this multisig cannot be kept alive ad-infinitum)
+
+          The commentary on Change outputs also applies to Branch/Forwarded. They'll presumably get
+          ignored if not usable however.
+        */
+        if key.stage == LifetimeStage::Finishing {
+          non_external_outputs
+            .retain(|output| completed_eventualities.contains_key(&output.transaction_id()));
+        }
+
+        // Now, we iterate over all Forwarded outputs and queue their InInstructions
+        todo!("TODO");
+
+        // Accumulate all of these outputs
+        outputs.extend(non_external_outputs);
+      }
+
+      let outputs_to_return = ScannerDb::<S>::take_queued_returns(&mut txn, b);
+
       // Update the next to check block
+      // TODO: Two-stage process
       ScannerDb::<S>::set_next_to_check_for_eventualities_block(&mut txn, next_to_check);
       txn.commit();
     }
diff --git a/processor/scanner/src/index.rs b/processor/scanner/src/index.rs
index de68522ed..b5c4fd0f2 100644
--- a/processor/scanner/src/index.rs
+++ b/processor/scanner/src/index.rs
@@ -1,6 +1,6 @@
 use serai_db::{Db, DbTxn};

-use primitives::{Id, BlockHeader};
+use primitives::BlockHeader;

 // TODO: Localize to IndexDb?
 use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan};
diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs
index 5b5f6fe25..b683a4b79 100644
--- a/processor/scanner/src/lib.rs
+++ b/processor/scanner/src/lib.rs
@@ -34,8 +34,8 @@ pub trait ScannerFeed: Send + Sync {

   /// The amount of blocks to process in parallel.
   ///
-  /// This value must be at least `1`. This value should be the worst-case latency to handle a
-  /// block divided by the expected block time.
+  /// This must be at least `1`. This must be less than or equal to `CONFIRMATIONS`. This value
+  /// should be the worst-case latency to handle a block divided by the expected block time.
   const WINDOW_LENGTH: u64;

   /// The amount of blocks which will occur in 10 minutes (approximate).
@@ -83,7 +83,8 @@ pub trait ScannerFeed: Send + Sync {

   /// The dust threshold for the specified coin.
   ///
-  /// This should be a value worth handling at a human level.
+  /// This MUST be constant. Serai MUST NOT create internal outputs worth less than this. This
+  /// SHOULD be a value worth handling at a human level.
   fn dust(&self, coin: Coin) -> Amount;
 }

@@ -188,6 +189,8 @@ impl<S: ScannerFeed> Scanner<S> {
   ///
   /// This means this block was ordered on Serai in relation to `Burn` events, and all validators
   /// have achieved synchrony on it.
+  // TODO: If we're acknowledge block `b`, the Eventuality task was already eligible to check it
+  // for Eventualities. We need this to block until the Eventuality task has actually checked it.
   pub fn acknowledge_block(
     &mut self,
     block_number: u64,
diff --git a/processor/scanner/src/lifetime.rs b/processor/scanner/src/lifetime.rs
index 6d189bca7..09df7a37c 100644
--- a/processor/scanner/src/lifetime.rs
+++ b/processor/scanner/src/lifetime.rs
@@ -6,6 +6,7 @@ use crate::ScannerFeed;
 /// rotation process.
Steps 7-8 regard a multisig which isn't retiring yet retired, and /// accordingly, no longer exists, so they are not modelled here (as this only models active /// multisigs. Inactive multisigs aren't represented in the first place). +#[derive(PartialEq)] pub(crate) enum LifetimeStage { /// A new multisig, once active, shouldn't actually start receiving coins until several blocks /// later. If any UI is premature in sending to this multisig, we delay to report the outputs to diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs index 37ef8874e..2c35d0f56 100644 --- a/processor/scanner/src/report.rs +++ b/processor/scanner/src/report.rs @@ -3,7 +3,8 @@ use serai_db::{Db, DbTxn}; use serai_primitives::BlockHash; use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; -use primitives::{Id, OutputType, Block}; + +use primitives::ReceivedOutput; // TODO: Localize to ReportDb? use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; @@ -48,8 +49,20 @@ impl ContinuallyRan for ReportTask { // If this block is notable, create the Batch(s) for it if ScannerDb::::is_block_notable(&txn, b) { - let in_instructions = ScannerDb::::in_instructions(&txn, b) - .expect("reporting block which didn't set its InInstructions"); + let in_instructions = { + let mut in_instructions = ScannerDb::::in_instructions(&txn, b) + .expect("reporting block which didn't set its InInstructions"); + // Sort these before reporting them in case anything we did is non-deterministic/to have + // a well-defined order (not implicit to however we got this result, enabling different + // methods to be used in the future) + in_instructions.sort_by(|a, b| { + use core::cmp::{Ordering, Ord}; + let res = a.output.id().as_ref().cmp(&b.output.id().as_ref()); + assert!(res != Ordering::Equal); + res + }); + in_instructions + }; let network = S::NETWORK; let block_hash = diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index 8c8e07b35..2bfb112f2 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -1,14 +1,12 @@ -use group::GroupEncoding; - -use scale::{Encode, Decode}; +use scale::Decode; use serai_db::{Db, DbTxn}; -use serai_primitives::{MAX_DATA_LEN, ExternalAddress}; +use serai_primitives::MAX_DATA_LEN; use serai_in_instructions_primitives::{ Shorthand, RefundableInInstruction, InInstruction, InInstructionWithBalance, }; -use primitives::{Id, OutputType, ReceivedOutput, Block}; +use primitives::{OutputType, ReceivedOutput, Block}; // TODO: Localize to ScanDb? 
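To ground the lifetime windows in numbers, a hedged worked example of the boundaries `calculate_stage_and_reporting_start_block` derives. The parameters are assumptions for illustration (six confirmations, ten-minute blocks), not any network's real values:

const CONFIRMATIONS: u64 = 6;
const TEN_MINUTES: u64 = 1;

fn main() {
  let activation_block_number: u64 = 1_000;
  let next_keys_activation_block_number: u64 = 2_000;

  // ActiveYetNotReporting ends, and reporting starts, at this block
  let reporting_start_block = activation_block_number + CONFIRMATIONS + TEN_MINUTES;
  assert_eq!(reporting_start_block, 1_007);

  // Active ends once the next key has been active for the same window
  let using_new_for_change = next_keys_activation_block_number + CONFIRMATIONS + TEN_MINUTES;
  // UsingNewForChange lasts a further CONFIRMATIONS blocks
  let forwarding = using_new_for_change + CONFIRMATIONS;
  // Forwarding lasts a further six hours (6 * 6 ten-minute periods)
  let finishing = forwarding + (6 * 6 * TEN_MINUTES);
  assert_eq!((using_new_for_change, forwarding, finishing), (2_007, 2_013, 2_049));
}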
use crate::{ @@ -100,7 +98,7 @@ impl ContinuallyRan for ScanForOutputsTask { log::info!("scanning block: {} ({b})", hex::encode(block.id())); assert_eq!(ScannerDb::::next_to_scan_for_outputs_block(&self.db).unwrap(), b); - let mut keys = ScannerDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db) + let keys = ScannerDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db) .expect("scanning for a blockchain without any keys set"); let mut txn = self.db.txn(); @@ -154,7 +152,7 @@ impl ContinuallyRan for ScanForOutputsTask { if output.kind() != OutputType::External { // While we don't report these outputs, we still need consensus on this block and // accordingly still need to set it as notable - let balance = outputs.balance(); + let balance = output.balance(); // We ensure it's over the dust limit to prevent people sending 1 satoshi from causing // an invocation of a consensus/signing protocol if balance.amount.0 >= self.feed.dust(balance.coin).0 { From ebf09904b2c600db2007845f5adada2e6803d8ce Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 26 Aug 2024 22:49:57 -0400 Subject: [PATCH 021/179] Add a callback to accumulate outputs and return the new Eventualities --- processor/scanner/src/eventuality.rs | 30 +++++++++++++++++++++++----- processor/scanner/src/index.rs | 2 +- processor/scanner/src/lib.rs | 29 +++++++++++++++++++-------- processor/scanner/src/report.rs | 2 +- processor/scanner/src/scan.rs | 2 +- 5 files changed, 49 insertions(+), 16 deletions(-) diff --git a/processor/scanner/src/eventuality.rs b/processor/scanner/src/eventuality.rs index b223fd799..83ab4ebaf 100644 --- a/processor/scanner/src/eventuality.rs +++ b/processor/scanner/src/eventuality.rs @@ -1,9 +1,11 @@ -use serai_db::{Db, DbTxn}; +use group::GroupEncoding; + +use serai_db::{DbTxn, Db}; use primitives::{OutputType, ReceivedOutput, Block}; // TODO: Localize to EventualityDb? -use crate::{lifetime::LifetimeStage, db::ScannerDb, ScannerFeed, ContinuallyRan}; +use crate::{lifetime::LifetimeStage, db::ScannerDb, ScannerFeed, KeyFor, Scheduler, ContinuallyRan}; /* Note: The following assumes there's some value, `CONFIRMATIONS`, and the finalized block we @@ -53,13 +55,14 @@ use crate::{lifetime::LifetimeStage, db::ScannerDb, ScannerFeed, ContinuallyRan} This forms a backlog only if the latency of scanning, acknowledgement, and intake (including checking Eventualities) exceeds the window duration (the desired property). */ -struct EventualityTask { +struct EventualityTask> { db: D, feed: S, + scheduler: Sch, } #[async_trait::async_trait] -impl ContinuallyRan for EventualityTask { +impl> ContinuallyRan for EventualityTask { async fn run_iteration(&mut self) -> Result { /* The set of Eventualities only increase when a block is acknowledged. 
Accordingly, we can only @@ -168,6 +171,7 @@ impl ContinuallyRan for EventualityTask { }; // Fetch all non-External outputs + // TODO: Have a scan_for_outputs_ext which sorts for us let mut non_external_outputs = block.scan_for_outputs(key.key); non_external_outputs.retain(|output| output.kind() != OutputType::External); // Drop any outputs less than the dust limit @@ -206,8 +210,24 @@ impl ContinuallyRan for EventualityTask { let outputs_to_return = ScannerDb::::take_queued_returns(&mut txn, b); + let new_eventualities = + self.scheduler.accumulate_outputs_and_return_outputs(&mut txn, outputs, outputs_to_return); + for (key, new_eventualities) in new_eventualities { + let key = { + let mut key_repr = as GroupEncoding>::Repr::default(); + assert_eq!(key.len(), key_repr.as_ref().len()); + key_repr.as_mut().copy_from_slice(&key); + KeyFor::::from_bytes(&key_repr).unwrap() + }; + + let mut eventualities = ScannerDb::::eventualities(&txn, key.key); + for new_eventuality in new_eventualities { + eventualities.active_eventualities.insert(new_eventuality.lookup(), new_eventuality); + } + ScannerDb::::set_eventualities(&mut txn, eventualities); + } + // Update the next to check block - // TODO: Two-stage process ScannerDb::::set_next_to_check_for_eventualities_block(&mut txn, next_to_check); txn.commit(); } diff --git a/processor/scanner/src/index.rs b/processor/scanner/src/index.rs index b5c4fd0f2..1d2780151 100644 --- a/processor/scanner/src/index.rs +++ b/processor/scanner/src/index.rs @@ -1,4 +1,4 @@ -use serai_db::{Db, DbTxn}; +use serai_db::{DbTxn, Db}; use primitives::BlockHeader; diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index b683a4b79..7919f0063 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -1,8 +1,12 @@ use core::{marker::PhantomData, fmt::Debug, time::Duration}; +use std::collections::HashMap; use tokio::sync::mpsc; +use serai_db::DbTxn; + use serai_primitives::{NetworkId, Coin, Amount}; + use primitives::Block; // Logic for deciding where in its lifetime a multisig is. @@ -91,6 +95,21 @@ pub trait ScannerFeed: Send + Sync { type KeyFor = <::Block as Block>::Key; type AddressFor = <::Block as Block>::Address; type OutputFor = <::Block as Block>::Output; +type EventualityFor = <::Block as Block>::Eventuality; + +/// The object responsible for accumulating outputs and planning new transactions. +pub trait Scheduler { + /// Accumulate outputs into the scheduler, yielding the Eventualities now to be scanned for. + /// + /// The `Vec` used as the key in the returned HashMap should be the encoded key these + /// Eventualities are for. + fn accumulate_outputs_and_return_outputs( + &mut self, + txn: &mut impl DbTxn, + outputs: Vec>, + outputs_to_return: Vec>, + ) -> HashMap, Vec>>; +} /// A handle to immediately run an iteration of a task. #[derive(Clone)] @@ -189,8 +208,9 @@ impl Scanner { /// /// This means this block was ordered on Serai in relation to `Burn` events, and all validators /// have achieved synchrony on it. - // TODO: If we're acknowledge block `b`, the Eventuality task was already eligible to check it + // TODO: If we're acknowledging block `b`, the Eventuality task was already eligible to check it // for Eventualities. We need this to block until the Eventuality task has actually checked it. + // TODO: Does the prior TODO hold with how the callback is now handled? 
pub fn acknowledge_block( &mut self, block_number: u64, @@ -198,13 +218,6 @@ impl Scanner { ) -> Vec> { todo!("TODO") } - - /// Register the Eventualities caused by a block. - // TODO: Replace this with a callback returned by acknowledge_block which panics if it's not - // called yet dropped - pub fn register_eventualities(&mut self, block_number: u64, eventualities: Vec<()>) { - todo!("TODO") - } } /* diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs index 2c35d0f56..ec87845f4 100644 --- a/processor/scanner/src/report.rs +++ b/processor/scanner/src/report.rs @@ -1,5 +1,5 @@ use scale::Encode; -use serai_db::{Db, DbTxn}; +use serai_db::{DbTxn, Db}; use serai_primitives::BlockHash; use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index 2bfb112f2..cd010d7c2 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -1,5 +1,5 @@ use scale::Decode; -use serai_db::{Db, DbTxn}; +use serai_db::{DbTxn, Db}; use serai_primitives::MAX_DATA_LEN; use serai_in_instructions_primitives::{ From a8616375a752a526ef49f10d22688d259cef9a1b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 26 Aug 2024 22:57:28 -0400 Subject: [PATCH 022/179] Move ContinuallyRan into primitives I'm unsure where else it'll be used within the processor, yet it's generally useful and I don't want to make a dedicated crate yet. --- Cargo.lock | 2 + processor/primitives/Cargo.toml | 3 ++ processor/primitives/src/lib.rs | 3 ++ processor/primitives/src/task.rs | 93 ++++++++++++++++++++++++++++++++ processor/scanner/src/lib.rs | 84 +---------------------------- 5 files changed, 102 insertions(+), 83 deletions(-) create mode 100644 processor/primitives/src/task.rs diff --git a/Cargo.lock b/Cargo.lock index f887bd8ca..4cc54e15e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8654,8 +8654,10 @@ dependencies = [ "async-trait", "borsh", "group", + "log", "parity-scale-codec", "serai-primitives", + "tokio", ] [[package]] diff --git a/processor/primitives/Cargo.toml b/processor/primitives/Cargo.toml index dd59c0a8e..9427a6042 100644 --- a/processor/primitives/Cargo.toml +++ b/processor/primitives/Cargo.toml @@ -25,3 +25,6 @@ serai-primitives = { path = "../../substrate/primitives", default-features = fal scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["macros", "sync", "time"] } diff --git a/processor/primitives/src/lib.rs b/processor/primitives/src/lib.rs index f796a13a6..b0b7ae04e 100644 --- a/processor/primitives/src/lib.rs +++ b/processor/primitives/src/lib.rs @@ -9,6 +9,9 @@ use group::GroupEncoding; use scale::{Encode, Decode}; use borsh::{BorshSerialize, BorshDeserialize}; +/// A module for task-related structs and functionality. +pub mod task; + mod output; pub use output::*; diff --git a/processor/primitives/src/task.rs b/processor/primitives/src/task.rs new file mode 100644 index 000000000..a7d6153ca --- /dev/null +++ b/processor/primitives/src/task.rs @@ -0,0 +1,93 @@ +use core::time::Duration; + +use tokio::sync::mpsc; + +/// A handle to immediately run an iteration of a task. 
+#[derive(Clone)]
+pub struct RunNowHandle(mpsc::Sender<()>);
+/// An instruction recipient to immediately run an iteration of a task.
+pub struct RunNowRecipient(mpsc::Receiver<()>);
+
+impl RunNowHandle {
+  /// Create a new run-now handle to be assigned to a task.
+  pub fn new() -> (Self, RunNowRecipient) {
+    // Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as
+    // soon as possible
+    let (send, recv) = mpsc::channel(1);
+    (Self(send), RunNowRecipient(recv))
+  }
+
+  /// Tell the task to run now (and not whenever its next iteration on a timer is).
+  ///
+  /// Panics if the task has been dropped.
+  pub fn run_now(&self) {
+    #[allow(clippy::match_same_arms)]
+    match self.0.try_send(()) {
+      Ok(()) => {}
+      // NOP on full, as this task will already be ran as soon as possible
+      Err(mpsc::error::TrySendError::Full(())) => {}
+      Err(mpsc::error::TrySendError::Closed(())) => {
+        panic!("task was unexpectedly closed when calling run_now")
+      }
+    }
+  }
+}
+
+/// A task to be continually ran.
+#[async_trait::async_trait]
+pub trait ContinuallyRan: Sized {
+  /// The amount of seconds before this task should be polled again.
+  const DELAY_BETWEEN_ITERATIONS: u64 = 5;
+  /// The maximum amount of seconds before this task should be run again.
+  ///
+  /// Upon error, the amount of time waited will be linearly increased until this limit.
+  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120;
+
+  /// Run an iteration of the task.
+  ///
+  /// If this returns `true`, all dependents of the task will immediately have a new iteration ran
+  /// (without waiting for whatever timer they were already on).
+  async fn run_iteration(&mut self) -> Result<bool, String>;
+
+  /// Continually run the task.
+  ///
+  /// This returns a channel which can have a message set to immediately trigger a new run of an
+  /// iteration.
+  async fn continually_run(mut self, mut run_now: RunNowRecipient, dependents: Vec<RunNowHandle>) {
+    // The default number of seconds to sleep before running the task again
+    let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS;
+    // The current number of seconds to sleep before running the task again
+    // We increment this upon errors in order to not flood the logs with errors
+    let mut current_sleep_before_next_task = default_sleep_before_next_task;
+    let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| {
+      let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task;
+      // Set a limit of sleeping for two minutes
+      *current_sleep_before_next_task = new_sleep.min(Self::MAX_DELAY_BETWEEN_ITERATIONS);
+    };
+
+    loop {
+      match self.run_iteration().await {
+        Ok(run_dependents) => {
+          // Upon a successful (error-free) loop iteration, reset the amount of time we sleep
+          current_sleep_before_next_task = default_sleep_before_next_task;
+
+          if run_dependents {
+            for dependent in &dependents {
+              dependent.run_now();
+            }
+          }
+        }
+        Err(e) => {
+          log::debug!("{}", e);
+          increase_sleep_before_next_task(&mut current_sleep_before_next_task);
+        }
+      }
+
+      // Don't run the task again for another few seconds UNLESS told to run now
+      tokio::select!
{ + () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {}, + msg = run_now.0.recv() => assert_eq!(msg, Some(()), "run now handle was dropped"), + } + } + } +} diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 7919f0063..822acb27d 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -7,7 +7,7 @@ use serai_db::DbTxn; use serai_primitives::{NetworkId, Coin, Amount}; -use primitives::Block; +use primitives::{task::*, Block}; // Logic for deciding where in its lifetime a multisig is. mod lifetime; @@ -111,88 +111,6 @@ pub trait Scheduler { ) -> HashMap, Vec>>; } -/// A handle to immediately run an iteration of a task. -#[derive(Clone)] -pub(crate) struct RunNowHandle(mpsc::Sender<()>); -/// An instruction recipient to immediately run an iteration of a task. -pub(crate) struct RunNowRecipient(mpsc::Receiver<()>); - -impl RunNowHandle { - /// Create a new run-now handle to be assigned to a task. - pub(crate) fn new() -> (Self, RunNowRecipient) { - // Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as - // soon as possible - let (send, recv) = mpsc::channel(1); - (Self(send), RunNowRecipient(recv)) - } - - /// Tell the task to run now (and not whenever its next iteration on a timer is). - /// - /// Panics if the task has been dropped. - pub(crate) fn run_now(&self) { - #[allow(clippy::match_same_arms)] - match self.0.try_send(()) { - Ok(()) => {} - // NOP on full, as this task will already be ran as soon as possible - Err(mpsc::error::TrySendError::Full(())) => {} - Err(mpsc::error::TrySendError::Closed(())) => { - panic!("task was unexpectedly closed when calling run_now") - } - } - } -} - -#[async_trait::async_trait] -pub(crate) trait ContinuallyRan: Sized { - /// Run an iteration of the task. - /// - /// If this returns `true`, all dependents of the task will immediately have a new iteration ran - /// (without waiting for whatever timer they were already on). - async fn run_iteration(&mut self) -> Result; - - /// Continually run the task. - /// - /// This returns a channel which can have a message set to immediately trigger a new run of an - /// iteration. - async fn continually_run(mut self, mut run_now: RunNowRecipient, dependents: Vec) { - // The default number of seconds to sleep before running the task again - let default_sleep_before_next_task = 5; - // The current number of seconds to sleep before running the task again - // We increment this upon errors in order to not flood the logs with errors - let mut current_sleep_before_next_task = default_sleep_before_next_task; - let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| { - let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task; - // Set a limit of sleeping for two minutes - *current_sleep_before_next_task = new_sleep.max(120); - }; - - loop { - match self.run_iteration().await { - Ok(run_dependents) => { - // Upon a successful (error-free) loop iteration, reset the amount of time we sleep - current_sleep_before_next_task = default_sleep_before_next_task; - - if run_dependents { - for dependent in &dependents { - dependent.run_now(); - } - } - } - Err(e) => { - log::debug!("{}", e); - increase_sleep_before_next_task(&mut current_sleep_before_next_task); - } - } - - // Don't run the task again for another few seconds UNLESS told to run now - tokio::select! 
{ - () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {}, - msg = run_now.0.recv() => assert_eq!(msg, Some(()), "run now handle was dropped"), - } - } - } -} - /// A representation of a scanner. pub struct Scanner(PhantomData); impl Scanner { From 2dccfa886fe56d3872af9c2072138f25233d09cf Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 26 Aug 2024 23:15:19 -0400 Subject: [PATCH 023/179] Add helper methods Has fetched blocks checked to be the indexed blocks. Has scanned outputs be sorted, meaning they aren't subject to implicit order/may be non-deterministic (such as if handled by a threadpool). --- processor/primitives/src/block.rs | 4 ++- processor/scanner/src/eventuality.rs | 32 ++++------------- processor/scanner/src/index.rs | 2 +- processor/scanner/src/lib.rs | 52 ++++++++++++++++++++++++++-- processor/scanner/src/scan.rs | 23 ++---------- 5 files changed, 63 insertions(+), 50 deletions(-) diff --git a/processor/primitives/src/block.rs b/processor/primitives/src/block.rs index 5ca2acec9..6f603ab2b 100644 --- a/processor/primitives/src/block.rs +++ b/processor/primitives/src/block.rs @@ -54,7 +54,9 @@ pub trait Block: Send + Sync + Sized + Clone + Debug { fn id(&self) -> [u8; 32]; /// Scan all outputs within this block to find the outputs spendable by this key. - fn scan_for_outputs(&self, key: Self::Key) -> Vec; + /// + /// No assumption on the order of the returned outputs is made. + fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec; /// Check if this block resolved any Eventualities. /// diff --git a/processor/scanner/src/eventuality.rs b/processor/scanner/src/eventuality.rs index 83ab4ebaf..8fc182466 100644 --- a/processor/scanner/src/eventuality.rs +++ b/processor/scanner/src/eventuality.rs @@ -5,13 +5,11 @@ use serai_db::{DbTxn, Db}; use primitives::{OutputType, ReceivedOutput, Block}; // TODO: Localize to EventualityDb? -use crate::{lifetime::LifetimeStage, db::ScannerDb, ScannerFeed, KeyFor, Scheduler, ContinuallyRan}; +use crate::{ + lifetime::LifetimeStage, db::ScannerDb, BlockExt, ScannerFeed, KeyFor, Scheduler, ContinuallyRan, +}; /* - Note: The following assumes there's some value, `CONFIRMATIONS`, and the finalized block we - operate on is `CONFIRMATIONS` blocks deep. This is true for Proof-of-Work chains yet not the API - actively used here. - When we scan a block, we receive outputs. When this block is acknowledged, we accumulate those outputs into some scheduler, potentially causing certain transactions to begin their signing protocol. 
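A hedged sketch of the window bound underpinning the backlog argument above, mirroring the scanner's `latest_scannable_block` bound with an assumed `WINDOW_LENGTH`:

// WINDOW_LENGTH mirrors ScannerFeed::WINDOW_LENGTH; the value is illustrative
const WINDOW_LENGTH: u64 = 3;

// The scan task may only proceed to the eventuality task's progress plus the
// window, so blocks awaiting acknowledgement form a bounded backlog
fn latest_scannable_block(next_to_check_for_eventualities: u64) -> u64 {
  // This is an inclusive bound, hence the subtraction of one
  next_to_check_for_eventualities + WINDOW_LENGTH - 1
}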
@@ -112,25 +110,7 @@ impl> ContinuallyRan for EventualityTas iterated = true; - // TODO: Add a helper to fetch an indexed block, de-duplicate with scan - let block = match self.feed.block_by_number(b).await { - Ok(block) => block, - Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?, - }; - - // Check the ID of this block is the expected ID - { - let expected = - ScannerDb::::block_id(&self.db, b).expect("scannable block didn't have its ID saved"); - if block.id() != expected { - panic!( - "finalized chain reorganized from {} to {} at {}", - hex::encode(expected), - hex::encode(block.id()), - b - ); - } - } + let block = self.feed.block_by_number(b).await?; log::info!("checking eventuality completions in block: {} ({b})", hex::encode(block.id())); @@ -171,7 +151,6 @@ impl> ContinuallyRan for EventualityTas }; // Fetch all non-External outputs - // TODO: Have a scan_for_outputs_ext which sorts for us let mut non_external_outputs = block.scan_for_outputs(key.key); non_external_outputs.retain(|output| output.kind() != OutputType::External); // Drop any outputs less than the dust limit @@ -210,6 +189,7 @@ impl> ContinuallyRan for EventualityTas let outputs_to_return = ScannerDb::::take_queued_returns(&mut txn, b); + // TODO: This also has to intake Burns let new_eventualities = self.scheduler.accumulate_outputs_and_return_outputs(&mut txn, outputs, outputs_to_return); for (key, new_eventualities) in new_eventualities { @@ -220,7 +200,7 @@ impl> ContinuallyRan for EventualityTas KeyFor::::from_bytes(&key_repr).unwrap() }; - let mut eventualities = ScannerDb::::eventualities(&txn, key.key); + let mut eventualities = ScannerDb::::eventualities(&txn, key); for new_eventuality in new_eventualities { eventualities.active_eventualities.insert(new_eventuality.lookup(), new_eventuality); } diff --git a/processor/scanner/src/index.rs b/processor/scanner/src/index.rs index 1d2780151..e3c5c6acd 100644 --- a/processor/scanner/src/index.rs +++ b/processor/scanner/src/index.rs @@ -43,7 +43,7 @@ impl ContinuallyRan for IndexFinalizedTask { // Index the hashes of all blocks until the latest finalized block for b in (our_latest_finalized + 1) ..= latest_finalized { - let block = match self.feed.block_header_by_number(b).await { + let block = match self.feed.unchecked_block_header_by_number(b).await { Ok(block) => block, Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?, }; diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 822acb27d..5b41301ef 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -23,6 +23,23 @@ mod eventuality; /// Task which reports `Batch`s to Substrate. mod report; +/// Extension traits around Block. +pub(crate) trait BlockExt: Block { + fn scan_for_outputs(&self, key: Self::Key) -> Vec; +} +impl BlockExt for B { + fn scan_for_outputs(&self, key: Self::Key) -> Vec { + let mut outputs = self.scan_for_outputs_unordered(); + outputs.sort_by(|a, b| { + use core::cmp::{Ordering, Ord}; + let res = a.id().as_ref().cmp(&b.id().as_ref()); + assert!(res != Ordering::Equal, "scanned two outputs within a block with the same ID"); + res + }); + outputs + } +} + /// A feed usable to scan a blockchain. /// /// This defines the primitive types used, along with various getters necessary for indexing. @@ -68,13 +85,44 @@ pub trait ScannerFeed: Send + Sync { async fn latest_finalized_block_number(&self) -> Result; /// Fetch a block header by its number. 
-  async fn block_header_by_number(
+  ///
+  /// This does not check the returned BlockHeader is the header for the block we indexed.
+  async fn unchecked_block_header_by_number(
     &self,
     number: u64,
   ) -> Result<<Self::Block as Block>::Header, Self::EphemeralError>;

   /// Fetch a block by its number.
-  async fn block_by_number(&self, number: u64) -> Result<Self::Block, Self::EphemeralError>;
+  ///
+  /// This does not check the returned Block is the block we indexed.
+  async fn unchecked_block_by_number(
+    &self,
+    number: u64,
+  ) -> Result<Self::Block, Self::EphemeralError>;
+
+  /// Fetch a block by its number.
+  ///
+  /// Panics if the block requested wasn't indexed.
+  async fn block_by_number(&self, getter: &impl Get, number: u64) -> Result<Self::Block, String> {
+    let block = match self.unchecked_block_by_number(number).await {
+      Ok(block) => block,
+      Err(e) => Err(format!("couldn't fetch block {number}: {e:?}"))?,
+    };
+
+    // Check the ID of this block is the expected ID
+    {
+      let expected = ScannerDb::<Self>::block_id(&self.db, number)
+        .expect("requested a block which wasn't indexed");
+      if block.id() != expected {
+        panic!(
+          "finalized chain reorganized from {} to {} at {}",
+          hex::encode(expected),
+          hex::encode(block.id()),
+          number,
+        );
+      }
+    }
+
+    Ok(block)
+  }

   /// The cost to aggregate an input as of the specified block.
diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs
index cd010d7c2..ddc1110e0 100644
--- a/processor/scanner/src/scan.rs
+++ b/processor/scanner/src/scan.rs
@@ -76,24 +76,7 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanForOutputsTask<D, S> {
         .expect("ScanForOutputsTask run before writing the start block");

     for b in next_to_scan ..= latest_scannable {
-      let block = match self.feed.block_by_number(b).await {
-        Ok(block) => block,
-        Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?,
-      };
-
-      // Check the ID of this block is the expected ID
-      {
-        let expected =
-          ScannerDb::<S>::block_id(&self.db, b).expect("scannable block didn't have its ID saved");
-        if block.id() != expected {
-          panic!(
-            "finalized chain reorganized from {} to {} at {}",
-            hex::encode(expected),
-            hex::encode(block.id()),
-            b
-          );
-        }
-      }
+      let block = self.feed.block_by_number(b).await?;

       log::info!("scanning block: {} ({b})", hex::encode(block.id()));

@@ -126,7 +109,7 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanForOutputsTask<D, S> {
             worthwhile, and even if they're not economically, they are technically).

             The alternative, we drop outputs here with a generic filter rule and then report back
-            the insolvency created, still doesn't work as we'd only be creating if an insolvency if
+            the insolvency created, still doesn't work as we'd only be creating an insolvency if
             the output was actually made by us (and not simply someone else sending in).

             We can have the Eventuality task report the insolvency, yet that requires the scanner
             be responsible for such filter logic.
It's more flexible, and has a cleaner API, From 414060e5a9409efa437e95297872a904c99fa777 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 26 Aug 2024 23:18:31 -0400 Subject: [PATCH 024/179] Make index a folder, not a file --- processor/scanner/src/{index.rs => index/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename processor/scanner/src/{index.rs => index/mod.rs} (100%) diff --git a/processor/scanner/src/index.rs b/processor/scanner/src/index/mod.rs similarity index 100% rename from processor/scanner/src/index.rs rename to processor/scanner/src/index/mod.rs From 9810dbf0b6adbe6c6728125e927690a008b95d3b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 26 Aug 2024 23:24:49 -0400 Subject: [PATCH 025/179] Make a dedicated IndexDb --- processor/scanner/src/db.rs | 29 +++---------------------- processor/scanner/src/index/db.rs | 34 ++++++++++++++++++++++++++++++ processor/scanner/src/index/mod.rs | 16 ++++++++------ processor/scanner/src/lib.rs | 2 +- 4 files changed, 47 insertions(+), 34 deletions(-) create mode 100644 processor/scanner/src/index/db.rs diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 09807a09c..4ac6badaa 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -44,13 +44,8 @@ impl OutputWithInInstruction { create_db!( Scanner { - BlockId: (number: u64) -> [u8; 32], - BlockNumber: (id: [u8; 32]) -> u64, - ActiveKeys: () -> Vec>, - // The latest finalized block to appear of a blockchain - LatestFinalizedBlock: () -> u64, // The next block to scan for received outputs NextToScanForOutputsBlock: () -> u64, // The next block to check for resolving eventualities @@ -89,17 +84,6 @@ create_db!( pub(crate) struct ScannerDb(PhantomData); impl ScannerDb { - pub(crate) fn set_block(txn: &mut impl DbTxn, number: u64, id: [u8; 32]) { - BlockId::set(txn, number, &id); - BlockNumber::set(txn, id, &number); - } - pub(crate) fn block_id(getter: &impl Get, number: u64) -> Option<[u8; 32]> { - BlockId::get(getter, number) - } - pub(crate) fn block_number(getter: &impl Get, id: [u8; 32]) -> Option { - BlockNumber::get(getter, id) - } - // activation_block_number is inclusive, so the key will be scanned for starting at the specified // block pub(crate) fn queue_key(txn: &mut impl DbTxn, activation_block_number: u64, key: KeyFor) { @@ -155,13 +139,13 @@ impl ScannerDb { pub(crate) fn set_start_block(txn: &mut impl DbTxn, start_block: u64, id: [u8; 32]) { assert!( - LatestFinalizedBlock::get(txn).is_none(), + NextToScanForOutputsBlock::get(txn).is_none(), "setting start block but prior set start block" ); - Self::set_block(txn, start_block, id); + crate::index::IndexDb::set_block(txn, start_block, id); + crate::index::IndexDb::set_latest_finalized_block(txn, start_block); - LatestFinalizedBlock::set(txn, &start_block); NextToScanForOutputsBlock::set(txn, &start_block); // We can receive outputs in this block, but any descending transactions will be in the next // block. This, with the check on-set, creates a bound that this value in the DB is non-zero. 
@@ -169,13 +153,6 @@ impl ScannerDb { NextToPotentiallyReportBlock::set(txn, &start_block); } - pub(crate) fn set_latest_finalized_block(txn: &mut impl DbTxn, latest_finalized_block: u64) { - LatestFinalizedBlock::set(txn, &latest_finalized_block); - } - pub(crate) fn latest_finalized_block(getter: &impl Get) -> Option { - LatestFinalizedBlock::get(getter) - } - pub(crate) fn latest_scannable_block(getter: &impl Get) -> Option { // We can only scan up to whatever block we've checked the Eventualities of, plus the window // length. Since this returns an inclusive bound, we need to subtract 1 diff --git a/processor/scanner/src/index/db.rs b/processor/scanner/src/index/db.rs new file mode 100644 index 000000000..a46d6fa64 --- /dev/null +++ b/processor/scanner/src/index/db.rs @@ -0,0 +1,34 @@ +use serai_db::{Get, DbTxn, create_db}; + +create_db!( + ScannerIndex { + // A lookup of a block's number to its ID + BlockId: (number: u64) -> [u8; 32], + // A lookup of a block's ID to its number + BlockNumber: (id: [u8; 32]) -> u64, + + // The latest finalized block to appear on the blockchain + LatestFinalizedBlock: () -> u64, + } +); + +pub(crate) struct IndexDb; +impl IndexDb { + pub(crate) fn set_block(txn: &mut impl DbTxn, number: u64, id: [u8; 32]) { + BlockId::set(txn, number, &id); + BlockNumber::set(txn, id, &number); + } + pub(crate) fn block_id(getter: &impl Get, number: u64) -> Option<[u8; 32]> { + BlockId::get(getter, number) + } + pub(crate) fn block_number(getter: &impl Get, id: [u8; 32]) -> Option { + BlockNumber::get(getter, id) + } + + pub(crate) fn set_latest_finalized_block(txn: &mut impl DbTxn, latest_finalized_block: u64) { + LatestFinalizedBlock::set(txn, &latest_finalized_block); + } + pub(crate) fn latest_finalized_block(getter: &impl Get) -> Option { + LatestFinalizedBlock::get(getter) + } +} diff --git a/processor/scanner/src/index/mod.rs b/processor/scanner/src/index/mod.rs index e3c5c6acd..078016506 100644 --- a/processor/scanner/src/index/mod.rs +++ b/processor/scanner/src/index/mod.rs @@ -1,9 +1,11 @@ use serai_db::{DbTxn, Db}; -use primitives::BlockHeader; +use primitives::{task::ContinuallyRan, BlockHeader}; -// TODO: Localize to IndexDb? 
-use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; +use crate::ScannerFeed; + +mod db; +pub(crate) use db::IndexDb; /* This processor should build its own index of the blockchain, yet only for finalized blocks which @@ -22,7 +24,7 @@ struct IndexFinalizedTask { impl ContinuallyRan for IndexFinalizedTask { async fn run_iteration(&mut self) -> Result { // Fetch the latest finalized block - let our_latest_finalized = ScannerDb::::latest_finalized_block(&self.db) + let our_latest_finalized = IndexDb::latest_finalized_block(&self.db) .expect("IndexTask run before writing the start block"); let latest_finalized = match self.feed.latest_finalized_block_number().await { Ok(latest_finalized) => latest_finalized, @@ -51,7 +53,7 @@ impl ContinuallyRan for IndexFinalizedTask { // Check this descends from our indexed chain { let expected_parent = - ScannerDb::::block_id(&self.db, b - 1).expect("didn't have the ID of the prior block"); + IndexDb::block_id(&self.db, b - 1).expect("didn't have the ID of the prior block"); if block.parent() != expected_parent { panic!( "current finalized block (#{b}, {}) doesn't build off finalized block (#{}, {})", @@ -64,8 +66,8 @@ impl ContinuallyRan for IndexFinalizedTask { // Update the latest finalized block let mut txn = self.db.txn(); - ScannerDb::::set_block(&mut txn, b, block.id()); - ScannerDb::::set_latest_finalized_block(&mut txn, b); + IndexDb::set_block(&mut txn, b, block.id()); + IndexDb::set_latest_finalized_block(&mut txn, b); txn.commit(); } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 5b41301ef..d38c2ec34 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -112,7 +112,7 @@ pub trait ScannerFeed: Send + Sync { // Check the ID of this block is the expected ID { let expected = - ScannerDb::::block_id(&self.db, number).expect("requested a block which wasn't indexed"); + crate::index::IndexDb::block_id(&self.db, number).expect("requested a block which wasn't indexed"); if block.id() != expected { panic!( "finalized chain reorganized from {} to {} at {}", From bd6f39d2b4c79563d955d61995537aadeba672a3 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 26 Aug 2024 23:25:30 -0400 Subject: [PATCH 026/179] Make Eventuality a folder, not a file --- processor/scanner/src/{eventuality.rs => eventuality/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename processor/scanner/src/{eventuality.rs => eventuality/mod.rs} (100%) diff --git a/processor/scanner/src/eventuality.rs b/processor/scanner/src/eventuality/mod.rs similarity index 100% rename from processor/scanner/src/eventuality.rs rename to processor/scanner/src/eventuality/mod.rs From 960ecd8f4a2736271aa77bbb36ec8ac188541e5a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 27 Aug 2024 00:23:15 -0400 Subject: [PATCH 027/179] Add dedicated Eventuality DB and stub missing fns --- processor/scanner/src/db.rs | 16 +++++++++-- processor/scanner/src/eventuality/db.rs | 36 ++++++++++++++++++++++++ processor/scanner/src/eventuality/mod.rs | 15 ++++++---- processor/scanner/src/lib.rs | 26 +++++++++-------- processor/scanner/src/report.rs | 10 +++---- processor/scanner/src/scan.rs | 4 +-- 6 files changed, 81 insertions(+), 26 deletions(-) create mode 100644 processor/scanner/src/eventuality/db.rs diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 4ac6badaa..185112226 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -215,8 +215,8 @@ impl ScannerDb { pub(crate) fn queue_return( txn: 
&mut impl DbTxn, block_queued_from: u64, - return_addr: AddressFor, - output: OutputFor, + return_addr: &AddressFor, + output: &OutputFor, ) { todo!("TODO") } @@ -253,6 +253,10 @@ impl ScannerDb { } pub(crate) fn flag_notable(txn: &mut impl DbTxn, block_number: u64) { + assert!( + NextToPotentiallyReportBlock::get(txn).unwrap() <= block_number, + "already potentially reported a block we're only now flagging as notable" + ); NotableBlock::set(txn, block_number, &()); } @@ -285,4 +289,12 @@ impl ScannerDb { pub(crate) fn is_block_notable(getter: &impl Get, number: u64) -> bool { NotableBlock::get(getter, number).is_some() } + + pub(crate) fn take_queued_returns(txn: &mut impl DbTxn, block_number: u64) -> Vec> { + todo!("TODO") + } + + pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 { + todo!("TODO") + } } diff --git a/processor/scanner/src/eventuality/db.rs b/processor/scanner/src/eventuality/db.rs new file mode 100644 index 000000000..e379532d2 --- /dev/null +++ b/processor/scanner/src/eventuality/db.rs @@ -0,0 +1,36 @@ +use core::marker::PhantomData; + +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, create_db}; + +use primitives::EventualityTracker; + +use crate::{ScannerFeed, KeyFor, EventualityFor}; + +// The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. +trait Borshy: BorshSerialize + BorshDeserialize {} +impl Borshy for T {} + +create_db!( + ScannerEventuality { + SerializedEventualities: () -> Vec, + } +); + +pub(crate) struct EventualityDb(PhantomData); +impl EventualityDb { + pub(crate) fn set_eventualities( + txn: &mut impl DbTxn, + key: KeyFor, + eventualities: &EventualityTracker>, + ) { + todo!("TODO") + } + + pub(crate) fn eventualities( + getter: &impl Get, + key: KeyFor, + ) -> EventualityTracker> { + todo!("TODO") + } +} diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 8fc182466..3d70d650f 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -2,13 +2,16 @@ use group::GroupEncoding; use serai_db::{DbTxn, Db}; -use primitives::{OutputType, ReceivedOutput, Block}; +use primitives::{OutputType, ReceivedOutput, Eventuality, Block}; // TODO: Localize to EventualityDb? use crate::{ lifetime::LifetimeStage, db::ScannerDb, BlockExt, ScannerFeed, KeyFor, Scheduler, ContinuallyRan, }; +mod db; +use db::EventualityDb; + /* When we scan a block, we receive outputs. 
When this block is acknowledged, we accumulate those outputs into some scheduler, potentially causing certain transactions to begin their signing @@ -110,7 +113,7 @@ impl> ContinuallyRan for EventualityTas iterated = true; - let block = self.feed.block_by_number(b).await?; + let block = self.feed.block_by_number(&self.db, b).await?; log::info!("checking eventuality completions in block: {} ({b})", hex::encode(block.id())); @@ -144,9 +147,9 @@ impl> ContinuallyRan for EventualityTas for key in keys { let completed_eventualities = { - let mut eventualities = ScannerDb::::eventualities(&txn, key.key); + let mut eventualities = EventualityDb::::eventualities(&txn, key.key); let completed_eventualities = block.check_for_eventuality_resolutions(&mut eventualities); - ScannerDb::::set_eventualities(&mut txn, eventualities); + EventualityDb::::set_eventualities(&mut txn, key.key, &eventualities); completed_eventualities }; @@ -200,11 +203,11 @@ impl> ContinuallyRan for EventualityTas KeyFor::::from_bytes(&key_repr).unwrap() }; - let mut eventualities = ScannerDb::::eventualities(&txn, key); + let mut eventualities = EventualityDb::::eventualities(&txn, key); for new_eventuality in new_eventualities { eventualities.active_eventualities.insert(new_eventuality.lookup(), new_eventuality); } - ScannerDb::::set_eventualities(&mut txn, eventualities); + EventualityDb::::set_eventualities(&mut txn, key, &eventualities); } // Update the next to check block diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index d38c2ec34..fb6599b70 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -1,13 +1,11 @@ -use core::{marker::PhantomData, fmt::Debug, time::Duration}; +use core::{marker::PhantomData, fmt::Debug}; use std::collections::HashMap; -use tokio::sync::mpsc; - -use serai_db::DbTxn; +use serai_db::{Get, DbTxn}; use serai_primitives::{NetworkId, Coin, Amount}; -use primitives::{task::*, Block}; +use primitives::{task::*, ReceivedOutput, Block}; // Logic for deciding where in its lifetime a multisig is. mod lifetime; @@ -29,10 +27,10 @@ pub(crate) trait BlockExt: Block { } impl BlockExt for B { fn scan_for_outputs(&self, key: Self::Key) -> Vec { - let mut outputs = self.scan_for_outputs_unordered(); + let mut outputs = self.scan_for_outputs_unordered(key); outputs.sort_by(|a, b| { use core::cmp::{Ordering, Ord}; - let res = a.id().as_ref().cmp(&b.id().as_ref()); + let res = a.id().as_ref().cmp(b.id().as_ref()); assert!(res != Ordering::Equal, "scanned two outputs within a block with the same ID"); res }); @@ -103,7 +101,11 @@ pub trait ScannerFeed: Send + Sync { /// Fetch a block by its number. /// /// Panics if the block requested wasn't indexed. 
- async fn block_by_number(&self, getter: &impl Get, number: u64) -> Result { + async fn block_by_number( + &self, + getter: &(impl Send + Sync + Get), + number: u64, + ) -> Result { let block = match self.unchecked_block_by_number(number).await { Ok(block) => block, Err(e) => Err(format!("couldn't fetch block {number}: {e:?}"))?, @@ -111,8 +113,8 @@ pub trait ScannerFeed: Send + Sync { // Check the ID of this block is the expected ID { - let expected = - crate::index::IndexDb::block_id(&self.db, number).expect("requested a block which wasn't indexed"); + let expected = crate::index::IndexDb::block_id(getter, number) + .expect("requested a block which wasn't indexed"); if block.id() != expected { panic!( "finalized chain reorganized from {} to {} at {}", @@ -122,6 +124,8 @@ pub trait ScannerFeed: Send + Sync { ); } } + + Ok(block) } /// The cost to aggregate an input as of the specified block. @@ -146,7 +150,7 @@ type OutputFor = <::Block as Block>::Output; type EventualityFor = <::Block as Block>::Eventuality; /// The object responsible for accumulating outputs and planning new transactions. -pub trait Scheduler { +pub trait Scheduler: Send { /// Accumulate outputs into the scheduler, yielding the Eventualities now to be scanned for. /// /// The `Vec` used as the key in the returned HashMap should be the encoded key these diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs index ec87845f4..8f37d7a64 100644 --- a/processor/scanner/src/report.rs +++ b/processor/scanner/src/report.rs @@ -7,7 +7,7 @@ use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; use primitives::ReceivedOutput; // TODO: Localize to ReportDb? -use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan}; +use crate::{db::ScannerDb, index::IndexDb, ScannerFeed, ContinuallyRan}; /* This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion. 
@@ -57,7 +57,7 @@ impl ContinuallyRan for ReportTask { // methods to be used in the future) in_instructions.sort_by(|a, b| { use core::cmp::{Ordering, Ord}; - let res = a.output.id().as_ref().cmp(&b.output.id().as_ref()); + let res = a.output.id().as_ref().cmp(b.output.id().as_ref()); assert!(res != Ordering::Equal); res }); @@ -66,8 +66,8 @@ impl ContinuallyRan for ReportTask { let network = S::NETWORK; let block_hash = - ScannerDb::::block_id(&txn, b).expect("reporting block we didn't save the ID for"); - let mut batch_id = ScannerDb::::acquire_batch_id(txn); + IndexDb::block_id(&txn, b).expect("reporting block we didn't save the ID for"); + let mut batch_id = ScannerDb::::acquire_batch_id(&mut txn); // start with empty batch let mut batches = @@ -83,7 +83,7 @@ impl ContinuallyRan for ReportTask { let instruction = batch.instructions.pop().unwrap(); // bump the id for the new batch - batch_id = ScannerDb::::acquire_batch_id(txn); + batch_id = ScannerDb::::acquire_batch_id(&mut txn); // make a new batch with this instruction included batches.push(Batch { diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index ddc1110e0..7e59c92d0 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -76,7 +76,7 @@ impl ContinuallyRan for ScanForOutputsTask { .expect("ScanForOutputsTask run before writing the start block"); for b in next_to_scan ..= latest_scannable { - let block = self.feed.block_by_number(b).await?; + let block = self.feed.block_by_number(&self.db, b).await?; log::info!("scanning block: {} ({b})", hex::encode(block.id())); @@ -173,7 +173,7 @@ impl ContinuallyRan for ScanForOutputsTask { }, (Some(return_addr), None) => { // Since there was no instruction here, return this since we parsed a return address - ScannerDb::::queue_return(&mut txn, b, return_addr, output); + ScannerDb::::queue_return(&mut txn, b, &return_addr, &output); continue; } // Since we didn't receive an instruction nor can we return this, move on From 65e48626cc8c51f519c9005a8af1ec792fb48e91 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 27 Aug 2024 00:44:11 -0400 Subject: [PATCH 028/179] Don't expose IndexDb throughout the crate --- processor/primitives/src/task.rs | 2 +- processor/scanner/src/db.rs | 3 --- processor/scanner/src/index/mod.rs | 40 ++++++++++++++++++++++++++++-- processor/scanner/src/lib.rs | 3 +-- processor/scanner/src/report.rs | 5 ++-- 5 files changed, 42 insertions(+), 11 deletions(-) diff --git a/processor/primitives/src/task.rs b/processor/primitives/src/task.rs index a7d6153ca..94a576a0a 100644 --- a/processor/primitives/src/task.rs +++ b/processor/primitives/src/task.rs @@ -78,7 +78,7 @@ pub trait ContinuallyRan: Sized { } } Err(e) => { - log::debug!("{}", e); + log::warn!("{}", e); increase_sleep_before_next_task(&mut current_sleep_before_next_task); } } diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 185112226..42086681a 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -143,9 +143,6 @@ impl ScannerDb { "setting start block but prior set start block" ); - crate::index::IndexDb::set_block(txn, start_block, id); - crate::index::IndexDb::set_latest_finalized_block(txn, start_block); - NextToScanForOutputsBlock::set(txn, &start_block); // We can receive outputs in this block, but any descending transactions will be in the next // block. This, with the check on-set, creates a bound that this value in the DB is non-zero. 
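[Editor's note: the index/mod.rs diff below moves start-block initialization into the index task itself, leaving a single panicking accessor as the crate-internal API. As a rough standalone sketch of the dual lookup IndexDb maintains — the in-memory types here are hypothetical stand-ins, not the crate's serai-db-backed tables:]

use std::collections::HashMap;

// In-memory stand-in for IndexDb (illustrative; the real tables live in the
// database and are written under a transaction).
#[derive(Default)]
struct BlockIndex {
  id_by_number: HashMap<u64, [u8; 32]>,
  number_by_id: HashMap<[u8; 32], u64>,
  latest_finalized: Option<u64>,
}

impl BlockIndex {
  // Both directions are written together so the number -> ID and ID -> number
  // lookups can never disagree.
  fn set_block(&mut self, number: u64, id: [u8; 32]) {
    self.id_by_number.insert(number, id);
    self.number_by_id.insert(id, number);
  }
  fn set_latest_finalized_block(&mut self, number: u64) {
    self.latest_finalized = Some(number);
  }
  // Mirrors the panicking accessor the crate exposes: requesting an unindexed
  // block is a logic error, not a recoverable condition.
  fn block_id(&self, number: u64) -> [u8; 32] {
    *self
      .id_by_number
      .get(&number)
      .unwrap_or_else(|| panic!("requested block ID for unindexed block {number}"))
  }
}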
diff --git a/processor/scanner/src/index/mod.rs b/processor/scanner/src/index/mod.rs index 078016506..7c70eedc7 100644 --- a/processor/scanner/src/index/mod.rs +++ b/processor/scanner/src/index/mod.rs @@ -1,11 +1,17 @@ -use serai_db::{DbTxn, Db}; +use serai_db::{Get, DbTxn, Db}; use primitives::{task::ContinuallyRan, BlockHeader}; use crate::ScannerFeed; mod db; -pub(crate) use db::IndexDb; +use db::IndexDb; + +/// Panics if an unindexed block's ID is requested. +pub(crate) fn block_id(getter: &impl Get, block_number: u64) -> [u8; 32] { + IndexDb::block_id(getter, block_number) + .unwrap_or_else(|| panic!("requested block ID for unindexed block {block_number}")) +} /* This processor should build its own index of the blockchain, yet only for finalized blocks which @@ -20,6 +26,36 @@ struct IndexFinalizedTask { feed: S, } +impl IndexFinalizedTask { + pub(crate) async fn new(mut db: D, feed: S, start_block: u64) -> Self { + if IndexDb::block_id(&db, start_block).is_none() { + // Fetch the block for its ID + let block = { + let mut delay = Self::DELAY_BETWEEN_ITERATIONS; + loop { + match feed.unchecked_block_header_by_number(start_block).await { + Ok(block) => break block, + Err(e) => { + log::warn!("IndexFinalizedTask couldn't fetch start block {start_block}: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(delay)).await; + delay += Self::DELAY_BETWEEN_ITERATIONS; + delay = delay.min(Self::MAX_DELAY_BETWEEN_ITERATIONS); + } + }; + } + }; + + // Initialize the DB + let mut txn = db.txn(); + IndexDb::set_block(&mut txn, start_block, block.id()); + IndexDb::set_latest_finalized_block(&mut txn, start_block); + txn.commit(); + } + + Self { db, feed } + } +} + #[async_trait::async_trait] impl ContinuallyRan for IndexFinalizedTask { async fn run_iteration(&mut self) -> Result { diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index fb6599b70..a29f10697 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -113,8 +113,7 @@ pub trait ScannerFeed: Send + Sync { // Check the ID of this block is the expected ID { - let expected = crate::index::IndexDb::block_id(getter, number) - .expect("requested a block which wasn't indexed"); + let expected = crate::index::block_id(getter, number); if block.id() != expected { panic!( "finalized chain reorganized from {} to {} at {}", diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs index 8f37d7a64..f2caf6921 100644 --- a/processor/scanner/src/report.rs +++ b/processor/scanner/src/report.rs @@ -7,7 +7,7 @@ use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; use primitives::ReceivedOutput; // TODO: Localize to ReportDb? -use crate::{db::ScannerDb, index::IndexDb, ScannerFeed, ContinuallyRan}; +use crate::{db::ScannerDb, index, ScannerFeed, ContinuallyRan}; /* This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion. 
@@ -65,8 +65,7 @@ impl ContinuallyRan for ReportTask { }; let network = S::NETWORK; - let block_hash = - IndexDb::block_id(&txn, b).expect("reporting block we didn't save the ID for"); + let block_hash = index::block_id(&txn, b); let mut batch_id = ScannerDb::::acquire_batch_id(&mut txn); // start with empty batch From 1b949d69e70d43272aca3fdff16c96c6f69c1e72 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 27 Aug 2024 01:54:49 -0400 Subject: [PATCH 029/179] Add a DbChannel between scan and eventuality task --- processor/scanner/src/db.rs | 127 ++++++++++++++++------- processor/scanner/src/eventuality/mod.rs | 47 ++++++--- processor/scanner/src/lib.rs | 20 +++- processor/scanner/src/scan.rs | 31 ++++-- 4 files changed, 166 insertions(+), 59 deletions(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 42086681a..3ea41161a 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -3,13 +3,13 @@ use std::io; use scale::Encode; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_db::{Get, DbTxn, create_db}; +use serai_db::{Get, DbTxn, create_db, db_channel}; use serai_in_instructions_primitives::InInstructionWithBalance; use primitives::{ReceivedOutput, BorshG}; -use crate::{lifetime::LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor}; +use crate::{lifetime::LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, Return}; // The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. trait Borshy: BorshSerialize + BorshDeserialize {} @@ -76,8 +76,6 @@ create_db!( NotableBlock: (number: u64) -> (), SerializedQueuedOutputs: (block_number: u64) -> Vec, - SerializedForwardedOutputsIndex: (block_number: u64) -> Vec, - SerializedForwardedOutput: (output_id: &[u8]) -> Vec, SerializedOutputs: (block_number: u64) -> Vec, } ); @@ -209,15 +207,6 @@ impl ScannerDb { todo!("TODO") } - pub(crate) fn queue_return( - txn: &mut impl DbTxn, - block_queued_from: u64, - return_addr: &AddressFor, - output: &OutputFor, - ) { - todo!("TODO") - } - pub(crate) fn queue_output_until_block( txn: &mut impl DbTxn, queue_for_block: u64, @@ -229,26 +218,6 @@ impl ScannerDb { SerializedQueuedOutputs::set(txn, queue_for_block, &outputs); } - pub(crate) fn save_output_being_forwarded( - txn: &mut impl DbTxn, - block_forwarded_from: u64, - output: &OutputWithInInstruction, - ) { - let mut buf = Vec::with_capacity(128); - output.write(&mut buf).unwrap(); - - let id = output.output.id(); - - // Save this to an index so we can later fetch all outputs to forward - let mut forwarded_outputs = SerializedForwardedOutputsIndex::get(txn, block_forwarded_from) - .unwrap_or(Vec::with_capacity(32)); - forwarded_outputs.extend(id.as_ref()); - SerializedForwardedOutputsIndex::set(txn, block_forwarded_from, &forwarded_outputs); - - // Save the output itself - SerializedForwardedOutput::set(txn, id.as_ref(), &buf); - } - pub(crate) fn flag_notable(txn: &mut impl DbTxn, block_number: u64) { assert!( NextToPotentiallyReportBlock::get(txn).unwrap() <= block_number, @@ -287,11 +256,99 @@ impl ScannerDb { NotableBlock::get(getter, number).is_some() } - pub(crate) fn take_queued_returns(txn: &mut impl DbTxn, block_number: u64) -> Vec> { + pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 { todo!("TODO") } - pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 { + pub(crate) fn return_address_and_in_instruction_for_forwarded_output( + getter: &impl Get, + output: & as ReceivedOutput, AddressFor>>::Id, + ) -> Option<(Option>, 
InInstructionWithBalance)> { + todo!("TODO") + } +} + +/// The data produced by scanning a block. +/// +/// This is the sender's version which includes the forwarded outputs with their InInstructions, +/// which need to be saved to the database for later retrieval. +pub(crate) struct SenderScanData { + /// The block number. + pub(crate) block_number: u64, + /// The received outputs which should be accumulated into the scheduler. + pub(crate) received_external_outputs: Vec>, + /// The outputs which need to be forwarded. + pub(crate) forwards: Vec>, + /// The outputs which need to be returned. + pub(crate) returns: Vec>, +} + +/// The data produced by scanning a block. +/// +/// This is the receiver's version which doesn't include the forwarded outputs' InInstructions, as +/// the Eventuality task doesn't need it to process this block. +pub(crate) struct ReceiverScanData { + /// The block number. + pub(crate) block_number: u64, + /// The received outputs which should be accumulated into the scheduler. + pub(crate) received_external_outputs: Vec>, + /// The outputs which need to be forwarded. + pub(crate) forwards: Vec>, + /// The outputs which need to be returned. + pub(crate) returns: Vec>, +} + +#[derive(BorshSerialize, BorshDeserialize)] +pub(crate) struct SerializedScanData { + pub(crate) block_number: u64, + pub(crate) data: Vec, +} + +db_channel! { + ScannerScanEventuality { + ScannedBlock: (empty_key: ()) -> SerializedScanData, + } +} + +pub(crate) struct ScanToEventualityDb(PhantomData); +impl ScanToEventualityDb { + pub(crate) fn send_scan_data(txn: &mut impl DbTxn, block_number: u64, data: &SenderScanData) { + /* + SerializedForwardedOutputsIndex: (block_number: u64) -> Vec, + SerializedForwardedOutput: (output_id: &[u8]) -> Vec, + + pub(crate) fn save_output_being_forwarded( + txn: &mut impl DbTxn, + block_forwarded_from: u64, + output: &OutputWithInInstruction, + ) { + let mut buf = Vec::with_capacity(128); + output.write(&mut buf).unwrap(); + + let id = output.output.id(); + + // Save this to an index so we can later fetch all outputs to forward + let mut forwarded_outputs = SerializedForwardedOutputsIndex::get(txn, block_forwarded_from) + .unwrap_or(Vec::with_capacity(32)); + forwarded_outputs.extend(id.as_ref()); + SerializedForwardedOutputsIndex::set(txn, block_forwarded_from, &forwarded_outputs); + + // Save the output itself + SerializedForwardedOutput::set(txn, id.as_ref(), &buf); + } + */ + + ScannedBlock::send(txn, (), todo!("TODO")); + } + pub(crate) fn recv_scan_data(txn: &mut impl DbTxn, block_number: u64) -> ReceiverScanData { + let data = + ScannedBlock::try_recv(txn, ()).expect("receiving data for a scanned block not yet sent"); + assert_eq!( + block_number, data.block_number, + "received data for a scanned block distinct than expected" + ); + let data = &data.data; + todo!("TODO") } } diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 3d70d650f..4f5fbe630 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -2,11 +2,13 @@ use group::GroupEncoding; use serai_db::{DbTxn, Db}; -use primitives::{OutputType, ReceivedOutput, Eventuality, Block}; +use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Eventuality, Block}; // TODO: Localize to EventualityDb? 
use crate::{ - lifetime::LifetimeStage, db::ScannerDb, BlockExt, ScannerFeed, KeyFor, Scheduler, ContinuallyRan, + lifetime::LifetimeStage, + db::{OutputWithInInstruction, ReceiverScanData, ScannerDb, ScanToEventualityDb}, + BlockExt, ScannerFeed, KeyFor, SchedulerUpdate, Scheduler, }; mod db; @@ -137,13 +139,12 @@ impl> ContinuallyRan for EventualityTas let mut txn = self.db.txn(); - // Fetch the External outputs we reported, and therefore should yield after handling this - // block - let mut outputs = ScannerDb::::in_instructions(&txn, b) - .expect("handling eventualities/outputs for block which didn't set its InInstructions") - .into_iter() - .map(|output| output.output) - .collect::>(); + // Fetch the data from the scanner + let scan_data = ScanToEventualityDb::recv_scan_data(&mut txn, b); + assert_eq!(scan_data.block_number, b); + let ReceiverScanData { block_number: _, received_external_outputs, forwards, returns } = + scan_data; + let mut outputs = received_external_outputs; for key in keys { let completed_eventualities = { @@ -184,17 +185,37 @@ impl> ContinuallyRan for EventualityTas } // Now, we iterate over all Forwarded outputs and queue their InInstructions - todo!("TODO"); + for output in + non_external_outputs.iter().filter(|output| output.kind() == OutputType::Forwarded) + { + let Some(eventuality) = completed_eventualities.get(&output.transaction_id()) else { + // Output sent to the forwarding address yet not actually forwarded + continue; + }; + let Some(forwarded) = eventuality.forwarded_output() else { + // This was a TX made by us, yet someone burned to the forwarding address + continue; + }; + + let (return_address, in_instruction) = + ScannerDb::::return_address_and_in_instruction_for_forwarded_output( + &txn, &forwarded, + ) + .expect("forwarded an output yet didn't save its InInstruction to the DB"); + ScannerDb::::queue_output_until_block( + &mut txn, + b + S::WINDOW_LENGTH, + &OutputWithInInstruction { output: output.clone(), return_address, in_instruction }, + ); + } // Accumulate all of these outputs outputs.extend(non_external_outputs); } - let outputs_to_return = ScannerDb::::take_queued_returns(&mut txn, b); - // TODO: This also has to intake Burns let new_eventualities = - self.scheduler.accumulate_outputs_and_return_outputs(&mut txn, outputs, outputs_to_return); + self.scheduler.update(&mut txn, SchedulerUpdate { outputs, forwards, returns }); for (key, new_eventualities) in new_eventualities { let key = { let mut key_repr = as GroupEncoding>::Repr::default(); diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index a29f10697..ef2954716 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -148,17 +148,29 @@ type AddressFor = <::Block as Block>::Address; type OutputFor = <::Block as Block>::Output; type EventualityFor = <::Block as Block>::Eventuality; +/// A return to occur. +pub struct Return { + address: AddressFor, + output: OutputFor, +} + +/// An update for the scheduler. +pub struct SchedulerUpdate { + outputs: Vec>, + forwards: Vec>, + returns: Vec>, +} + /// The object responsible for accumulating outputs and planning new transactions. pub trait Scheduler: Send { /// Accumulate outputs into the scheduler, yielding the Eventualities now to be scanned for. /// - /// The `Vec` used as the key in the returned HashMap should be the encoded key these + /// The `Vec` used as the key in the returned HashMap should be the encoded key the /// Eventualities are for. 
- fn accumulate_outputs_and_return_outputs( + fn update( &mut self, txn: &mut impl DbTxn, - outputs: Vec>, - outputs_to_return: Vec>, + update: SchedulerUpdate, ) -> HashMap, Vec>>; } diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index 7e59c92d0..d8312e3b0 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -11,8 +11,8 @@ use primitives::{OutputType, ReceivedOutput, Block}; // TODO: Localize to ScanDb? use crate::{ lifetime::LifetimeStage, - db::{OutputWithInInstruction, ScannerDb}, - BlockExt, ScannerFeed, AddressFor, OutputFor, ContinuallyRan, + db::{OutputWithInInstruction, SenderScanData, ScannerDb, ScanToEventualityDb}, + BlockExt, ScannerFeed, AddressFor, OutputFor, Return, ContinuallyRan, }; // Construct an InInstruction from an external output. @@ -86,6 +86,12 @@ impl ContinuallyRan for ScanForOutputsTask { let mut txn = self.db.txn(); + let mut scan_data = SenderScanData { + block_number: b, + received_external_outputs: vec![], + forwards: vec![], + returns: vec![], + }; let mut in_instructions = ScannerDb::::take_queued_outputs(&mut txn, b); // Scan for each key @@ -171,13 +177,21 @@ impl ContinuallyRan for ScanForOutputsTask { return_address, in_instruction: InInstructionWithBalance { instruction, balance: balance_to_use }, }, - (Some(return_addr), None) => { + (Some(address), None) => { // Since there was no instruction here, return this since we parsed a return address - ScannerDb::::queue_return(&mut txn, b, &return_addr, &output); + if key.stage != LifetimeStage::Finishing { + scan_data.returns.push(Return { address, output }); + } + continue; + } + // Since we didn't receive an instruction nor can we return this, queue this for + // accumulation and move on + (None, None) => { + if key.stage != LifetimeStage::Finishing { + scan_data.received_external_outputs.push(output); + } continue; } - // Since we didn't receive an instruction nor can we return this, move on - (None, None) => continue, }; // Drop External outputs if they're to a multisig which won't report them @@ -201,7 +215,7 @@ impl ContinuallyRan for ScanForOutputsTask { LifetimeStage::Forwarding => { // When the forwarded output appears, we can see which Plan it's associated with and // from there recover this output - ScannerDb::::save_output_being_forwarded(&mut txn, b, &output_with_in_instruction); + scan_data.forwards.push(output_with_in_instruction); continue; } // We should drop these as we should not be handling new External outputs at this @@ -213,10 +227,13 @@ impl ContinuallyRan for ScanForOutputsTask { // Ensures we didn't miss a `continue` above assert!(matches!(key.stage, LifetimeStage::Active | LifetimeStage::UsingNewForChange)); + scan_data.received_external_outputs.push(output_with_in_instruction.output.clone()); in_instructions.push(output_with_in_instruction); } } + // Save the outputs to return + ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); // Save the in instructions ScannerDb::::set_in_instructions(&mut txn, b, in_instructions); // Update the next to scan block From 14b9c62fb60aa2b1a1271e98174c2dc7cc855b3c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 27 Aug 2024 02:14:59 -0400 Subject: [PATCH 030/179] Use a channel for the InInstructions It's still unclear how we'll handle refunding failed InInstructions at this time. Presumably, extending the InInstruction channel with the associated output ID? 
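[Editor's note: for intuition before the diff, the db_channel! used by this patch behaves like an ordered queue persisted in the database. A minimal std-only model of the send/receive contract — the names and the VecDeque backing are illustrative, not serai-db's actual macro output:]

use std::collections::VecDeque;

// Mirrors BlockBoundInInstructions: a payload tagged with the block it was
// scanned in (the payload itself is simplified here to opaque bytes).
struct BlockBound {
  block_number: u64,
  in_instructions: Vec<Vec<u8>>,
}

// The scan task sends exactly one message per scanned block; the report task
// receives them in order and asserts the tag matches its own block cursor, so
// any desynchronization fails loudly instead of misattributing instructions.
#[derive(Default)]
struct ScanToReportChannel {
  queue: VecDeque<BlockBound>,
}

impl ScanToReportChannel {
  fn send(&mut self, msg: BlockBound) {
    self.queue.push_back(msg);
  }

  fn recv_in_instructions(&mut self, block_number: u64) -> Vec<Vec<u8>> {
    let msg =
      self.queue.pop_front().expect("receiving InInstructions for a block not yet sent");
    assert_eq!(msg.block_number, block_number, "received InInstructions for unexpected block");
    msg.in_instructions
  }
}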
--- processor/scanner/src/db.rs | 67 ++++++++++++++++++++------------- processor/scanner/src/report.rs | 35 +++++++---------- processor/scanner/src/scan.rs | 30 ++++++++++++--- 3 files changed, 79 insertions(+), 53 deletions(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 3ea41161a..b4d7c27ba 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -226,32 +226,6 @@ impl ScannerDb { NotableBlock::set(txn, block_number, &()); } - // TODO: Use a DbChannel here, and send the instructions to the report task and the outputs to - // the eventuality task? That way this cleans up after itself - pub(crate) fn set_in_instructions( - txn: &mut impl DbTxn, - block_number: u64, - outputs: Vec>, - ) { - if !outputs.is_empty() { - // Set this block as notable - NotableBlock::set(txn, block_number, &()); - } - - let mut buf = Vec::with_capacity(outputs.len() * 128); - for output in outputs { - output.write(&mut buf).unwrap(); - } - SerializedOutputs::set(txn, block_number, &buf); - } - - pub(crate) fn in_instructions( - getter: &impl Get, - block_number: u64, - ) -> Option>> { - todo!("TODO") - } - pub(crate) fn is_block_notable(getter: &impl Get, number: u64) -> bool { NotableBlock::get(getter, number).is_some() } @@ -352,3 +326,44 @@ impl ScanToEventualityDb { todo!("TODO") } } + +#[derive(BorshSerialize, BorshDeserialize)] +pub(crate) struct BlockBoundInInstructions { + pub(crate) block_number: u64, + pub(crate) in_instructions: Vec, +} + +db_channel! { + ScannerScanReport { + InInstructions: (empty_key: ()) -> BlockBoundInInstructions, + } +} + +pub(crate) struct ScanToReportDb(PhantomData); +impl ScanToReportDb { + pub(crate) fn send_in_instructions( + txn: &mut impl DbTxn, + block_number: u64, + in_instructions: Vec, + ) { + if !in_instructions.is_empty() { + // Set this block as notable + NotableBlock::set(txn, block_number, &()); + } + + InInstructions::send(txn, (), &BlockBoundInInstructions { block_number, in_instructions }); + } + + pub(crate) fn recv_in_instructions( + txn: &mut impl DbTxn, + block_number: u64, + ) -> Vec { + let data = InInstructions::try_recv(txn, ()) + .expect("receiving InInstructions for a scanned block not yet sent"); + assert_eq!( + block_number, data.block_number, + "received InInstructions for a scanned block distinct than expected" + ); + data.in_instructions + } +} diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs index f2caf6921..39a721069 100644 --- a/processor/scanner/src/report.rs +++ b/processor/scanner/src/report.rs @@ -4,10 +4,11 @@ use serai_db::{DbTxn, Db}; use serai_primitives::BlockHash; use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; -use primitives::ReceivedOutput; - -// TODO: Localize to ReportDb? -use crate::{db::ScannerDb, index, ScannerFeed, ContinuallyRan}; +// TODO: Localize to Report? +use crate::{ + db::{ScannerDb, ScanToReportDb}, + index, ScannerFeed, ContinuallyRan, +}; /* This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion. 
@@ -47,23 +48,15 @@ impl ContinuallyRan for ReportTask { for b in next_to_potentially_report ..= highest_reportable { let mut txn = self.db.txn(); + // Receive the InInstructions for this block + // We always do this as we can't trivially tell if we should recv InInstructions before we do + let in_instructions = ScanToReportDb::::recv_in_instructions(&mut txn, b); + let notable = ScannerDb::::is_block_notable(&txn, b); + if !notable { + assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions"); + } // If this block is notable, create the Batch(s) for it - if ScannerDb::::is_block_notable(&txn, b) { - let in_instructions = { - let mut in_instructions = ScannerDb::::in_instructions(&txn, b) - .expect("reporting block which didn't set its InInstructions"); - // Sort these before reporting them in case anything we did is non-deterministic/to have - // a well-defined order (not implicit to however we got this result, enabling different - // methods to be used in the future) - in_instructions.sort_by(|a, b| { - use core::cmp::{Ordering, Ord}; - let res = a.output.id().as_ref().cmp(b.output.id().as_ref()); - assert!(res != Ordering::Equal); - res - }); - in_instructions - }; - + if notable { let network = S::NETWORK; let block_hash = index::block_id(&txn, b); let mut batch_id = ScannerDb::::acquire_batch_id(&mut txn); @@ -74,7 +67,7 @@ impl ContinuallyRan for ReportTask { for instruction in in_instructions { let batch = batches.last_mut().unwrap(); - batch.instructions.push(instruction.in_instruction); + batch.instructions.push(instruction); // check if batch is over-size if batch.encode().len() > MAX_BATCH_SIZE { diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index d8312e3b0..861a9725a 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -11,7 +11,7 @@ use primitives::{OutputType, ReceivedOutput, Block}; // TODO: Localize to ScanDb? 
use crate::{ lifetime::LifetimeStage, - db::{OutputWithInInstruction, SenderScanData, ScannerDb, ScanToEventualityDb}, + db::{OutputWithInInstruction, SenderScanData, ScannerDb, ScanToReportDb, ScanToEventualityDb}, BlockExt, ScannerFeed, AddressFor, OutputFor, Return, ContinuallyRan, }; @@ -92,7 +92,25 @@ impl ContinuallyRan for ScanForOutputsTask { forwards: vec![], returns: vec![], }; - let mut in_instructions = ScannerDb::::take_queued_outputs(&mut txn, b); + let mut in_instructions = vec![]; + + let queued_outputs = { + let mut queued_outputs = ScannerDb::::take_queued_outputs(&mut txn, b); + + // Sort the queued outputs in case they weren't queued in a deterministic fashion + queued_outputs.sort_by(|a, b| { + use core::cmp::{Ordering, Ord}; + let res = a.output.id().as_ref().cmp(b.output.id().as_ref()); + assert!(res != Ordering::Equal); + res + }); + + queued_outputs + }; + for queued_output in queued_outputs { + scan_data.received_external_outputs.push(queued_output.output); + in_instructions.push(queued_output.in_instruction); + } // Scan for each key for key in keys { @@ -228,14 +246,14 @@ impl ContinuallyRan for ScanForOutputsTask { assert!(matches!(key.stage, LifetimeStage::Active | LifetimeStage::UsingNewForChange)); scan_data.received_external_outputs.push(output_with_in_instruction.output.clone()); - in_instructions.push(output_with_in_instruction); + in_instructions.push(output_with_in_instruction.in_instruction); } } - // Save the outputs to return + // Send the scan data to the eventuality task ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); - // Save the in instructions - ScannerDb::::set_in_instructions(&mut txn, b, in_instructions); + // Send the in instructions to the report task + ScanToReportDb::::send_in_instructions(&mut txn, b, in_instructions); // Update the next to scan block ScannerDb::::set_next_to_scan_for_outputs_block(&mut txn, b + 1); txn.commit(); From a632630706ad4a8fa911e1ccaf7c63abf3aaad2b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 27 Aug 2024 02:21:22 -0400 Subject: [PATCH 031/179] Add sorts for safety even upon non-determinism --- processor/scanner/src/eventuality/mod.rs | 9 ++++++--- processor/scanner/src/lib.rs | 21 ++++++++++++++------- processor/scanner/src/scan.rs | 13 +++---------- 3 files changed, 23 insertions(+), 20 deletions(-) diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 4f5fbe630..20e241128 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -8,7 +8,7 @@ use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Eventuality, use crate::{ lifetime::LifetimeStage, db::{OutputWithInInstruction, ReceiverScanData, ScannerDb, ScanToEventualityDb}, - BlockExt, ScannerFeed, KeyFor, SchedulerUpdate, Scheduler, + BlockExt, ScannerFeed, KeyFor, SchedulerUpdate, Scheduler, sort_outputs, }; mod db; @@ -214,8 +214,11 @@ impl> ContinuallyRan for EventualityTas } // TODO: This also has to intake Burns - let new_eventualities = - self.scheduler.update(&mut txn, SchedulerUpdate { outputs, forwards, returns }); + let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns }; + scheduler_update.outputs.sort_by(sort_outputs); + scheduler_update.forwards.sort_by(sort_outputs); + scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output)); + let new_eventualities = self.scheduler.update(&mut txn, scheduler_update); for (key, new_eventualities) in new_eventualities { let key = { let mut 
key_repr = as GroupEncoding>::Repr::default(); diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index ef2954716..d245e2552 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -1,11 +1,13 @@ use core::{marker::PhantomData, fmt::Debug}; use std::collections::HashMap; +use group::GroupEncoding; + use serai_db::{Get, DbTxn}; use serai_primitives::{NetworkId, Coin, Amount}; -use primitives::{task::*, ReceivedOutput, Block}; +use primitives::{task::*, Address, ReceivedOutput, Block}; // Logic for deciding where in its lifetime a multisig is. mod lifetime; @@ -21,6 +23,16 @@ mod eventuality; /// Task which reports `Batch`s to Substrate. mod report; +pub(crate) fn sort_outputs>( + a: &O, + b: &O, +) -> core::cmp::Ordering { + use core::cmp::{Ordering, Ord}; + let res = a.id().as_ref().cmp(b.id().as_ref()); + assert!(res != Ordering::Equal, "two outputs within a collection had the same ID"); + res +} + /// Extension traits around Block. pub(crate) trait BlockExt: Block { fn scan_for_outputs(&self, key: Self::Key) -> Vec; @@ -28,12 +40,7 @@ pub(crate) trait BlockExt: Block { impl BlockExt for B { fn scan_for_outputs(&self, key: Self::Key) -> Vec { let mut outputs = self.scan_for_outputs_unordered(key); - outputs.sort_by(|a, b| { - use core::cmp::{Ordering, Ord}; - let res = a.id().as_ref().cmp(b.id().as_ref()); - assert!(res != Ordering::Equal, "scanned two outputs within a block with the same ID"); - res - }); + outputs.sort_by(sort_outputs); outputs } } diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index 861a9725a..8617ec186 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -6,13 +6,13 @@ use serai_in_instructions_primitives::{ Shorthand, RefundableInInstruction, InInstruction, InInstructionWithBalance, }; -use primitives::{OutputType, ReceivedOutput, Block}; +use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Block}; // TODO: Localize to ScanDb? use crate::{ lifetime::LifetimeStage, db::{OutputWithInInstruction, SenderScanData, ScannerDb, ScanToReportDb, ScanToEventualityDb}, - BlockExt, ScannerFeed, AddressFor, OutputFor, Return, ContinuallyRan, + BlockExt, ScannerFeed, AddressFor, OutputFor, Return, sort_outputs, }; // Construct an InInstruction from an external output. 
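[Editor's note: a toy demonstration of the property these sorts provide — any collection order, however non-deterministic, collapses to one canonical order, and a duplicate ID is treated as a bug rather than silently tolerated. The minimal output type below is hypothetical, not the crate's ReceivedOutput trait:]

// Stand-in for an output identified by a unique byte ID.
struct ToyOutput {
  id: [u8; 32],
}

// Same spirit as sort_outputs: a total order over IDs, panicking on
// duplicates, since two distinct outputs sharing an ID indicates a scan bug.
fn sort_toy_outputs(a: &ToyOutput, b: &ToyOutput) -> core::cmp::Ordering {
  let res = a.id.cmp(&b.id);
  assert!(res != core::cmp::Ordering::Equal, "two outputs had the same ID");
  res
}

fn main() {
  let mut outputs = vec![ToyOutput { id: [2; 32] }, ToyOutput { id: [1; 32] }];
  // Whatever order these were gathered in, every node ends with the same
  // canonical ordering, keeping downstream processing deterministic.
  outputs.sort_by(sort_toy_outputs);
  assert_eq!(outputs[0].id, [1; 32]);
}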
@@ -96,15 +96,8 @@ impl ContinuallyRan for ScanForOutputsTask { let queued_outputs = { let mut queued_outputs = ScannerDb::::take_queued_outputs(&mut txn, b); - // Sort the queued outputs in case they weren't queued in a deterministic fashion - queued_outputs.sort_by(|a, b| { - use core::cmp::{Ordering, Ord}; - let res = a.output.id().as_ref().cmp(b.output.id().as_ref()); - assert!(res != Ordering::Equal); - res - }); - + queued_outputs.sort_by(|a, b| sort_outputs(&a.output, &b.output)); queued_outputs }; for queued_output in queued_outputs { From e720fe7692a7e8a3abaecbede81a68da230c90ab Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 27 Aug 2024 16:43:50 -0400 Subject: [PATCH 032/179] Logs, documentation, misc --- processor/scanner/src/db.rs | 31 ++- processor/scanner/src/eventuality/mod.rs | 31 ++- processor/scanner/src/lib.rs | 313 ++--------------------- processor/scanner/src/scan.rs | 2 +- 4 files changed, 69 insertions(+), 308 deletions(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index b4d7c27ba..d53bf7c73 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -82,25 +82,34 @@ create_db!( pub(crate) struct ScannerDb(PhantomData); impl ScannerDb { - // activation_block_number is inclusive, so the key will be scanned for starting at the specified - // block + /// Queue a key. + /// + /// Keys may be queued whenever, so long as they're scheduled to activate `WINDOW_LENGTH` blocks + /// after the next block acknowledged after they've been set. There is no requirement that any + /// prior keys have had their processing completed (meaning what should be a length-2 vector may + /// be a length-n vector). + /// + /// A new key MUST NOT be queued to activate a block preceding the finishing of the key prior to + /// its prior. There MUST only be two keys active at one time. + /// + /// activation_block_number is inclusive, so the key will be scanned for starting at the + /// specified block. pub(crate) fn queue_key(txn: &mut impl DbTxn, activation_block_number: u64, key: KeyFor) { // Set this block as notable NotableBlock::set(txn, activation_block_number, &()); + // TODO: Panic if we've ever seen this key before + // Push the key let mut keys: Vec>>> = ActiveKeys::get(txn).unwrap_or(vec![]); - for key_i in &keys { - if key == key_i.key.0 { - panic!("queueing a key prior queued"); - } - } keys.push(SeraiKeyDbEntry { activation_block_number, key: BorshG(key) }); ActiveKeys::set(txn, &keys); } + /// Retire a key. + /// + /// The key retired must be the oldest key. There must be another key actively tracked. // TODO: This will be called from the Eventuality task yet this field is read by the scan task // We need to write the argument for its safety - // TODO: retire_key needs to set the notable block pub(crate) fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { let mut keys: Vec>>> = ActiveKeys::get(txn).expect("retiring key yet no active keys"); @@ -110,6 +119,9 @@ impl ScannerDb { keys.remove(0); ActiveKeys::set(txn, &keys); } + /// Fetch the active keys, as of the next-to-scan-for-outputs Block. + /// + /// This means the scan task should scan for all keys returned by this. 
pub(crate) fn active_keys_as_of_next_to_scan_for_outputs_block( getter: &impl Get, ) -> Option>>> { @@ -131,7 +143,7 @@ impl ScannerDb { ); keys.push(SeraiKey { key: raw_keys[i].key.0, stage, block_at_which_reporting_starts }); } - assert!(keys.len() <= 2); + assert!(keys.len() <= 2, "more than two keys active"); Some(keys) } @@ -152,7 +164,6 @@ impl ScannerDb { // We can only scan up to whatever block we've checked the Eventualities of, plus the window // length. Since this returns an inclusive bound, we need to subtract 1 // See `eventuality.rs` for more info - // TODO: Adjust based on register eventualities NextToCheckForEventualitiesBlock::get(getter).map(|b| b + S::WINDOW_LENGTH - 1) } diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 20e241128..3a472ce2d 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -117,7 +117,7 @@ impl> ContinuallyRan for EventualityTas let block = self.feed.block_by_number(&self.db, b).await?; - log::info!("checking eventuality completions in block: {} ({b})", hex::encode(block.id())); + log::debug!("checking eventuality completions in block: {} ({b})", hex::encode(block.id())); /* This is proper as the keys for the next to scan block (at most `WINDOW_LENGTH` ahead, @@ -147,13 +147,21 @@ impl> ContinuallyRan for EventualityTas let mut outputs = received_external_outputs; for key in keys { - let completed_eventualities = { + let (eventualities_is_empty, completed_eventualities) = { let mut eventualities = EventualityDb::::eventualities(&txn, key.key); let completed_eventualities = block.check_for_eventuality_resolutions(&mut eventualities); EventualityDb::::set_eventualities(&mut txn, key.key, &eventualities); - completed_eventualities + (eventualities.active_eventualities.is_empty(), completed_eventualities) }; + for (tx, completed_eventuality) in completed_eventualities { + log::info!( + "eventuality {} resolved by {}", + hex::encode(completed_eventuality.id()), + hex::encode(tx.as_ref()) + ); + } + // Fetch all non-External outputs let mut non_external_outputs = block.scan_for_outputs(key.key); non_external_outputs.retain(|output| output.kind() != OutputType::External); @@ -213,7 +221,6 @@ impl> ContinuallyRan for EventualityTas outputs.extend(non_external_outputs); } - // TODO: This also has to intake Burns let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns }; scheduler_update.outputs.sort_by(sort_outputs); scheduler_update.forwards.sort_by(sort_outputs); @@ -234,6 +241,22 @@ impl> ContinuallyRan for EventualityTas EventualityDb::::set_eventualities(&mut txn, key, &eventualities); } + for key in keys { + if key.stage == LifetimeStage::Finishing { + let eventualities = EventualityDb::::eventualities(&txn, key.key); + if eventualities.active_eventualities.is_empty() { + log::info!( + "key {} has finished and is being retired", + hex::encode(key.key.to_bytes().as_ref()) + ); + + ScannerDb::::flag_notable(&mut txn, b + S::WINDOW_LENGTH); + // TODO: Retire the key + todo!("TODO") + } + } + } + // Update the next to check block ScannerDb::::set_next_to_check_for_eventualities_block(&mut txn, next_to_check); txn.commit(); diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index d245e2552..2d19207fe 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -196,38 +196,34 @@ impl Scanner { /// /// This means this block was ordered on Serai in relation to `Burn` events, and all validators /// have 
achieved synchrony on it. - // TODO: If we're acknowledging block `b`, the Eventuality task was already eligible to check it - // for Eventualities. We need this to block until the Eventuality task has actually checked it. - // TODO: Does the prior TODO hold with how the callback is now handled? pub fn acknowledge_block( &mut self, + txn: &mut impl DbTxn, block_number: u64, - key_to_activate: Option<()>, - ) -> Vec> { + key_to_activate: Option>, + ) { + log::info!("acknowledging block {block_number}"); + assert!( + ScannerDb::::is_block_notable(txn, block_number), + "acknowledging a block which wasn't notable" + ); + ScannerDb::::set_highest_acknowledged_block(txn, block_number); + ScannerDb::::queue_key(txn, block_number + S::WINDOW_LENGTH); + } + + /// Queue Burns. + /// + /// The scanner only updates the scheduler with new outputs upon acknowledging a block. We can + /// safely queue Burns so long as they're only actually added once we've handled the outputs from + /// the block acknowledged prior to their queueing. + pub fn queue_burns(&mut self, txn: &mut impl DbTxn, burns: Vec<()>) { + let queue_as_of = ScannerDb::::highest_acknowledged_block(txn) + .expect("queueing Burns yet never acknowledged a block"); todo!("TODO") } } /* -#[derive(Clone, Debug)] -pub enum ScannerEvent { - // Block scanned - Block { - is_retirement_block: bool, - block: >::Id, - outputs: Vec, - }, - // Eventuality completion found on-chain - // TODO: Move this from a tuple - Completed( - Vec, - usize, - [u8; 32], - >::Id, - ::Completion, - ), -} - #[derive(Clone, Debug)] struct ScannerDb(PhantomData, PhantomData); impl ScannerDb { @@ -258,182 +254,8 @@ impl ScannerDb { .get(Self::scanned_block_key()) .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap()).try_into().unwrap()) } - - fn retirement_block_key(key: &::G) -> Vec { - Self::scanner_key(b"retirement_block", key.to_bytes()) - } - fn save_retirement_block( - txn: &mut D::Transaction<'_>, - key: &::G, - block: usize, - ) { - txn.put(Self::retirement_block_key(key), u64::try_from(block).unwrap().to_le_bytes()); - } - fn retirement_block(getter: &G, key: &::G) -> Option { - getter - .get(Self::retirement_block_key(key)) - .map(|bytes| usize::try_from(u64::from_le_bytes(bytes.try_into().unwrap())).unwrap()) - } -} - -impl ScannerHandle { - /// Acknowledge having handled a block. - /// - /// Creates a lock over the Scanner, preventing its independent scanning operations until - /// released. - /// - /// This must only be called on blocks which have been scanned in-memory. 
- pub async fn ack_block( - &mut self, - txn: &mut D::Transaction<'_>, - id: >::Id, - ) -> (bool, Vec) { - debug!("block {} acknowledged", hex::encode(&id)); - - let mut scanner = self.scanner.long_term_acquire().await; - - // Get the number for this block - let number = ScannerDb::::block_number(txn, &id) - .expect("main loop trying to operate on data we haven't scanned"); - log::trace!("block {} was {number}", hex::encode(&id)); - - let outputs = ScannerDb::::save_scanned_block(txn, number); - // This has a race condition if we try to ack a block we scanned on a prior boot, and we have - // yet to scan it on this boot - assert!(number <= scanner.ram_scanned.unwrap()); - for output in &outputs { - assert!(scanner.ram_outputs.remove(output.id().as_ref())); - } - - assert_eq!(scanner.need_ack.pop_front().unwrap(), number); - - self.held_scanner = Some(scanner); - - // Load the key from the DB, as it will have already been removed from RAM if retired - let key = ScannerDb::::keys(txn)[0].1; - let is_retirement_block = ScannerDb::::retirement_block(txn, &key) == Some(number); - if is_retirement_block { - ScannerDb::::retire_key(txn); - } - (is_retirement_block, outputs) - } - - pub async fn register_eventuality( - &mut self, - key: &[u8], - block_number: usize, - id: [u8; 32], - eventuality: N::Eventuality, - ) { - let mut lock; - // We won't use held_scanner if we're re-registering on boot - (if let Some(scanner) = self.held_scanner.as_mut() { - scanner - } else { - lock = Some(self.scanner.write().await); - lock.as_mut().unwrap().as_mut().unwrap() - }) - .eventualities - .get_mut(key) - .unwrap() - .register(block_number, id, eventuality) - } - - pub async fn release_lock(&mut self) { - self.scanner.restore(self.held_scanner.take().unwrap()).await - } } -impl Scanner { - #[allow(clippy::type_complexity, clippy::new_ret_no_self)] - pub fn new( - network: N, - db: D, - ) -> (ScannerHandle, Vec<(usize, ::G)>) { - let (multisig_completed_send, multisig_completed_recv) = mpsc::unbounded_channel(); - - let keys = ScannerDb::::keys(&db); - let mut eventualities = HashMap::new(); - for key in &keys { - eventualities.insert(key.1.to_bytes().as_ref().to_vec(), EventualitiesTracker::new()); - } - } - - // An async function, to be spawned on a task, to discover and report outputs - async fn run( - mut db: D, - network: N, - scanner_hold: ScannerHold, - mut multisig_completed: mpsc::UnboundedReceiver, - ) { - loop { - for block_being_scanned in (ram_scanned + 1) ..= latest_block_to_scan { - // Redo the checks for if we're too far ahead - { - let needing_ack = { - let scanner_lock = scanner_hold.read().await; - let scanner = scanner_lock.as_ref().unwrap(); - scanner.need_ack.front().copied() - }; - - if let Some(needing_ack) = needing_ack { - let limit = needing_ack + N::CONFIRMATIONS; - assert!(block_being_scanned <= limit); - if block_being_scanned == limit { - break; - } - } - } - - let Ok(block) = network.get_block(block_being_scanned).await else { - warn!("couldn't get block {block_being_scanned}"); - break; - }; - let block_id = block.id(); - - info!("scanning block: {} ({block_being_scanned})", hex::encode(&block_id)); - - // Scan new blocks - // TODO: This lock acquisition may be long-lived... 
- let mut scanner_lock = scanner_hold.write().await; - let scanner = scanner_lock.as_mut().unwrap(); - - let mut has_activation = false; - let mut outputs = vec![]; - let mut completion_block_numbers = vec![]; - for (activation_number, key) in scanner.keys.clone() { - if activation_number > block_being_scanned { - continue; - } - - if activation_number == block_being_scanned { - has_activation = true; - } - - for (id, (block_number, tx, completion)) in network - .get_eventuality_completions(scanner.eventualities.get_mut(&key_vec).unwrap(), &block) - .await - { - info!( - "eventuality {} resolved by {}, as found on chain", - hex::encode(id), - hex::encode(tx.as_ref()) - ); - - completion_block_numbers.push(block_number); - // This must be before the mission of ScannerEvent::Block, per commentary in mod.rs - if !scanner.emit(ScannerEvent::Completed( - key_vec.clone(), - block_number, - id, - tx, - completion, - )) { - return; - } - } - } - // Panic if we've already seen these outputs for output in &outputs { let id = output.id(); @@ -482,99 +304,4 @@ impl Scanner { } scanner.ram_outputs.insert(id); } - - // We could remove this, if instead of doing the first block which passed - // requirements + CONFIRMATIONS, we simply emitted an event for every block where - // `number % CONFIRMATIONS == 0` (once at the final stage for the existing multisig) - // There's no need at this point, yet the latter may be more suitable for modeling... - async fn check_multisig_completed( - db: &mut D, - multisig_completed: &mut mpsc::UnboundedReceiver, - block_number: usize, - ) -> bool { - match multisig_completed.recv().await { - None => { - info!("Scanner handler was dropped. Shutting down?"); - false - } - Some(completed) => { - // Set the retirement block as block_number + CONFIRMATIONS - if completed { - let mut txn = db.txn(); - // The retiring key is the earliest one still around - let retiring_key = ScannerDb::::keys(&txn)[0].1; - // This value is static w.r.t. 
the key - ScannerDb::::save_retirement_block( - &mut txn, - &retiring_key, - block_number + N::CONFIRMATIONS, - ); - txn.commit(); - } - true - } - } - } - - drop(scanner_lock); - // Now that we've dropped the Scanner lock, we need to handle the multisig_completed - // channel before we decide if this block should be fired or not - // (holding the Scanner risks a deadlock) - for block_number in completion_block_numbers { - if !check_multisig_completed::(&mut db, &mut multisig_completed, block_number).await - { - return; - }; - } - - // Reacquire the scanner - let mut scanner_lock = scanner_hold.write().await; - let scanner = scanner_lock.as_mut().unwrap(); - - // Only emit an event if any of the following is true: - // - This is an activation block - // - This is a retirement block - // - There's outputs - // as only those blocks are meaningful and warrant obtaining synchrony over - let is_retirement_block = - ScannerDb::::retirement_block(&db, &scanner.keys[0].1) == Some(block_being_scanned); - let sent_block = if has_activation || is_retirement_block || (!outputs.is_empty()) { - // Save the outputs to disk - let mut txn = db.txn(); - ScannerDb::::save_outputs(&mut txn, &block_id, &outputs); - txn.commit(); - - // Send all outputs - if !scanner.emit(ScannerEvent::Block { is_retirement_block, block: block_id, outputs }) { - return; - } - - // Since we're creating a Batch, mark it as needing ack - scanner.need_ack.push_back(block_being_scanned); - true - } else { - false - }; - - // Remove it from memory - if is_retirement_block { - let retired = scanner.keys.remove(0).1; - scanner.eventualities.remove(retired.to_bytes().as_ref()); - } - drop(scanner_lock); - // If we sent a Block event, once again check multisig_completed - if sent_block && - (!check_multisig_completed::( - &mut db, - &mut multisig_completed, - block_being_scanned, - ) - .await) - { - return; - } - } - } - } -} */ diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index 8617ec186..f176680e3 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -245,7 +245,7 @@ impl ContinuallyRan for ScanForOutputsTask { // Send the scan data to the eventuality task ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); - // Send the in instructions to the report task + // Send the InInstructions to the report task ScanToReportDb::::send_in_instructions(&mut txn, b, in_instructions); // Update the next to scan block ScannerDb::::set_next_to_scan_for_outputs_block(&mut txn, b + 1); From eebb5c27df200385826e2b9f5866d0f0aa2d2b50 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 28 Aug 2024 18:43:40 -0400 Subject: [PATCH 033/179] Implement key retiry --- processor/primitives/src/lib.rs | 13 ++- processor/scanner/src/db.rs | 113 ++++++++++++++--------- processor/scanner/src/eventuality/db.rs | 17 ++++ processor/scanner/src/eventuality/mod.rs | 82 ++++++++++------ processor/scanner/src/lib.rs | 10 +- processor/scanner/src/scan.rs | 17 ++-- 6 files changed, 165 insertions(+), 87 deletions(-) diff --git a/processor/primitives/src/lib.rs b/processor/primitives/src/lib.rs index b0b7ae04e..7a8be2197 100644 --- a/processor/primitives/src/lib.rs +++ b/processor/primitives/src/lib.rs @@ -45,15 +45,20 @@ pub trait Id: } impl Id for [u8; N] where [u8; N]: Default {} -/// A wrapper for a group element which implements the borsh traits. +/// A wrapper for a group element which implements the scale/borsh traits. 
#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct BorshG(pub G); -impl BorshSerialize for BorshG { +pub struct EncodableG(pub G); +impl Encode for EncodableG { + fn using_encoded R>(&self, f: F) -> R { + f(self.0.to_bytes().as_ref()) + } +} +impl BorshSerialize for EncodableG { fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { writer.write_all(self.0.to_bytes().as_ref()) } } -impl BorshDeserialize for BorshG { +impl BorshDeserialize for EncodableG { fn deserialize_reader(reader: &mut R) -> borsh::io::Result { let mut repr = G::Repr::default(); reader.read_exact(repr.as_mut())?; diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index d53bf7c73..a37e05f4e 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -7,7 +7,7 @@ use serai_db::{Get, DbTxn, create_db, db_channel}; use serai_in_instructions_primitives::InInstructionWithBalance; -use primitives::{ReceivedOutput, BorshG}; +use primitives::{ReceivedOutput, EncodableG}; use crate::{lifetime::LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, Return}; @@ -24,6 +24,7 @@ struct SeraiKeyDbEntry { pub(crate) struct SeraiKey { pub(crate) key: K, pub(crate) stage: LifetimeStage, + pub(crate) activation_block_number: u64, pub(crate) block_at_which_reporting_starts: u64, } @@ -45,11 +46,10 @@ impl OutputWithInInstruction { create_db!( Scanner { ActiveKeys: () -> Vec>, + RetireAt: (key: K) -> u64, // The next block to scan for received outputs NextToScanForOutputsBlock: () -> u64, - // The next block to check for resolving eventualities - NextToCheckForEventualitiesBlock: () -> u64, // The next block to potentially report NextToPotentiallyReportBlock: () -> u64, // Highest acknowledged block @@ -95,29 +95,52 @@ impl ScannerDb { /// activation_block_number is inclusive, so the key will be scanned for starting at the /// specified block. pub(crate) fn queue_key(txn: &mut impl DbTxn, activation_block_number: u64, key: KeyFor) { - // Set this block as notable + // Set the block which has a key activate as notable NotableBlock::set(txn, activation_block_number, &()); // TODO: Panic if we've ever seen this key before // Push the key - let mut keys: Vec>>> = ActiveKeys::get(txn).unwrap_or(vec![]); - keys.push(SeraiKeyDbEntry { activation_block_number, key: BorshG(key) }); + let mut keys: Vec>>> = + ActiveKeys::get(txn).unwrap_or(vec![]); + keys.push(SeraiKeyDbEntry { activation_block_number, key: EncodableG(key) }); ActiveKeys::set(txn, &keys); } /// Retire a key. /// /// The key retired must be the oldest key. There must be another key actively tracked. 
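To make the intended call order of these key-management functions concrete, here is a minimal, hedged sketch (not part of this patch; `db`, the feed `S`, and `key_a`/`key_b` with their block numbers are assumed purely for illustration):

  let mut txn = db.txn();
  // key_a activates at block 1_000, flagging that block as notable
  ScannerDb::<S>::queue_key(&mut txn, 1_000, key_a);
  // A successor key is queued, so key_a is no longer the only active key
  ScannerDb::<S>::queue_key(&mut txn, 2_000, key_b);
  // The Eventuality task later schedules key_a's retirement
  ScannerDb::<S>::retire_key(&mut txn, 2_010, key_a);
  // Once the next-to-scan block reaches 2_010, tidy_keys removes key_a from
  // ActiveKeys and deletes its RetireAt entry
  ScannerDb::<S>::tidy_keys(&mut txn);
  txn.commit();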
- // TODO: This will be called from the Eventuality task yet this field is read by the scan task - // We need to write the argument for its safety - pub(crate) fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { - let mut keys: Vec>>> = + pub(crate) fn retire_key(txn: &mut impl DbTxn, at_block: u64, key: KeyFor) { + // Set the block which has a key retire as notable + NotableBlock::set(txn, at_block, &()); + + let keys: Vec>>> = ActiveKeys::get(txn).expect("retiring key yet no active keys"); assert!(keys.len() > 1, "retiring our only key"); assert_eq!(keys[0].key.0, key, "not retiring the oldest key"); - keys.remove(0); - ActiveKeys::set(txn, &keys); + + RetireAt::set(txn, EncodableG(key), &at_block); + } + pub(crate) fn tidy_keys(txn: &mut impl DbTxn) { + let mut keys: Vec>>> = + ActiveKeys::get(txn).expect("retiring key yet no active keys"); + let Some(key) = keys.first() else { return }; + + // Get the block we're scanning for next + let block_number = Self::next_to_scan_for_outputs_block(txn).expect( + "tidying keys despite never setting the next to scan for block (done on initialization)", + ); + // If this key is scheduled for retiry... + if let Some(retire_at) = RetireAt::get(txn, key.key) { + // And is retired by/at this block... + if retire_at <= block_number { + // Remove it from the list of keys + let key = keys.remove(0); + ActiveKeys::set(txn, &keys); + // Also clean up the retiry block + RetireAt::del(txn, key.key); + } + } } /// Fetch the active keys, as of the next-to-scan-for-outputs Block. /// @@ -129,9 +152,16 @@ impl ScannerDb { // If we've scanned block 1,000,000, we can't answer the active keys as of block 0 let block_number = Self::next_to_scan_for_outputs_block(getter)?; - let raw_keys: Vec>>> = ActiveKeys::get(getter)?; + let raw_keys: Vec>>> = ActiveKeys::get(getter)?; let mut keys = Vec::with_capacity(2); for i in 0 .. raw_keys.len() { + // Ensure this key isn't retired + if let Some(retire_at) = RetireAt::get(getter, raw_keys[i].key) { + if retire_at <= block_number { + continue; + } + } + // Ensure this key isn't yet to activate if block_number < raw_keys[i].activation_block_number { continue; } @@ -141,7 +171,12 @@ impl ScannerDb { raw_keys[i].activation_block_number, raw_keys.get(i + 1).map(|key| key.activation_block_number), ); - keys.push(SeraiKey { key: raw_keys[i].key.0, stage, block_at_which_reporting_starts }); + keys.push(SeraiKey { + key: raw_keys[i].key.0, + stage, + activation_block_number: raw_keys[i].activation_block_number, + block_at_which_reporting_starts, + }); } assert!(keys.len() <= 2, "more than two keys active"); Some(keys) @@ -154,19 +189,9 @@ impl ScannerDb { ); NextToScanForOutputsBlock::set(txn, &start_block); - // We can receive outputs in this block, but any descending transactions will be in the next - // block. This, with the check on-set, creates a bound that this value in the DB is non-zero. - NextToCheckForEventualitiesBlock::set(txn, &(start_block + 1)); NextToPotentiallyReportBlock::set(txn, &start_block); } - pub(crate) fn latest_scannable_block(getter: &impl Get) -> Option { - // We can only scan up to whatever block we've checked the Eventualities of, plus the window - // length. 
Since this returns an inclusive bound, we need to subtract 1
-    // See `eventuality.rs` for more info
-    NextToCheckForEventualitiesBlock::get(getter).map(|b| b + S::WINDOW_LENGTH - 1)
-  }
-
   pub(crate) fn set_next_to_scan_for_outputs_block(
     txn: &mut impl DbTxn,
     next_to_scan_for_outputs_block: u64,
@@ -177,20 +202,6 @@ impl ScannerDb {
     NextToScanForOutputsBlock::get(getter)
   }
 
-  pub(crate) fn set_next_to_check_for_eventualities_block(
-    txn: &mut impl DbTxn,
-    next_to_check_for_eventualities_block: u64,
-  ) {
-    assert!(
-      next_to_check_for_eventualities_block != 0,
-      "next to check for eventualities block was 0 when it's bound non-zero"
-    );
-    NextToCheckForEventualitiesBlock::set(txn, &next_to_check_for_eventualities_block);
-  }
-  pub(crate) fn next_to_check_for_eventualities_block(getter: &impl Get) -> Option {
-    NextToCheckForEventualitiesBlock::get(getter)
-  }
-
   pub(crate) fn set_next_to_potentially_report_block(
     txn: &mut impl DbTxn,
     next_to_potentially_report_block: u64,
@@ -229,7 +240,15 @@ impl ScannerDb {
     SerializedQueuedOutputs::set(txn, queue_for_block, &outputs);
   }
 
-  pub(crate) fn flag_notable(txn: &mut impl DbTxn, block_number: u64) {
+  /*
+    This is so verbosely named as the DB itself already flags upon external outputs. Specifically,
+    if any block yields External outputs to accumulate, we flag it as notable.
+
+    There is the slight edge case where some External outputs are queued for accumulation later. We
+    consider those outputs received as of the block they're queued to (maintaining the policy that
+    any block in which we receive outputs is notable).
+  */
+  pub(crate) fn flag_notable_due_to_non_external_output(txn: &mut impl DbTxn, block_number: u64) {
     assert!(
       NextToPotentiallyReportBlock::get(txn).unwrap() <= block_number,
       "already potentially reported a block we're only now flagging as notable"
     );
@@ -298,6 +317,17 @@ db_channel!
{ pub(crate) struct ScanToEventualityDb(PhantomData); impl ScanToEventualityDb { pub(crate) fn send_scan_data(txn: &mut impl DbTxn, block_number: u64, data: &SenderScanData) { + // If we received an External output to accumulate, or have an External output to forward + // (meaning we received an External output), or have an External output to return (again + // meaning we received an External output), set this block as notable due to receiving outputs + // The non-External output case is covered with `flag_notable_due_to_non_external_output` + if !(data.received_external_outputs.is_empty() && + data.forwards.is_empty() && + data.returns.is_empty()) + { + NotableBlock::set(txn, block_number, &()); + } + /* SerializedForwardedOutputsIndex: (block_number: u64) -> Vec, SerializedForwardedOutput: (output_id: &[u8]) -> Vec, @@ -357,11 +387,6 @@ impl ScanToReportDb { block_number: u64, in_instructions: Vec, ) { - if !in_instructions.is_empty() { - // Set this block as notable - NotableBlock::set(txn, block_number, &()); - } - InInstructions::send(txn, (), &BlockBoundInInstructions { block_number, in_instructions }); } diff --git a/processor/scanner/src/eventuality/db.rs b/processor/scanner/src/eventuality/db.rs index e379532d2..baed33c47 100644 --- a/processor/scanner/src/eventuality/db.rs +++ b/processor/scanner/src/eventuality/db.rs @@ -13,12 +13,29 @@ impl Borshy for T {} create_db!( ScannerEventuality { + // The next block to check for resolving eventualities + NextToCheckForEventualitiesBlock: () -> u64, + SerializedEventualities: () -> Vec, } ); pub(crate) struct EventualityDb(PhantomData); impl EventualityDb { + pub(crate) fn set_next_to_check_for_eventualities_block( + txn: &mut impl DbTxn, + next_to_check_for_eventualities_block: u64, + ) { + assert!( + next_to_check_for_eventualities_block != 0, + "next-to-check-for-eventualities block was 0 when it's bound non-zero" + ); + NextToCheckForEventualitiesBlock::set(txn, &next_to_check_for_eventualities_block); + } + pub(crate) fn next_to_check_for_eventualities_block(getter: &impl Get) -> Option { + NextToCheckForEventualitiesBlock::get(getter) + } + pub(crate) fn set_eventualities( txn: &mut impl DbTxn, key: KeyFor, diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 3a472ce2d..f682bf367 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -1,6 +1,6 @@ use group::GroupEncoding; -use serai_db::{DbTxn, Db}; +use serai_db::{Get, DbTxn, Db}; use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Eventuality, Block}; @@ -14,6 +14,16 @@ use crate::{ mod db; use db::EventualityDb; +/// The latest scannable block, which is determined by this task. +/// +/// This task decides when a key retires, which impacts the scan task. Accordingly, the scanner is +/// only allowed to scan `S::WINDOW_LENGTH - 1` blocks ahead so we can safely schedule keys to +/// retire `S::WINDOW_LENGTH` blocks out. +pub(crate) fn latest_scannable_block(getter: &impl Get) -> Option { + EventualityDb::::next_to_check_for_eventualities_block(getter) + .map(|b| b + S::WINDOW_LENGTH - 1) +} + /* When we scan a block, we receive outputs. 
When this block is acknowledged, we accumulate those outputs into some scheduler, potentially causing certain transactions to begin their signing @@ -64,6 +74,21 @@ struct EventualityTask> { scheduler: Sch, } +impl> EventualityTask { + pub(crate) fn new(mut db: D, feed: S, scheduler: Sch, start_block: u64) -> Self { + if EventualityDb::::next_to_check_for_eventualities_block(&db).is_none() { + // Initialize the DB + let mut txn = db.txn(); + // We can receive outputs in `start_block`, but any descending transactions will be in the + // next block + EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, start_block + 1); + txn.commit(); + } + + Self { db, feed, scheduler } + } +} + #[async_trait::async_trait] impl> ContinuallyRan for EventualityTask { async fn run_iteration(&mut self) -> Result { @@ -93,7 +118,7 @@ impl> ContinuallyRan for EventualityTas .expect("EventualityTask run before writing the start block"); // Fetch the next block to check - let next_to_check = ScannerDb::::next_to_check_for_eventualities_block(&self.db) + let next_to_check = EventualityDb::::next_to_check_for_eventualities_block(&self.db) .expect("EventualityTask run before writing the start block"); // Check all blocks @@ -121,21 +146,19 @@ impl> ContinuallyRan for EventualityTas /* This is proper as the keys for the next to scan block (at most `WINDOW_LENGTH` ahead, - which is `<= CONFIRMATIONS`) will be the keys to use here. + which is `<= CONFIRMATIONS`) will be the keys to use here, with only minor edge cases. - If we had added a new key (which hasn't actually actived by the block we're currently - working on), it won't have any Eventualities for at least `CONFIRMATIONS` blocks (so it'd - have no impact here). + This may include a key which has yet to activate by our perception. We can simply drop + those. - As for retiring a key, that's done on this task's timeline. We ensure we don't bork the - scanner by officially retiring the key `WINDOW_LENGTH` blocks in the future (ensuring the - scanner never has a malleable view of the keys). + This may not include a key which has retired by the next-to-scan block. This task is the + one which decides when to retire a key, and when it marks a key to be retired, it is done + with it. Accordingly, it's not an issue if such a key was dropped. 
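    As a hedged, standalone illustration of that filter (not from this patch), keeping only
    the keys which have activated by block `b`:

      fn active_by_block(mut activation_block_numbers: Vec<u64>, b: u64) -> Vec<u64> {
        // A key is usable at block b once its activation height is at or below b
        activation_block_numbers.retain(|activation| *activation <= b);
        activation_block_numbers
      }
      // active_by_block(vec![90, 105], 100) == vec![90]: the key activating at
      // 105 is dropped for block 100, despite being active as-of next-to-scan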
*/
-    // TODO: Ensure the add key/remove key DB fns are called by the same task to prevent issues
-    // there
-    // TODO: On register eventuality, assert the above timeline assumptions
       let mut keys = ScannerDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db)
         .expect("scanning for a blockchain without any keys set");
+      // Since the next-to-scan block is ahead of us, drop keys which have yet to actually activate
+      keys.retain(|key| key.activation_block_number <= b);
 
       let mut txn = self.db.txn();
 
@@ -146,20 +169,16 @@ impl> ContinuallyRan for EventualityTas
         scan_data;
       let mut outputs = received_external_outputs;
 
-      for key in keys {
-        let (eventualities_is_empty, completed_eventualities) = {
+      for key in &keys {
+        let completed_eventualities = {
           let mut eventualities = EventualityDb::::eventualities(&txn, key.key);
           let completed_eventualities = block.check_for_eventuality_resolutions(&mut eventualities);
           EventualityDb::::set_eventualities(&mut txn, key.key, &eventualities);
-          (eventualities.active_eventualities.is_empty(), completed_eventualities)
+          completed_eventualities
         };
 
-        for (tx, completed_eventuality) in completed_eventualities {
-          log::info!(
-            "eventuality {} resolved by {}",
-            hex::encode(completed_eventuality.id()),
-            hex::encode(tx.as_ref())
-          );
+        for tx in completed_eventualities.keys() {
+          log::info!("eventuality resolved by {}", hex::encode(tx.as_ref()));
         }
 
         // Fetch all non-External outputs
@@ -221,10 +240,12 @@ impl> ContinuallyRan for EventualityTas
         outputs.extend(non_external_outputs);
       }
 
+      // Update the scheduler
       let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns };
       scheduler_update.outputs.sort_by(sort_outputs);
       scheduler_update.forwards.sort_by(sort_outputs);
       scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output));
+      // Intake the new Eventualities
       let new_eventualities = self.scheduler.update(&mut txn, scheduler_update);
       for (key, new_eventualities) in new_eventualities {
         let key = {
@@ -234,6 +255,11 @@ impl> ContinuallyRan for EventualityTas
           KeyFor::::from_bytes(&key_repr).unwrap()
         };
 
+        keys
+          .iter()
+          .find(|serai_key| serai_key.key == key)
+          .expect("queueing eventuality for key which isn't active");
+
         let mut eventualities = EventualityDb::::eventualities(&txn, key);
         for new_eventuality in new_eventualities {
           eventualities.active_eventualities.insert(new_eventuality.lookup(), new_eventuality);
@@ -241,24 +267,26 @@ impl> ContinuallyRan for EventualityTas
         EventualityDb::::set_eventualities(&mut txn, key, &eventualities);
       }
 
-      for key in keys {
+      // Now that we've intaked any Eventualities caused, check if we're retiring any keys
+      for key in &keys {
         if key.stage == LifetimeStage::Finishing {
           let eventualities = EventualityDb::::eventualities(&txn, key.key);
+          // TODO: This assumes the Scheduler is empty
           if eventualities.active_eventualities.is_empty() {
             log::info!(
               "key {} has finished and is being retired",
               hex::encode(key.key.to_bytes().as_ref())
             );
 
-            ScannerDb::::flag_notable(&mut txn, b + S::WINDOW_LENGTH);
-            // TODO: Retire the key
-            todo!("TODO")
+            // Retire this key `WINDOW_LENGTH` blocks in the future to ensure the scan task never
+            // has a malleable view of the keys.
+ ScannerDb::::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key); } } } - // Update the next to check block - ScannerDb::::set_next_to_check_for_eventualities_block(&mut txn, next_to_check); + // Update the next-to-check block + EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, next_to_check); txn.commit(); } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 2d19207fe..b363faa15 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -14,6 +14,7 @@ mod lifetime; // Database schema definition and associated functions. mod db; +use db::ScannerDb; // Task to index the blockchain, ensuring we don't reorganize finalized blocks. mod index; // Scans blocks for received coins. @@ -208,7 +209,9 @@ impl Scanner { "acknowledging a block which wasn't notable" ); ScannerDb::::set_highest_acknowledged_block(txn, block_number); - ScannerDb::::queue_key(txn, block_number + S::WINDOW_LENGTH); + if let Some(key_to_activate) = key_to_activate { + ScannerDb::::queue_key(txn, block_number + S::WINDOW_LENGTH, key_to_activate); + } } /// Queue Burns. @@ -249,11 +252,6 @@ impl ScannerDb { // Return this block's outputs so they can be pruned from the RAM cache outputs } - fn latest_scanned_block(getter: &G) -> Option { - getter - .get(Self::scanned_block_key()) - .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap()).try_into().unwrap()) - } } // Panic if we've already seen these outputs diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan.rs index f176680e3..201f64a1b 100644 --- a/processor/scanner/src/scan.rs +++ b/processor/scanner/src/scan.rs @@ -13,6 +13,7 @@ use crate::{ lifetime::LifetimeStage, db::{OutputWithInInstruction, SenderScanData, ScannerDb, ScanToReportDb, ScanToEventualityDb}, BlockExt, ScannerFeed, AddressFor, OutputFor, Return, sort_outputs, + eventuality::latest_scannable_block, }; // Construct an InInstruction from an external output. @@ -69,7 +70,7 @@ struct ScanForOutputsTask { impl ContinuallyRan for ScanForOutputsTask { async fn run_iteration(&mut self) -> Result { // Fetch the safe to scan block - let latest_scannable = ScannerDb::::latest_scannable_block(&self.db) + let latest_scannable = latest_scannable_block::(&self.db) .expect("ScanForOutputsTask run before writing the start block"); // Fetch the next block to scan let next_to_scan = ScannerDb::::next_to_scan_for_outputs_block(&self.db) @@ -80,12 +81,16 @@ impl ContinuallyRan for ScanForOutputsTask { log::info!("scanning block: {} ({b})", hex::encode(block.id())); - assert_eq!(ScannerDb::::next_to_scan_for_outputs_block(&self.db).unwrap(), b); - let keys = ScannerDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db) - .expect("scanning for a blockchain without any keys set"); - let mut txn = self.db.txn(); + assert_eq!(ScannerDb::::next_to_scan_for_outputs_block(&txn).unwrap(), b); + + // Tidy the keys, then fetch them + // We don't have to tidy them here, we just have to somewhere, so why not here? 
+ ScannerDb::::tidy_keys(&mut txn); + let keys = ScannerDb::::active_keys_as_of_next_to_scan_for_outputs_block(&txn) + .expect("scanning for a blockchain without any keys set"); + let mut scan_data = SenderScanData { block_number: b, received_external_outputs: vec![], @@ -156,7 +161,7 @@ impl ContinuallyRan for ScanForOutputsTask { // We ensure it's over the dust limit to prevent people sending 1 satoshi from causing // an invocation of a consensus/signing protocol if balance.amount.0 >= self.feed.dust(balance.coin).0 { - ScannerDb::::flag_notable(&mut txn, b); + ScannerDb::::flag_notable_due_to_non_external_output(&mut txn, b); } continue; } From ff02b4079c329cc5b50cc198fa62c05b4cc4cd2b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 28 Aug 2024 18:46:39 -0400 Subject: [PATCH 034/179] Make scan.rs a folder, not a file --- processor/scanner/src/{scan.rs => scan/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename processor/scanner/src/{scan.rs => scan/mod.rs} (100%) diff --git a/processor/scanner/src/scan.rs b/processor/scanner/src/scan/mod.rs similarity index 100% rename from processor/scanner/src/scan.rs rename to processor/scanner/src/scan/mod.rs From a92330c2a654bc65a5690c479319e1e1bef92fb0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 28 Aug 2024 19:00:02 -0400 Subject: [PATCH 035/179] Add ScanDb --- processor/scanner/src/db.rs | 50 +++----------------- processor/scanner/src/eventuality/mod.rs | 5 +- processor/scanner/src/report.rs | 6 ++- processor/scanner/src/scan/db.rs | 59 ++++++++++++++++++++++++ processor/scanner/src/scan/mod.rs | 46 +++++++++++++++--- 5 files changed, 113 insertions(+), 53 deletions(-) create mode 100644 processor/scanner/src/scan/db.rs diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index a37e05f4e..e3e31c38c 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -9,7 +9,10 @@ use serai_in_instructions_primitives::InInstructionWithBalance; use primitives::{ReceivedOutput, EncodableG}; -use crate::{lifetime::LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, Return}; +use crate::{ + lifetime::LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, Return, + scan::next_to_scan_for_outputs_block, +}; // The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. 
trait Borshy: BorshSerialize + BorshDeserialize {} @@ -35,7 +38,7 @@ pub(crate) struct OutputWithInInstruction { } impl OutputWithInInstruction { - fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + pub(crate) fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { self.output.write(writer)?; // TODO self.return_address.write(writer)?; self.in_instruction.encode_to(writer); @@ -48,8 +51,6 @@ create_db!( ActiveKeys: () -> Vec>, RetireAt: (key: K) -> u64, - // The next block to scan for received outputs - NextToScanForOutputsBlock: () -> u64, // The next block to potentially report NextToPotentiallyReportBlock: () -> u64, // Highest acknowledged block @@ -74,9 +75,6 @@ create_db!( */ // This collapses from `bool` to `()`, using if the value was set for true and false otherwise NotableBlock: (number: u64) -> (), - - SerializedQueuedOutputs: (block_number: u64) -> Vec, - SerializedOutputs: (block_number: u64) -> Vec, } ); @@ -127,7 +125,7 @@ impl ScannerDb { let Some(key) = keys.first() else { return }; // Get the block we're scanning for next - let block_number = Self::next_to_scan_for_outputs_block(txn).expect( + let block_number = next_to_scan_for_outputs_block::(txn).expect( "tidying keys despite never setting the next to scan for block (done on initialization)", ); // If this key is scheduled for retiry... @@ -150,7 +148,7 @@ impl ScannerDb { ) -> Option>>> { // We don't take this as an argument as we don't keep all historical keys in memory // If we've scanned block 1,000,000, we can't answer the active keys as of block 0 - let block_number = Self::next_to_scan_for_outputs_block(getter)?; + let block_number = next_to_scan_for_outputs_block::(getter)?; let raw_keys: Vec>>> = ActiveKeys::get(getter)?; let mut keys = Vec::with_capacity(2); @@ -183,25 +181,9 @@ impl ScannerDb { } pub(crate) fn set_start_block(txn: &mut impl DbTxn, start_block: u64, id: [u8; 32]) { - assert!( - NextToScanForOutputsBlock::get(txn).is_none(), - "setting start block but prior set start block" - ); - - NextToScanForOutputsBlock::set(txn, &start_block); NextToPotentiallyReportBlock::set(txn, &start_block); } - pub(crate) fn set_next_to_scan_for_outputs_block( - txn: &mut impl DbTxn, - next_to_scan_for_outputs_block: u64, - ) { - NextToScanForOutputsBlock::set(txn, &next_to_scan_for_outputs_block); - } - pub(crate) fn next_to_scan_for_outputs_block(getter: &impl Get) -> Option { - NextToScanForOutputsBlock::get(getter) - } - pub(crate) fn set_next_to_potentially_report_block( txn: &mut impl DbTxn, next_to_potentially_report_block: u64, @@ -222,24 +204,6 @@ impl ScannerDb { HighestAcknowledgedBlock::get(getter) } - pub(crate) fn take_queued_outputs( - txn: &mut impl DbTxn, - block_number: u64, - ) -> Vec> { - todo!("TODO") - } - - pub(crate) fn queue_output_until_block( - txn: &mut impl DbTxn, - queue_for_block: u64, - output: &OutputWithInInstruction, - ) { - let mut outputs = - SerializedQueuedOutputs::get(txn, queue_for_block).unwrap_or(Vec::with_capacity(128)); - output.write(&mut outputs).unwrap(); - SerializedQueuedOutputs::set(txn, queue_for_block, &outputs); - } - /* This is so verbosely named as the DB itself already flags upon external outputs. Specifically, if any block yields External outputs to accumulate, we flag it as notable. 
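The `NotableBlock` table referenced above collapses a boolean into mere key presence. As a
hedged sketch of that convention (the `txn` and `block_number` values are assumed):

  // Presence encodes true, absence encodes false; there is no stored payload
  NotableBlock::set(txn, block_number, &());
  let is_notable = NotableBlock::get(txn, block_number).is_some();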
diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index f682bf367..a29e5301d 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -9,6 +9,7 @@ use crate::{ lifetime::LifetimeStage, db::{OutputWithInInstruction, ReceiverScanData, ScannerDb, ScanToEventualityDb}, BlockExt, ScannerFeed, KeyFor, SchedulerUpdate, Scheduler, sort_outputs, + scan::{next_to_scan_for_outputs_block, queue_output_until_block}, }; mod db; @@ -104,7 +105,7 @@ impl> ContinuallyRan for EventualityTas */ let exclusive_upper_bound = { // Fetch the next to scan block - let next_to_scan = ScannerDb::::next_to_scan_for_outputs_block(&self.db) + let next_to_scan = next_to_scan_for_outputs_block::(&self.db) .expect("EventualityTask run before writing the start block"); // If we haven't done any work, return if next_to_scan == 0 { @@ -229,7 +230,7 @@ impl> ContinuallyRan for EventualityTas &txn, &forwarded, ) .expect("forwarded an output yet didn't save its InInstruction to the DB"); - ScannerDb::::queue_output_until_block( + queue_output_until_block::( &mut txn, b + S::WINDOW_LENGTH, &OutputWithInInstruction { output: output.clone(), return_address, in_instruction }, diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report.rs index 39a721069..f69459f06 100644 --- a/processor/scanner/src/report.rs +++ b/processor/scanner/src/report.rs @@ -7,7 +7,9 @@ use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; // TODO: Localize to Report? use crate::{ db::{ScannerDb, ScanToReportDb}, - index, ScannerFeed, ContinuallyRan, + index, + scan::next_to_scan_for_outputs_block, + ScannerFeed, ContinuallyRan, }; /* @@ -27,7 +29,7 @@ impl ContinuallyRan for ReportTask { async fn run_iteration(&mut self) -> Result { let highest_reportable = { // Fetch the next to scan block - let next_to_scan = ScannerDb::::next_to_scan_for_outputs_block(&self.db) + let next_to_scan = next_to_scan_for_outputs_block::(&self.db) .expect("ReportTask run before writing the start block"); // If we haven't done any work, return if next_to_scan == 0 { diff --git a/processor/scanner/src/scan/db.rs b/processor/scanner/src/scan/db.rs new file mode 100644 index 000000000..905e10be8 --- /dev/null +++ b/processor/scanner/src/scan/db.rs @@ -0,0 +1,59 @@ +use core::marker::PhantomData; +use std::io; + +use scale::Encode; +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, create_db}; + +use serai_in_instructions_primitives::InInstructionWithBalance; + +use primitives::{EncodableG, ReceivedOutput, EventualityTracker}; + +use crate::{ + lifetime::LifetimeStage, db::OutputWithInInstruction, ScannerFeed, KeyFor, AddressFor, OutputFor, + EventualityFor, Return, scan::next_to_scan_for_outputs_block, +}; + +// The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. 
+trait Borshy: BorshSerialize + BorshDeserialize {} +impl Borshy for T {} + +create_db!( + ScannerScan { + // The next block to scan for received outputs + NextToScanForOutputsBlock: () -> u64, + + SerializedQueuedOutputs: (block_number: u64) -> Vec, + } +); + +pub(crate) struct ScanDb(PhantomData); +impl ScanDb { + pub(crate) fn set_next_to_scan_for_outputs_block( + txn: &mut impl DbTxn, + next_to_scan_for_outputs_block: u64, + ) { + NextToScanForOutputsBlock::set(txn, &next_to_scan_for_outputs_block); + } + pub(crate) fn next_to_scan_for_outputs_block(getter: &impl Get) -> Option { + NextToScanForOutputsBlock::get(getter) + } + + pub(crate) fn take_queued_outputs( + txn: &mut impl DbTxn, + block_number: u64, + ) -> Vec> { + todo!("TODO") + } + + pub(crate) fn queue_output_until_block( + txn: &mut impl DbTxn, + queue_for_block: u64, + output: &OutputWithInInstruction, + ) { + let mut outputs = + SerializedQueuedOutputs::get(txn, queue_for_block).unwrap_or(Vec::with_capacity(128)); + output.write(&mut outputs).unwrap(); + SerializedQueuedOutputs::set(txn, queue_for_block, &outputs); + } +} diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index 201f64a1b..1f143809b 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -1,5 +1,5 @@ use scale::Decode; -use serai_db::{DbTxn, Db}; +use serai_db::{Get, DbTxn, Db}; use serai_primitives::MAX_DATA_LEN; use serai_in_instructions_primitives::{ @@ -16,6 +16,27 @@ use crate::{ eventuality::latest_scannable_block, }; +mod db; +use db::ScanDb; + +pub(crate) fn next_to_scan_for_outputs_block(getter: &impl Get) -> Option { + ScanDb::::next_to_scan_for_outputs_block(getter) +} + +pub(crate) fn queue_output_until_block( + txn: &mut impl DbTxn, + queue_for_block: u64, + output: &OutputWithInInstruction, +) { + assert!( + queue_for_block >= + next_to_scan_for_outputs_block::(txn) + .expect("queueing an output despite no next-to-scan-for-outputs block"), + "queueing an output for a block already scanned" + ); + ScanDb::::queue_output_until_block(txn, queue_for_block, output) +} + // Construct an InInstruction from an external output. // // Also returns the address to return the coins to upon error. 
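Looking back at `take_queued_outputs` above, which this patch leaves as a `todo!`, here is a
hedged sketch of the drain it presumably needs — assuming `OutputWithInInstruction` gains a
paired `read` (one is in fact added later in this series) and that the `create_db` macro's
generated `del` is available:

  let mut queued = vec![];
  if let Some(outputs) = SerializedQueuedOutputs::get(txn, block_number) {
    let mut slice = outputs.as_slice();
    // queue_output_until_block appends outputs back-to-back into one Vec,
    // so read until the buffer is exhausted
    while !slice.is_empty() {
      queued.push(OutputWithInInstruction::<S>::read(&mut slice).unwrap());
    }
    SerializedQueuedOutputs::del(txn, block_number);
  }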
@@ -66,6 +87,19 @@ struct ScanForOutputsTask { feed: S, } +impl ScanForOutputsTask { + pub(crate) fn new(mut db: D, feed: S, start_block: u64) -> Self { + if ScanDb::::next_to_scan_for_outputs_block(&db).is_none() { + // Initialize the DB + let mut txn = db.txn(); + ScanDb::::set_next_to_scan_for_outputs_block(&mut txn, start_block); + txn.commit(); + } + + Self { db, feed } + } +} + #[async_trait::async_trait] impl ContinuallyRan for ScanForOutputsTask { async fn run_iteration(&mut self) -> Result { @@ -73,7 +107,7 @@ impl ContinuallyRan for ScanForOutputsTask { let latest_scannable = latest_scannable_block::(&self.db) .expect("ScanForOutputsTask run before writing the start block"); // Fetch the next block to scan - let next_to_scan = ScannerDb::::next_to_scan_for_outputs_block(&self.db) + let next_to_scan = ScanDb::::next_to_scan_for_outputs_block(&self.db) .expect("ScanForOutputsTask run before writing the start block"); for b in next_to_scan ..= latest_scannable { @@ -83,7 +117,7 @@ impl ContinuallyRan for ScanForOutputsTask { let mut txn = self.db.txn(); - assert_eq!(ScannerDb::::next_to_scan_for_outputs_block(&txn).unwrap(), b); + assert_eq!(ScanDb::::next_to_scan_for_outputs_block(&txn).unwrap(), b); // Tidy the keys, then fetch them // We don't have to tidy them here, we just have to somewhere, so why not here? @@ -100,7 +134,7 @@ impl ContinuallyRan for ScanForOutputsTask { let mut in_instructions = vec![]; let queued_outputs = { - let mut queued_outputs = ScannerDb::::take_queued_outputs(&mut txn, b); + let mut queued_outputs = ScanDb::::take_queued_outputs(&mut txn, b); // Sort the queued outputs in case they weren't queued in a deterministic fashion queued_outputs.sort_by(|a, b| sort_outputs(&a.output, &b.output)); queued_outputs @@ -217,7 +251,7 @@ impl ContinuallyRan for ScanForOutputsTask { // This multisig isn't yet reporting its External outputs to avoid a DoS // Queue the output to be reported when this multisig starts reporting LifetimeStage::ActiveYetNotReporting => { - ScannerDb::::queue_output_until_block( + ScanDb::::queue_output_until_block( &mut txn, key.block_at_which_reporting_starts, &output_with_in_instruction, @@ -253,7 +287,7 @@ impl ContinuallyRan for ScanForOutputsTask { // Send the InInstructions to the report task ScanToReportDb::::send_in_instructions(&mut txn, b, in_instructions); // Update the next to scan block - ScannerDb::::set_next_to_scan_for_outputs_block(&mut txn, b + 1); + ScanDb::::set_next_to_scan_for_outputs_block(&mut txn, b + 1); txn.commit(); } From bba553a9bcf5fb27cd8f8c3269a2e1a4eb7cf15c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 28 Aug 2024 19:37:44 -0400 Subject: [PATCH 036/179] Make report.rs a folder, not a file --- processor/scanner/src/{report.rs => report/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename processor/scanner/src/{report.rs => report/mod.rs} (100%) diff --git a/processor/scanner/src/report.rs b/processor/scanner/src/report/mod.rs similarity index 100% rename from processor/scanner/src/report.rs rename to processor/scanner/src/report/mod.rs From 9e134232096b4c099705d656628fab66ae0d526b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 28 Aug 2024 19:58:28 -0400 Subject: [PATCH 037/179] Add ReportDb --- processor/scanner/src/db.rs | 24 +++--------------- processor/scanner/src/eventuality/mod.rs | 18 +++++++------- processor/scanner/src/report/db.rs | 27 +++++++++++++++++++++ processor/scanner/src/report/mod.rs | 31 ++++++++++++++++++------ processor/scanner/src/scan/mod.rs | 4 ++- 5 
files changed, 65 insertions(+), 39 deletions(-) create mode 100644 processor/scanner/src/report/db.rs diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index e3e31c38c..7a2d68a97 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -47,7 +47,7 @@ impl OutputWithInInstruction { } create_db!( - Scanner { + ScannerGlobal { ActiveKeys: () -> Vec>, RetireAt: (key: K) -> u64, @@ -78,8 +78,8 @@ create_db!( } ); -pub(crate) struct ScannerDb(PhantomData); -impl ScannerDb { +pub(crate) struct ScannerGlobalDb(PhantomData); +impl ScannerGlobalDb { /// Queue a key. /// /// Keys may be queued whenever, so long as they're scheduled to activate `WINDOW_LENGTH` blocks @@ -180,20 +180,6 @@ impl ScannerDb { Some(keys) } - pub(crate) fn set_start_block(txn: &mut impl DbTxn, start_block: u64, id: [u8; 32]) { - NextToPotentiallyReportBlock::set(txn, &start_block); - } - - pub(crate) fn set_next_to_potentially_report_block( - txn: &mut impl DbTxn, - next_to_potentially_report_block: u64, - ) { - NextToPotentiallyReportBlock::set(txn, &next_to_potentially_report_block); - } - pub(crate) fn next_to_potentially_report_block(getter: &impl Get) -> Option { - NextToPotentiallyReportBlock::get(getter) - } - pub(crate) fn set_highest_acknowledged_block( txn: &mut impl DbTxn, highest_acknowledged_block: u64, @@ -224,10 +210,6 @@ impl ScannerDb { NotableBlock::get(getter, number).is_some() } - pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 { - todo!("TODO") - } - pub(crate) fn return_address_and_in_instruction_for_forwarded_output( getter: &impl Get, output: & as ReceivedOutput, AddressFor>>::Id, diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index a29e5301d..e10aab549 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -4,10 +4,9 @@ use serai_db::{Get, DbTxn, Db}; use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Eventuality, Block}; -// TODO: Localize to EventualityDb? use crate::{ lifetime::LifetimeStage, - db::{OutputWithInInstruction, ReceiverScanData, ScannerDb, ScanToEventualityDb}, + db::{OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, ScanToEventualityDb}, BlockExt, ScannerFeed, KeyFor, SchedulerUpdate, Scheduler, sort_outputs, scan::{next_to_scan_for_outputs_block, queue_output_until_block}, }; @@ -69,7 +68,7 @@ pub(crate) fn latest_scannable_block(getter: &impl Get) -> Optio This forms a backlog only if the latency of scanning, acknowledgement, and intake (including checking Eventualities) exceeds the window duration (the desired property). 
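    For intuition, a hedged numeric sketch of that window (WINDOW_LENGTH is assumed to be 10
    here):

      fn latest_scannable(next_to_check: u64, window_length: u64) -> u64 {
        // Mirrors latest_scannable_block: an inclusive bound, hence the - 1
        next_to_check + window_length - 1
      }
      // Having checked through block 100 (next_to_check = 101), the scan task
      // may reach 101 + 10 - 1 = 110, while a retirement queued at 100 + 10 =
      // 110 lands exactly at the edge of that window, never behind it.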
*/
-struct EventualityTask> {
+pub(crate) struct EventualityTask> {
   db: D,
   feed: S,
   scheduler: Sch,
@@ -115,7 +114,7 @@ impl> ContinuallyRan for EventualityTas
     };
 
     // Fetch the highest acknowledged block
-    let highest_acknowledged = ScannerDb::::highest_acknowledged_block(&self.db)
+    let highest_acknowledged = ScannerGlobalDb::::highest_acknowledged_block(&self.db)
       .expect("EventualityTask run before writing the start block");
 
     // Fetch the next block to check
@@ -132,7 +131,7 @@ impl> ContinuallyRan for EventualityTas
       // This is possible since even if we receive coins in block 0, any transactions we'd make
       // would resolve in block 1 (the first block we'll check under this non-zero rule)
       let prior_block = b - 1;
-      if ScannerDb::::is_block_notable(&self.db, prior_block) &&
+      if ScannerGlobalDb::::is_block_notable(&self.db, prior_block) &&
         (prior_block > highest_acknowledged)
       {
         break;
@@ -156,8 +155,9 @@ impl> ContinuallyRan for EventualityTas
         one which decides when to retire a key, and when it marks a key to be retired, it is done
         with it. Accordingly, it's not an issue if such a key was dropped.
       */
-      let mut keys = ScannerDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db)
-        .expect("scanning for a blockchain without any keys set");
+      let mut keys =
+        ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db)
+          .expect("scanning for a blockchain without any keys set");
       // Since the next-to-scan block is ahead of us, drop keys which have yet to actually activate
       keys.retain(|key| key.activation_block_number <= b);
 
       let mut txn = self.db.txn();
@@ -226,7 +226,7 @@ impl> ContinuallyRan for EventualityTas
         };
 
         let (return_address, in_instruction) =
-          ScannerDb::::return_address_and_in_instruction_for_forwarded_output(
+          ScannerGlobalDb::::return_address_and_in_instruction_for_forwarded_output(
             &txn, &forwarded,
           )
           .expect("forwarded an output yet didn't save its InInstruction to the DB");
@@ -281,7 +281,7 @@ impl> ContinuallyRan for EventualityTas
 
             // Retire this key `WINDOW_LENGTH` blocks in the future to ensure the scan task never
             // has a malleable view of the keys.
-            ScannerDb::::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key);
+            ScannerGlobalDb::::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key);
           }
         }
       }
diff --git a/processor/scanner/src/report/db.rs b/processor/scanner/src/report/db.rs
new file mode 100644
index 000000000..cca2148e8
--- /dev/null
+++ b/processor/scanner/src/report/db.rs
@@ -0,0 +1,27 @@
+use core::marker::PhantomData;
+
+use serai_db::{Get, DbTxn, Db, create_db};
+
+create_db!(
+  ScannerReport {
+    // The next block to potentially report
+    NextToPotentiallyReportBlock: () -> u64,
+  }
+);
+
+pub(crate) struct ReportDb;
+impl ReportDb {
+  pub(crate) fn set_next_to_potentially_report_block(
+    txn: &mut impl DbTxn,
+    next_to_potentially_report_block: u64,
+  ) {
+    NextToPotentiallyReportBlock::set(txn, &next_to_potentially_report_block);
+  }
+  pub(crate) fn next_to_potentially_report_block(getter: &impl Get) -> Option {
+    NextToPotentiallyReportBlock::get(getter)
+  }
+
+  pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 {
+    todo!("TODO")
+  }
+}
diff --git a/processor/scanner/src/report/mod.rs b/processor/scanner/src/report/mod.rs
index f69459f06..95bbbbd24 100644
--- a/processor/scanner/src/report/mod.rs
+++ b/processor/scanner/src/report/mod.rs
@@ -4,14 +4,16 @@ use serai_db::{DbTxn, Db};
 use serai_primitives::BlockHash;
 use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch};
 
-// TODO: Localize to Report?
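The batching rule in the following report/mod.rs hunk can be sketched standalone (hedged;
`Batch`, `InInstructionWithBalance`, and `MAX_BATCH_SIZE` are serai_in_instructions_primitives
items, while `acquire_batch_id` stands in for the DB-backed id allocator):

  use scale::Encode;
  use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch, InInstructionWithBalance};

  fn push_instruction(
    batches: &mut Vec<Batch>,
    instruction: InInstructionWithBalance,
    mut acquire_batch_id: impl FnMut() -> u32,
  ) {
    let batch = batches.last_mut().unwrap();
    batch.instructions.push(instruction);
    // If this instruction grew the batch past the size limit, pop it and
    // start a fresh batch, under a newly acquired id, with it included
    if batch.encode().len() > MAX_BATCH_SIZE {
      let instruction = batch.instructions.pop().unwrap();
      let (network, block) = (batch.network, batch.block);
      batches.push(Batch { network, id: acquire_batch_id(), block, instructions: vec![instruction] });
    }
  }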
use crate::{ - db::{ScannerDb, ScanToReportDb}, + db::{ScannerGlobalDb, ScanToReportDb}, index, scan::next_to_scan_for_outputs_block, ScannerFeed, ContinuallyRan, }; +mod db; +use db::ReportDb; + /* This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion. @@ -19,11 +21,24 @@ use crate::{ Eventualities, have processed the block. This ensures we know if this block is notable, and have the InInstructions for it. */ -struct ReportTask { +pub(crate) struct ReportTask { db: D, feed: S, } +impl ReportTask { + pub(crate) fn new(mut db: D, feed: S, start_block: u64) -> Self { + if ReportDb::next_to_potentially_report_block(&db).is_none() { + // Initialize the DB + let mut txn = db.txn(); + ReportDb::set_next_to_potentially_report_block(&mut txn, start_block); + txn.commit(); + } + + Self { db, feed } + } +} + #[async_trait::async_trait] impl ContinuallyRan for ReportTask { async fn run_iteration(&mut self) -> Result { @@ -44,7 +59,7 @@ impl ContinuallyRan for ReportTask { last_scanned }; - let next_to_potentially_report = ScannerDb::::next_to_potentially_report_block(&self.db) + let next_to_potentially_report = ReportDb::next_to_potentially_report_block(&self.db) .expect("ReportTask run before writing the start block"); for b in next_to_potentially_report ..= highest_reportable { @@ -53,7 +68,7 @@ impl ContinuallyRan for ReportTask { // Receive the InInstructions for this block // We always do this as we can't trivially tell if we should recv InInstructions before we do let in_instructions = ScanToReportDb::::recv_in_instructions(&mut txn, b); - let notable = ScannerDb::::is_block_notable(&txn, b); + let notable = ScannerGlobalDb::::is_block_notable(&txn, b); if !notable { assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions"); } @@ -61,7 +76,7 @@ impl ContinuallyRan for ReportTask { if notable { let network = S::NETWORK; let block_hash = index::block_id(&txn, b); - let mut batch_id = ScannerDb::::acquire_batch_id(&mut txn); + let mut batch_id = ReportDb::acquire_batch_id(&mut txn); // start with empty batch let mut batches = @@ -77,7 +92,7 @@ impl ContinuallyRan for ReportTask { let instruction = batch.instructions.pop().unwrap(); // bump the id for the new batch - batch_id = ScannerDb::::acquire_batch_id(&mut txn); + batch_id = ReportDb::acquire_batch_id(&mut txn); // make a new batch with this instruction included batches.push(Batch { @@ -93,7 +108,7 @@ impl ContinuallyRan for ReportTask { } // Update the next to potentially report block - ScannerDb::::set_next_to_potentially_report_block(&mut txn, b + 1); + ReportDb::set_next_to_potentially_report_block(&mut txn, b + 1); txn.commit(); } diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index 1f143809b..54f9bd77f 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -8,7 +8,6 @@ use serai_in_instructions_primitives::{ use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Block}; -// TODO: Localize to ScanDb? use crate::{ lifetime::LifetimeStage, db::{OutputWithInInstruction, SenderScanData, ScannerDb, ScanToReportDb, ScanToEventualityDb}, @@ -28,6 +27,9 @@ pub(crate) fn queue_output_until_block( queue_for_block: u64, output: &OutputWithInInstruction, ) { + // This isn't a perfect assertion as by the time this txn commits, we may have already started + // scanning this block. 
That doesn't change that it should never trip, as we queue outside the window
+  // we'll scan
   assert!(
     queue_for_block >=
       next_to_scan_for_outputs_block::(txn)

From e5cba72fd325f104f6275aa6789a6f1aa6ce9759 Mon Sep 17 00:00:00 2001
From: Luke Parker 
Date: Wed, 28 Aug 2024 20:16:06 -0400
Subject: [PATCH 038/179] Have Scanner::new spawn tasks

---
 processor/scanner/src/db.rs         |  2 +-
 processor/scanner/src/index/mod.rs  |  8 ++--
 processor/scanner/src/lib.rs        | 61 ++++++++++++++++++++++------
 processor/scanner/src/report/db.rs  |  4 +-
 processor/scanner/src/report/mod.rs |  9 +++--
 processor/scanner/src/scan/db.rs    | 16 +------
 processor/scanner/src/scan/mod.rs   | 26 ++++++------
 7 files changed, 73 insertions(+), 53 deletions(-)

diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs
index 7a2d68a97..59af768f5 100644
--- a/processor/scanner/src/db.rs
+++ b/processor/scanner/src/db.rs
@@ -7,7 +7,7 @@ use serai_db::{Get, DbTxn, create_db, db_channel};
 
 use serai_in_instructions_primitives::InInstructionWithBalance;
 
-use primitives::{ReceivedOutput, EncodableG};
+use primitives::{EncodableG, ReceivedOutput};
 
 use crate::{
   lifetime::LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, Return,
diff --git a/processor/scanner/src/index/mod.rs b/processor/scanner/src/index/mod.rs
index 7c70eedc7..930ce55ac 100644
--- a/processor/scanner/src/index/mod.rs
+++ b/processor/scanner/src/index/mod.rs
@@ -21,12 +21,12 @@ pub(crate) fn block_id(getter: &impl Get, block_number: u64) -> [u8; 32] {
   This task finds the finalized blocks, verifies they're continguous, and saves their IDs.
 */
-struct IndexFinalizedTask {
+pub(crate) struct IndexTask {
   db: D,
   feed: S,
 }
 
-impl IndexFinalizedTask {
+impl IndexTask {
   pub(crate) async fn new(mut db: D, feed: S, start_block: u64) -> Self {
     if IndexDb::block_id(&db, start_block).is_none() {
       // Fetch the block for its ID
@@ -36,7 +36,7 @@ impl IndexFinalizedTask {
         match feed.unchecked_block_header_by_number(start_block).await {
           Ok(block) => break block,
           Err(e) => {
-            log::warn!("IndexFinalizedTask couldn't fetch start block {start_block}: {e:?}");
+            log::warn!("IndexTask couldn't fetch start block {start_block}: {e:?}");
             tokio::time::sleep(core::time::Duration::from_secs(delay)).await;
             delay += Self::DELAY_BETWEEN_ITERATIONS;
             delay = delay.min(Self::MAX_DELAY_BETWEEN_ITERATIONS);
@@ -57,7 +57,7 @@ impl IndexFinalizedTask {
 }
 
 #[async_trait::async_trait]
-impl ContinuallyRan for IndexFinalizedTask {
+impl ContinuallyRan for IndexTask {
   async fn run_iteration(&mut self) -> Result {
     // Fetch the latest finalized block
     let our_latest_finalized = IndexDb::latest_finalized_block(&self.db)
diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs
index b363faa15..3515da05c 100644
--- a/processor/scanner/src/lib.rs
+++ b/processor/scanner/src/lib.rs
@@ -3,7 +3,7 @@ use std::collections::HashMap;
 
 use group::GroupEncoding;
 
-use serai_db::{Get, DbTxn};
+use serai_db::{Get, DbTxn, Db};
 
 use serai_primitives::{NetworkId, Coin, Amount};
 
@@ -14,7 +14,7 @@ mod lifetime;
 
 // Database schema definition and associated functions.
 mod db;
-use db::ScannerDb;
+use db::ScannerGlobalDb;
 // Task to index the blockchain, ensuring we don't reorganize finalized blocks.
 mod index;
 // Scans blocks for received coins.
@@ -50,7 +50,7 @@ impl BlockExt for B {
 ///
 /// This defines the primitive types used, along with various getters necessary for indexing.
#[async_trait::async_trait]
-pub trait ScannerFeed: Send + Sync {
+pub trait ScannerFeed: 'static + Send + Sync + Clone {
   /// The ID of the network being scanned for.
   const NETWORK: NetworkId;
 
@@ -170,7 +170,7 @@ pub struct SchedulerUpdate {
 }
 
 /// The object responsible for accumulating outputs and planning new transactions.
-pub trait Scheduler: Send {
+pub trait Scheduler: 'static + Send {
   /// Accumulate outputs into the scheduler, yielding the Eventualities now to be scanned for.
   ///
   /// The `Vec` used as the key in the returned HashMap should be the encoded key the
@@ -183,14 +183,38 @@ pub trait Scheduler: Send {
 }
 
 /// A representation of a scanner.
-pub struct Scanner(PhantomData);
+#[allow(non_snake_case)]
+pub struct Scanner {
+  eventuality_handle: RunNowHandle,
+  _S: PhantomData,
+}
 impl Scanner {
   /// Create a new scanner.
   ///
   /// This will begin its execution, spawning several asynchronous tasks.
   // TODO: Take start_time and binary search here?
-  pub fn new(start_block: u64) -> Self {
-    todo!("TODO")
+  pub async fn new(db: impl Db, feed: S, scheduler: impl Scheduler, start_block: u64) -> Self {
+    let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await;
+    let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block);
+    let report_task = report::ReportTask::<_, S>::new(db.clone(), start_block);
+    let eventuality_task = eventuality::EventualityTask::new(db, feed, scheduler, start_block);
+
+    let (_index_handle, index_run) = RunNowHandle::new();
+    let (scan_handle, scan_run) = RunNowHandle::new();
+    let (report_handle, report_run) = RunNowHandle::new();
+    let (eventuality_handle, eventuality_run) = RunNowHandle::new();
+
+    // Upon indexing a new block, scan it
+    tokio::spawn(index_task.continually_run(index_run, vec![scan_handle.clone()]));
+    // Upon scanning a block, report it
+    tokio::spawn(scan_task.continually_run(scan_run, vec![report_handle]));
+    // Upon reporting a block, we do nothing
+    tokio::spawn(report_task.continually_run(report_run, vec![]));
+    // Upon handling the Eventualities in a block, we run the scan task as we've advanced the
+    // window it's allowed to scan
+    tokio::spawn(eventuality_task.continually_run(eventuality_run, vec![scan_handle]));
+
+    Self { eventuality_handle, _S: PhantomData }
+  }
 
   /// Acknowledge a block.
   ///
@@ -199,19 +223,26 @@ impl Scanner {
   /// have achieved synchrony on it.
   pub fn acknowledge_block(
     &mut self,
-    txn: &mut impl DbTxn,
+    mut txn: impl DbTxn,
     block_number: u64,
     key_to_activate: Option>,
   ) {
     log::info!("acknowledging block {block_number}");
     assert!(
-      ScannerDb::::is_block_notable(txn, block_number),
+      ScannerGlobalDb::::is_block_notable(&txn, block_number),
       "acknowledging a block which wasn't notable"
     );
-    ScannerDb::::set_highest_acknowledged_block(txn, block_number);
+    ScannerGlobalDb::::set_highest_acknowledged_block(&mut txn, block_number);
     if let Some(key_to_activate) = key_to_activate {
-      ScannerDb::::queue_key(txn, block_number + S::WINDOW_LENGTH, key_to_activate);
+      ScannerGlobalDb::::queue_key(&mut txn, block_number + S::WINDOW_LENGTH, key_to_activate);
     }
+
+    // Commit the txn
+    txn.commit();
+    // Run the Eventuality task since we've advanced it
+    // We couldn't successfully do this if that txn was still floating around, uncommitted
+    // The execution of this task won't actually have more work until the txn is committed
+    self.eventuality_handle.run_now();
   }
 
   /// Queue Burns.
@@ -220,7 +251,7 @@ impl Scanner { /// safely queue Burns so long as they're only actually added once we've handled the outputs from /// the block acknowledged prior to their queueing. pub fn queue_burns(&mut self, txn: &mut impl DbTxn, burns: Vec<()>) { - let queue_as_of = ScannerDb::::highest_acknowledged_block(txn) + let queue_as_of = ScannerGlobalDb::::highest_acknowledged_block(txn) .expect("queueing Burns yet never acknowledged a block"); todo!("TODO") } @@ -228,8 +259,8 @@ impl Scanner { /* #[derive(Clone, Debug)] -struct ScannerDb(PhantomData, PhantomData); -impl ScannerDb { +struct ScannerGlobalDb(PhantomData, PhantomData); +impl ScannerGlobalDb { fn seen_key(id: &>::Id) -> Vec { Self::scanner_key(b"seen", id) } @@ -295,7 +326,7 @@ impl ScannerDb { TODO2: Only update ram_outputs after committing the TXN in question. */ - let seen = ScannerDb::::seen(&db, &id); + let seen = ScannerGlobalDb::::seen(&db, &id); let id = id.as_ref().to_vec(); if seen || scanner.ram_outputs.contains(&id) { panic!("scanned an output multiple times"); diff --git a/processor/scanner/src/report/db.rs b/processor/scanner/src/report/db.rs index cca2148e8..745aa7725 100644 --- a/processor/scanner/src/report/db.rs +++ b/processor/scanner/src/report/db.rs @@ -1,6 +1,4 @@ -use core::marker::PhantomData; - -use serai_db::{Get, DbTxn, Db, create_db}; +use serai_db::{Get, DbTxn, create_db}; create_db!( ScannerReport { diff --git a/processor/scanner/src/report/mod.rs b/processor/scanner/src/report/mod.rs index 95bbbbd24..18f842e2d 100644 --- a/processor/scanner/src/report/mod.rs +++ b/processor/scanner/src/report/mod.rs @@ -1,3 +1,5 @@ +use core::marker::PhantomData; + use scale::Encode; use serai_db::{DbTxn, Db}; @@ -21,13 +23,14 @@ use db::ReportDb; Eventualities, have processed the block. This ensures we know if this block is notable, and have the InInstructions for it. */ +#[allow(non_snake_case)] pub(crate) struct ReportTask { db: D, - feed: S, + _S: PhantomData, } impl ReportTask { - pub(crate) fn new(mut db: D, feed: S, start_block: u64) -> Self { + pub(crate) fn new(mut db: D, start_block: u64) -> Self { if ReportDb::next_to_potentially_report_block(&db).is_none() { // Initialize the DB let mut txn = db.txn(); @@ -35,7 +38,7 @@ impl ReportTask { txn.commit(); } - Self { db, feed } + Self { db, _S: PhantomData } } } diff --git a/processor/scanner/src/scan/db.rs b/processor/scanner/src/scan/db.rs index 905e10be8..9b98150fa 100644 --- a/processor/scanner/src/scan/db.rs +++ b/processor/scanner/src/scan/db.rs @@ -1,22 +1,8 @@ use core::marker::PhantomData; -use std::io; -use scale::Encode; -use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db}; -use serai_in_instructions_primitives::InInstructionWithBalance; - -use primitives::{EncodableG, ReceivedOutput, EventualityTracker}; - -use crate::{ - lifetime::LifetimeStage, db::OutputWithInInstruction, ScannerFeed, KeyFor, AddressFor, OutputFor, - EventualityFor, Return, scan::next_to_scan_for_outputs_block, -}; - -// The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. 
-trait Borshy: BorshSerialize + BorshDeserialize {} -impl Borshy for T {} +use crate::{db::OutputWithInInstruction, ScannerFeed}; create_db!( ScannerScan { diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index 54f9bd77f..b427b535b 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -10,7 +10,9 @@ use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Block}; use crate::{ lifetime::LifetimeStage, - db::{OutputWithInInstruction, SenderScanData, ScannerDb, ScanToReportDb, ScanToEventualityDb}, + db::{ + OutputWithInInstruction, SenderScanData, ScannerGlobalDb, ScanToReportDb, ScanToEventualityDb, + }, BlockExt, ScannerFeed, AddressFor, OutputFor, Return, sort_outputs, eventuality::latest_scannable_block, }; @@ -84,12 +86,12 @@ fn in_instruction_from_output( ) } -struct ScanForOutputsTask { +pub(crate) struct ScanTask { db: D, feed: S, } -impl ScanForOutputsTask { +impl ScanTask { pub(crate) fn new(mut db: D, feed: S, start_block: u64) -> Self { if ScanDb::::next_to_scan_for_outputs_block(&db).is_none() { // Initialize the DB @@ -103,14 +105,14 @@ impl ScanForOutputsTask { } #[async_trait::async_trait] -impl ContinuallyRan for ScanForOutputsTask { +impl ContinuallyRan for ScanTask { async fn run_iteration(&mut self) -> Result { // Fetch the safe to scan block - let latest_scannable = latest_scannable_block::(&self.db) - .expect("ScanForOutputsTask run before writing the start block"); + let latest_scannable = + latest_scannable_block::(&self.db).expect("ScanTask run before writing the start block"); // Fetch the next block to scan let next_to_scan = ScanDb::::next_to_scan_for_outputs_block(&self.db) - .expect("ScanForOutputsTask run before writing the start block"); + .expect("ScanTask run before writing the start block"); for b in next_to_scan ..= latest_scannable { let block = self.feed.block_by_number(&self.db, b).await?; @@ -123,8 +125,8 @@ impl ContinuallyRan for ScanForOutputsTask { // Tidy the keys, then fetch them // We don't have to tidy them here, we just have to somewhere, so why not here? 
- ScannerDb::::tidy_keys(&mut txn); - let keys = ScannerDb::::active_keys_as_of_next_to_scan_for_outputs_block(&txn) + ScannerGlobalDb::::tidy_keys(&mut txn); + let keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&txn) .expect("scanning for a blockchain without any keys set"); let mut scan_data = SenderScanData { @@ -197,7 +199,7 @@ impl ContinuallyRan for ScanForOutputsTask { // We ensure it's over the dust limit to prevent people sending 1 satoshi from causing // an invocation of a consensus/signing protocol if balance.amount.0 >= self.feed.dust(balance.coin).0 { - ScannerDb::::flag_notable_due_to_non_external_output(&mut txn, b); + ScannerGlobalDb::::flag_notable_due_to_non_external_output(&mut txn, b); } continue; } @@ -284,10 +286,10 @@ impl ContinuallyRan for ScanForOutputsTask { } } - // Send the scan data to the eventuality task - ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); // Send the InInstructions to the report task ScanToReportDb::::send_in_instructions(&mut txn, b, in_instructions); + // Send the scan data to the eventuality task + ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); // Update the next to scan block ScanDb::::set_next_to_scan_for_outputs_block(&mut txn, b + 1); txn.commit(); From 85bab09cc169630e1b10a96f07bacd03d6c5f8cd Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 28 Aug 2024 23:31:31 -0400 Subject: [PATCH 039/179] Fill in various DB functions --- processor/primitives/src/eventuality.rs | 19 ++++++++++++++--- processor/primitives/src/output.rs | 7 +++++- processor/scanner/src/db.rs | 27 +++++++++++++++++++++--- processor/scanner/src/eventuality/db.rs | 26 +++++++++++++++-------- processor/scanner/src/eventuality/mod.rs | 2 +- processor/scanner/src/report/db.rs | 6 +++++- processor/scanner/src/scan/db.rs | 9 +++++++- 7 files changed, 77 insertions(+), 19 deletions(-) diff --git a/processor/primitives/src/eventuality.rs b/processor/primitives/src/eventuality.rs index 7203031b2..eb6cda9c2 100644 --- a/processor/primitives/src/eventuality.rs +++ b/processor/primitives/src/eventuality.rs @@ -23,9 +23,9 @@ pub trait Eventuality: Sized + Send + Sync { fn forwarded_output(&self) -> Option; /// Read an Eventuality. - fn read(reader: &mut R) -> io::Result; - /// Serialize an Eventuality to a `Vec`. - fn serialize(&self) -> Vec; + fn read(reader: &mut impl io::Read) -> io::Result; + /// Write an Eventuality. + fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; } /// A tracker of unresolved Eventualities. @@ -36,3 +36,16 @@ pub struct EventualityTracker { /// These are keyed by their lookups. pub active_eventualities: HashMap, E>, } + +impl Default for EventualityTracker { + fn default() -> Self { + EventualityTracker { active_eventualities: HashMap::new() } + } +} + +impl EventualityTracker { + /// Insert an Eventuality into the tracker. + pub fn insert(&mut self, eventuality: E) { + self.active_eventualities.insert(eventuality.lookup(), eventuality); + } +} diff --git a/processor/primitives/src/output.rs b/processor/primitives/src/output.rs index 152a59e08..777b2c524 100644 --- a/processor/primitives/src/output.rs +++ b/processor/primitives/src/output.rs @@ -8,7 +8,12 @@ use serai_primitives::{ExternalAddress, Balance}; use crate::Id; /// An address on the external network. -pub trait Address: Send + Sync + TryFrom {} +pub trait Address: Send + Sync + TryFrom { + /// Write this address. + fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; + /// Read an address. 
+ fn read(reader: &mut impl io::Read) -> io::Result; +} /// The type of the output. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 59af768f5..810859a63 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -1,13 +1,13 @@ use core::marker::PhantomData; use std::io; -use scale::Encode; +use scale::{Encode, Decode, IoReader}; use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db, db_channel}; use serai_in_instructions_primitives::InInstructionWithBalance; -use primitives::{EncodableG, ReceivedOutput}; +use primitives::{EncodableG, Address, ReceivedOutput}; use crate::{ lifetime::LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, Return, @@ -38,9 +38,30 @@ pub(crate) struct OutputWithInInstruction { } impl OutputWithInInstruction { + pub(crate) fn read(reader: &mut impl io::Read) -> io::Result { + let output = OutputFor::::read(reader)?; + let return_address = { + let mut opt = [0xff]; + reader.read_exact(&mut opt)?; + assert!((opt[0] == 0) || (opt[0] == 1)); + if opt[0] == 0 { + None + } else { + Some(AddressFor::::read(reader)?) + } + }; + let in_instruction = + InInstructionWithBalance::decode(&mut IoReader(reader)).map_err(io::Error::other)?; + Ok(Self { output, return_address, in_instruction }) + } pub(crate) fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { self.output.write(writer)?; - // TODO self.return_address.write(writer)?; + if let Some(return_address) = &self.return_address { + writer.write_all(&[1])?; + return_address.write(writer)?; + } else { + writer.write_all(&[0])?; + } self.in_instruction.encode_to(writer); Ok(()) } diff --git a/processor/scanner/src/eventuality/db.rs b/processor/scanner/src/eventuality/db.rs index baed33c47..c5a07b047 100644 --- a/processor/scanner/src/eventuality/db.rs +++ b/processor/scanner/src/eventuality/db.rs @@ -1,22 +1,18 @@ use core::marker::PhantomData; -use borsh::{BorshSerialize, BorshDeserialize}; +use scale::Encode; use serai_db::{Get, DbTxn, create_db}; -use primitives::EventualityTracker; +use primitives::{EncodableG, Eventuality, EventualityTracker}; use crate::{ScannerFeed, KeyFor, EventualityFor}; -// The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. 
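The `read`/`write` pair above encodes the optional return address with a one-byte presence flag. The same scheme in isolation, over a plain byte-string payload (these helpers are illustrative, not part of the codebase; a length prefix stands in for the self-delimiting address encoding):

```rust
use std::io::{self, Read, Write};

// Write an optional byte string: 1 + payload if present, 0 otherwise.
fn write_opt(value: Option<&[u8]>, writer: &mut impl Write) -> io::Result<()> {
  match value {
    Some(bytes) => {
      writer.write_all(&[1])?;
      // A length prefix so the reader knows where the payload ends.
      writer.write_all(&u32::try_from(bytes.len()).unwrap().to_le_bytes())?;
      writer.write_all(bytes)
    }
    None => writer.write_all(&[0]),
  }
}

fn read_opt(reader: &mut impl Read) -> io::Result<Option<Vec<u8>>> {
  let mut flag = [0xff];
  reader.read_exact(&mut flag)?;
  match flag[0] {
    0 => Ok(None),
    1 => {
      let mut len = [0; 4];
      reader.read_exact(&mut len)?;
      let mut bytes = vec![0; u32::from_le_bytes(len) as usize];
      reader.read_exact(&mut bytes)?;
      Ok(Some(bytes))
    }
    // The processor asserts here instead, as it only reads values it wrote itself.
    _ => Err(io::Error::other("invalid Option flag byte")),
  }
}

fn main() -> io::Result<()> {
  let mut buf = vec![];
  write_opt(Some(b"serai".as_slice()), &mut buf)?;
  write_opt(None, &mut buf)?;
  let mut cursor = buf.as_slice();
  assert_eq!(read_opt(&mut cursor)?, Some(b"serai".to_vec()));
  assert_eq!(read_opt(&mut cursor)?, None);
  Ok(())
}
```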
-trait Borshy: BorshSerialize + BorshDeserialize {} -impl Borshy for T {} - create_db!( ScannerEventuality { // The next block to check for resolving eventualities NextToCheckForEventualitiesBlock: () -> u64, - SerializedEventualities: () -> Vec, + SerializedEventualities: (key: K) -> Vec, } ); @@ -41,13 +37,25 @@ impl EventualityDb { key: KeyFor, eventualities: &EventualityTracker>, ) { - todo!("TODO") + let mut serialized = Vec::with_capacity(eventualities.active_eventualities.len() * 128); + for eventuality in eventualities.active_eventualities.values() { + eventuality.write(&mut serialized).unwrap(); + } + SerializedEventualities::set(txn, EncodableG(key), &serialized); } pub(crate) fn eventualities( getter: &impl Get, key: KeyFor, ) -> EventualityTracker> { - todo!("TODO") + let serialized = SerializedEventualities::get(getter, EncodableG(key)).unwrap_or(vec![]); + let mut serialized = serialized.as_slice(); + + let mut res = EventualityTracker::default(); + while !serialized.is_empty() { + let eventuality = EventualityFor::::read(&mut serialized).unwrap(); + res.insert(eventuality); + } + res } } diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index e10aab549..b5dc3dd94 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -263,7 +263,7 @@ impl> ContinuallyRan for EventualityTas let mut eventualities = EventualityDb::::eventualities(&txn, key); for new_eventuality in new_eventualities { - eventualities.active_eventualities.insert(new_eventuality.lookup(), new_eventuality); + eventualities.insert(new_eventuality); } EventualityDb::::set_eventualities(&mut txn, key, &eventualities); } diff --git a/processor/scanner/src/report/db.rs b/processor/scanner/src/report/db.rs index 745aa7725..2fd98d4be 100644 --- a/processor/scanner/src/report/db.rs +++ b/processor/scanner/src/report/db.rs @@ -4,6 +4,8 @@ create_db!( ScannerReport { // The next block to potentially report NextToPotentiallyReportBlock: () -> u64, + // The next Batch ID to use + NextBatchId: () -> u32, } ); @@ -20,6 +22,8 @@ impl ReportDb { } pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 { - todo!("TODO") + let id = NextBatchId::get(txn).unwrap_or(0); + NextBatchId::set(txn, &(id + 1)); + id } } diff --git a/processor/scanner/src/scan/db.rs b/processor/scanner/src/scan/db.rs index 9b98150fa..6df84df18 100644 --- a/processor/scanner/src/scan/db.rs +++ b/processor/scanner/src/scan/db.rs @@ -29,7 +29,14 @@ impl ScanDb { txn: &mut impl DbTxn, block_number: u64, ) -> Vec> { - todo!("TODO") + let serialized = SerializedQueuedOutputs::get(txn, block_number).unwrap_or(vec![]); + let mut serialized = serialized.as_slice(); + + let mut res = Vec::with_capacity(serialized.len() / 128); + while !serialized.is_empty() { + res.push(OutputWithInInstruction::::read(&mut serialized).unwrap()); + } + res } pub(crate) fn queue_output_until_block( From 3a23c2c3defa40c241290a612173cb71e773d931 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 28 Aug 2024 23:45:17 -0400 Subject: [PATCH 040/179] Cache the cost to aggregate --- processor/scanner/src/lib.rs | 2 +- processor/scanner/src/scan/mod.rs | 24 ++++++++++++++++++++---- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 3515da05c..d8a29951c 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -141,7 +141,7 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { async fn 
cost_to_aggregate( &self, coin: Coin, - block_number: u64, + reference_block: &Self::Block, ) -> Result; /// The dust threshold for the specified coin. diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index b427b535b..59d0f197c 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use scale::Decode; use serai_db::{Get, DbTxn, Db}; @@ -129,14 +131,17 @@ impl ContinuallyRan for ScanTask { let keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&txn) .expect("scanning for a blockchain without any keys set"); + // The scan data for this block let mut scan_data = SenderScanData { block_number: b, received_external_outputs: vec![], forwards: vec![], returns: vec![], }; + // The InInstructions for this block let mut in_instructions = vec![]; + // The outputs queued for this block let queued_outputs = { let mut queued_outputs = ScanDb::::take_queued_outputs(&mut txn, b); // Sort the queued outputs in case they weren't queued in a deterministic fashion @@ -148,6 +153,11 @@ impl ContinuallyRan for ScanTask { in_instructions.push(queued_output.in_instruction); } + // We subtract the cost to aggregate from some outputs we scan + // This cost is fetched with an asynchronous function which may be non-trivial + // We cache the result of this function here to avoid calling it multiple times + let mut costs_to_aggregate = HashMap::with_capacity(1); + // Scan for each key for key in keys { for output in block.scan_for_outputs(key.key) { @@ -207,13 +217,19 @@ impl ContinuallyRan for ScanTask { // Check this isn't dust let balance_to_use = { let mut balance = output.balance(); + // First, subtract 2 * the cost to aggregate, as detailed in // `spec/processor/UTXO Management.md` - // TODO: Cache this - let cost_to_aggregate = - self.feed.cost_to_aggregate(balance.coin, b).await.map_err(|e| { + + // We cache this, so if it isn't yet cached, insert it into the cache + if let std::collections::hash_map::Entry::Vacant(e) = + costs_to_aggregate.entry(balance.coin) + { + e.insert(self.feed.cost_to_aggregate(balance.coin, &block).await.map_err(|e| { format!("couldn't fetch cost to aggregate {:?} at {b}: {e:?}", balance.coin) - })?; + })?); + } + let cost_to_aggregate = costs_to_aggregate[&balance.coin]; balance.amount.0 -= 2 * cost_to_aggregate.0; // Now, check it's still past the dust threshold From 84b0bec7b0bbd00b0b620923b407efec0f491e1d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 29 Aug 2024 00:01:31 -0400 Subject: [PATCH 041/179] Add API to publish Batches with This doesn't have to be abstract, we can generate the message and use the message-queue API, yet this should help with testing. --- processor/scanner/src/lib.rs | 27 ++++++++++++++++++++++++--- processor/scanner/src/report/mod.rs | 22 +++++++++++++++------- 2 files changed, 39 insertions(+), 10 deletions(-) diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index d8a29951c..3e828fcb1 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -6,6 +6,7 @@ use group::GroupEncoding; use serai_db::{Get, DbTxn, Db}; use serai_primitives::{NetworkId, Coin, Amount}; +use serai_in_instructions_primitives::Batch; use primitives::{task::*, Address, ReceivedOutput, Block}; @@ -81,7 +82,7 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { /// An error encountered when fetching data from the blockchain. /// /// This MUST be an ephemeral error. 
Retrying fetching data from the blockchain MUST eventually - /// resolve without manual intervention. + /// resolve without manual intervention/changing the arguments. type EphemeralError: Debug; /// Fetch the number of the latest finalized block. @@ -156,6 +157,20 @@ type AddressFor = <::Block as Block>::Address; type OutputFor = <::Block as Block>::Output; type EventualityFor = <::Block as Block>::Eventuality; +#[async_trait::async_trait] +pub trait BatchPublisher: 'static + Send + Sync { + /// An error encountered when publishing the Batch. + /// + /// This MUST be an ephemeral error. Retrying publication MUST eventually resolve without manual + /// intervention/changing the arguments. + type EphemeralError: Debug; + + /// Publish a Batch. + /// + /// This function must be safe to call with the same Batch multiple times. + async fn publish_batch(&mut self, batch: Batch) -> Result<(), Self::EphemeralError>; +} + /// A return to occur. pub struct Return { address: AddressFor, @@ -193,10 +208,16 @@ impl Scanner { /// /// This will begin its execution, spawning several asynchronous tasks. // TODO: Take start_time and binary search here? - pub async fn new(db: impl Db, feed: S, scheduler: impl Scheduler, start_block: u64) -> Self { + pub async fn new( + db: impl Db, + feed: S, + batch_publisher: impl BatchPublisher, + scheduler: impl Scheduler, + start_block: u64, + ) -> Self { let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await; let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block); - let report_task = report::ReportTask::<_, S>::new(db.clone(), start_block); + let report_task = report::ReportTask::<_, S, _>::new(db.clone(), batch_publisher, start_block); let eventuality_task = eventuality::EventualityTask::new(db, feed, scheduler, start_block); let (_index_handle, index_run) = RunNowHandle::new(); diff --git a/processor/scanner/src/report/mod.rs b/processor/scanner/src/report/mod.rs index 18f842e2d..b789ea58b 100644 --- a/processor/scanner/src/report/mod.rs +++ b/processor/scanner/src/report/mod.rs @@ -6,11 +6,12 @@ use serai_db::{DbTxn, Db}; use serai_primitives::BlockHash; use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; +use primitives::task::ContinuallyRan; use crate::{ db::{ScannerGlobalDb, ScanToReportDb}, index, scan::next_to_scan_for_outputs_block, - ScannerFeed, ContinuallyRan, + ScannerFeed, BatchPublisher, }; mod db; @@ -24,13 +25,14 @@ use db::ReportDb; the InInstructions for it. 
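Since the commit message notes this abstraction mainly exists to help with testing, one natural implementor is an in-memory recorder. A sketch against a simplified, synchronous mirror of the `BatchPublisher` trait above (the real trait is async and takes the Substrate `Batch` type; `TestBatch` and `MemoryPublisher` are stand-ins):

```rust
use core::{fmt::Debug, convert::Infallible};

// Stand-in for serai_in_instructions_primitives::Batch.
#[derive(Clone, Debug, PartialEq)]
struct TestBatch {
  id: u32,
}

// Synchronous mirror of the BatchPublisher trait.
trait BatchPublisherSketch: 'static + Send + Sync {
  type EphemeralError: Debug;
  fn publish_batch(&mut self, batch: TestBatch) -> Result<(), Self::EphemeralError>;
}

// An in-memory publisher for tests: it simply records what was published.
struct MemoryPublisher(Vec<TestBatch>);
impl BatchPublisherSketch for MemoryPublisher {
  // Publishing into a Vec cannot fail.
  type EphemeralError = Infallible;
  fn publish_batch(&mut self, batch: TestBatch) -> Result<(), Infallible> {
    // Publishing the same Batch twice must be safe, so deduplicate by ID.
    if !self.0.iter().any(|existing| existing.id == batch.id) {
      self.0.push(batch);
    }
    Ok(())
  }
}

fn main() {
  let mut publisher = MemoryPublisher(vec![]);
  publisher.publish_batch(TestBatch { id: 0 }).unwrap();
  publisher.publish_batch(TestBatch { id: 0 }).unwrap();
  assert_eq!(publisher.0.len(), 1);
}
```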
*/ #[allow(non_snake_case)] -pub(crate) struct ReportTask { +pub(crate) struct ReportTask { db: D, + batch_publisher: B, _S: PhantomData, } -impl ReportTask { - pub(crate) fn new(mut db: D, start_block: u64) -> Self { +impl ReportTask { + pub(crate) fn new(mut db: D, batch_publisher: B, start_block: u64) -> Self { if ReportDb::next_to_potentially_report_block(&db).is_none() { // Initialize the DB let mut txn = db.txn(); @@ -38,12 +40,12 @@ impl ReportTask { txn.commit(); } - Self { db, _S: PhantomData } + Self { db, batch_publisher, _S: PhantomData } } } #[async_trait::async_trait] -impl ContinuallyRan for ReportTask { +impl ContinuallyRan for ReportTask { async fn run_iteration(&mut self) -> Result { let highest_reportable = { // Fetch the next to scan block @@ -107,7 +109,13 @@ impl ContinuallyRan for ReportTask { } } - todo!("TODO: Set/emit batches"); + for batch in batches { + self + .batch_publisher + .publish_batch(batch) + .await + .map_err(|e| format!("failed to publish batch: {e:?}"))?; + } } // Update the next to potentially report block From d8223463fa1d4018b7dce3bda7a0566bceac67ec Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 29 Aug 2024 12:45:47 -0400 Subject: [PATCH 042/179] Route burns through the scanner --- Cargo.lock | 1 + processor/scanner/Cargo.toml | 1 + processor/scanner/src/db.rs | 31 +++++- processor/scanner/src/eventuality/db.rs | 16 ++- processor/scanner/src/eventuality/mod.rs | 124 ++++++++++++++++++----- processor/scanner/src/lib.rs | 89 ++++++++++++++-- 6 files changed, 223 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4cc54e15e..2a9de4b9d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8670,6 +8670,7 @@ dependencies = [ "hex", "log", "parity-scale-codec", + "serai-coins-primitives", "serai-db", "serai-in-instructions-primitives", "serai-primitives", diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml index a16b55f2c..e7cdef97a 100644 --- a/processor/scanner/Cargo.toml +++ b/processor/scanner/Cargo.toml @@ -37,6 +37,7 @@ serai-db = { path = "../../common/db" } serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std"] } +serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std"] } messages = { package = "serai-processor-messages", path = "../messages" } primitives = { package = "serai-processor-primitives", path = "../primitives" } diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 810859a63..a6272eeb8 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -6,6 +6,7 @@ use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db, db_channel}; use serai_in_instructions_primitives::InInstructionWithBalance; +use serai_coins_primitives::OutInstructionWithBalance; use primitives::{EncodableG, Address, ReceivedOutput}; @@ -336,9 +337,9 @@ impl ScanToEventualityDb { } #[derive(BorshSerialize, BorshDeserialize)] -pub(crate) struct BlockBoundInInstructions { - pub(crate) block_number: u64, - pub(crate) in_instructions: Vec, +struct BlockBoundInInstructions { + block_number: u64, + in_instructions: Vec, } db_channel! { @@ -370,3 +371,27 @@ impl ScanToReportDb { data.in_instructions } } + +db_channel! 
{ + ScannerSubstrateEventuality { + Burns: (acknowledged_block: u64) -> Vec, + } +} + +pub(crate) struct SubstrateToEventualityDb; +impl SubstrateToEventualityDb { + pub(crate) fn send_burns( + txn: &mut impl DbTxn, + acknowledged_block: u64, + burns: &Vec, + ) { + Burns::send(txn, acknowledged_block, burns); + } + + pub(crate) fn try_recv_burns( + txn: &mut impl DbTxn, + acknowledged_block: u64, + ) -> Option> { + Burns::try_recv(txn, acknowledged_block) + } +} diff --git a/processor/scanner/src/eventuality/db.rs b/processor/scanner/src/eventuality/db.rs index c5a07b047..f810ba2f6 100644 --- a/processor/scanner/src/eventuality/db.rs +++ b/processor/scanner/src/eventuality/db.rs @@ -11,6 +11,8 @@ create_db!( ScannerEventuality { // The next block to check for resolving eventualities NextToCheckForEventualitiesBlock: () -> u64, + // The latest block this task has handled which was notable + LatestHandledNotableBlock: () -> u64, SerializedEventualities: (key: K) -> Vec, } @@ -22,16 +24,22 @@ impl EventualityDb { txn: &mut impl DbTxn, next_to_check_for_eventualities_block: u64, ) { - assert!( - next_to_check_for_eventualities_block != 0, - "next-to-check-for-eventualities block was 0 when it's bound non-zero" - ); NextToCheckForEventualitiesBlock::set(txn, &next_to_check_for_eventualities_block); } pub(crate) fn next_to_check_for_eventualities_block(getter: &impl Get) -> Option { NextToCheckForEventualitiesBlock::get(getter) } + pub(crate) fn set_latest_handled_notable_block( + txn: &mut impl DbTxn, + latest_handled_notable_block: u64, + ) { + LatestHandledNotableBlock::set(txn, &latest_handled_notable_block); + } + pub(crate) fn latest_handled_notable_block(getter: &impl Get) -> Option { + LatestHandledNotableBlock::get(getter) + } + pub(crate) fn set_eventualities( txn: &mut impl DbTxn, key: KeyFor, diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index b5dc3dd94..38176ed46 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -6,7 +6,10 @@ use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Eventuality, use crate::{ lifetime::LifetimeStage, - db::{OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, ScanToEventualityDb}, + db::{ + OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, SubstrateToEventualityDb, + ScanToEventualityDb, + }, BlockExt, ScannerFeed, KeyFor, SchedulerUpdate, Scheduler, sort_outputs, scan::{next_to_scan_for_outputs_block, queue_output_until_block}, }; @@ -20,6 +23,7 @@ use db::EventualityDb; /// only allowed to scan `S::WINDOW_LENGTH - 1` blocks ahead so we can safely schedule keys to /// retire `S::WINDOW_LENGTH` blocks out. pub(crate) fn latest_scannable_block(getter: &impl Get) -> Option { + assert!(S::WINDOW_LENGTH > 0); EventualityDb::::next_to_check_for_eventualities_block(getter) .map(|b| b + S::WINDOW_LENGTH - 1) } @@ -79,24 +83,81 @@ impl> EventualityTask { if EventualityDb::::next_to_check_for_eventualities_block(&db).is_none() { // Initialize the DB let mut txn = db.txn(); - // We can receive outputs in `start_block`, but any descending transactions will be in the - // next block - EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, start_block + 1); + EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, start_block); txn.commit(); } Self { db, feed, scheduler } } + + // Returns a boolean of if we intaked any Burns. 
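`db_channel!` provides a FIFO channel persisted in the database, keyed here by the acknowledged block number. An in-memory model of the `send_burns`/`try_recv_burns` contract (`MemoryChannel` is illustrative, with string burns standing in for `OutInstructionWithBalance`; the real channel lives inside a `DbTxn`):

```rust
use std::collections::{HashMap, VecDeque};

// Each key (an acknowledged block number) gets its own FIFO queue of burn batches.
#[derive(Default)]
struct MemoryChannel {
  queues: HashMap<u64, VecDeque<Vec<&'static str>>>,
}

impl MemoryChannel {
  fn send_burns(&mut self, acknowledged_block: u64, burns: Vec<&'static str>) {
    self.queues.entry(acknowledged_block).or_default().push_back(burns);
  }

  // Receiving drains in the order sent, returning None once the queue is empty.
  fn try_recv_burns(&mut self, acknowledged_block: u64) -> Option<Vec<&'static str>> {
    self.queues.get_mut(&acknowledged_block)?.pop_front()
  }
}

fn main() {
  let mut channel = MemoryChannel::default();
  channel.send_burns(1, vec!["burn a"]);
  channel.send_burns(1, vec!["burn b"]);
  // The Eventuality task drains the entire channel for the block it's synchronized on.
  assert_eq!(channel.try_recv_burns(1), Some(vec!["burn a"]));
  assert_eq!(channel.try_recv_burns(1), Some(vec!["burn b"]));
  assert_eq!(channel.try_recv_burns(1), None);
}
```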
+ fn intake_burns(&mut self) -> bool { + let mut intaked_any = false; + + // If we've handled an notable block, we may have Burns being queued with it as the reference + if let Some(latest_handled_notable_block) = + EventualityDb::::latest_handled_notable_block(&self.db) + { + let mut txn = self.db.txn(); + // Drain the entire channel + while let Some(burns) = + SubstrateToEventualityDb::try_recv_burns(&mut txn, latest_handled_notable_block) + { + intaked_any = true; + + let new_eventualities = self.scheduler.fulfill(&mut txn, burns); + + // TODO: De-duplicate this with below instance via a helper function + for (key, new_eventualities) in new_eventualities { + let key = { + let mut key_repr = as GroupEncoding>::Repr::default(); + assert_eq!(key.len(), key_repr.as_ref().len()); + key_repr.as_mut().copy_from_slice(&key); + KeyFor::::from_bytes(&key_repr).unwrap() + }; + + let mut eventualities = EventualityDb::::eventualities(&txn, key); + for new_eventuality in new_eventualities { + eventualities.insert(new_eventuality); + } + EventualityDb::::set_eventualities(&mut txn, key, &eventualities); + } + } + txn.commit(); + } + + intaked_any + } } #[async_trait::async_trait] impl> ContinuallyRan for EventualityTask { async fn run_iteration(&mut self) -> Result { + // Fetch the highest acknowledged block + let Some(highest_acknowledged) = ScannerGlobalDb::::highest_acknowledged_block(&self.db) + else { + // If we've never acknowledged a block, return + return Ok(false); + }; + + // A boolean of if we've made any progress to return at the end of the function + let mut made_progress = false; + + // Start by intaking any Burns we have sitting around + made_progress |= self.intake_burns(); + /* - The set of Eventualities only increase when a block is acknowledged. Accordingly, we can only - iterate up to (and including) the block currently pending acknowledgement. "including" is - because even if block `b` causes new Eventualities, they'll only potentially resolve in block - `b + 1`. + Eventualities increase upon one of two cases: + + 1) We're fulfilling Burns + 2) We acknowledged a block + + We can't know the processor has intaked all Burns it should have when we process block `b`. + We solve this by executing a consensus protocol whenever a resolution for an Eventuality + created to fulfill Burns occurs. Accordingly, we force ourselves to obtain synchrony on such + blocks (and all preceding Burns). + + This means we can only iterate up to the block currently pending acknowledgement. We only know blocks will need acknowledgement *for sure* if they were scanned. The only other causes are key activation and retirement (both scheduled outside the scan window). This makes @@ -113,32 +174,38 @@ impl> ContinuallyRan for EventualityTas next_to_scan }; - // Fetch the highest acknowledged block - let highest_acknowledged = ScannerGlobalDb::::highest_acknowledged_block(&self.db) - .expect("EventualityTask run before writing the start block"); - // Fetch the next block to check let next_to_check = EventualityDb::::next_to_check_for_eventualities_block(&self.db) .expect("EventualityTask run before writing the start block"); // Check all blocks - let mut iterated = false; for b in next_to_check .. 
exclusive_upper_bound { - // If the prior block was notable *and* not acknowledged, break - // This is so if it caused any Eventualities (which may resolve this block), we have them - { - // This `- 1` is safe as next to check is bound to be non-zero - // This is possible since even if we receive coins in block 0, any transactions we'd make - // would resolve in block 1 (the first block we'll check under this non-zero rule) - let prior_block = b - 1; - if ScannerGlobalDb::::is_block_notable(&self.db, prior_block) && - (prior_block > highest_acknowledged) - { + let is_block_notable = ScannerGlobalDb::::is_block_notable(&self.db, b); + if is_block_notable { + /* + If this block is notable *and* not acknowledged, break. + + This is so if Burns queued prior to this block's acknowledgement caused any Eventualities + (which may resolve this block), we have them. If it wasn't for that, it'd be so if this + block's acknowledgement caused any Eventualities, we have them, though those would only + potentially resolve in the next block (letting us scan this block without delay). + */ + if b > highest_acknowledged { break; } + + // Since this block is notable, ensure we've intaked all the Burns preceding it + // We can know with certainty that the channel is fully populated at this time since we've + // acknowledged a newer block (so we've handled the state up to this point and new state + // will be for the newer block) + #[allow(unused_assignments)] + { + made_progress |= self.intake_burns(); + } } - iterated = true; + // Since we're handling this block, we are making progress + made_progress = true; let block = self.feed.block_by_number(&self.db, b).await?; @@ -186,6 +253,7 @@ impl> ContinuallyRan for EventualityTas let mut non_external_outputs = block.scan_for_outputs(key.key); non_external_outputs.retain(|output| output.kind() != OutputType::External); // Drop any outputs less than the dust limit + // TODO: Either further filter to outputs we made or also check cost_to_aggregate non_external_outputs.retain(|output| { let balance = output.balance(); balance.amount.0 >= self.feed.dust(balance.coin).0 @@ -288,10 +356,16 @@ impl> ContinuallyRan for EventualityTas // Update the next-to-check block EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, next_to_check); + + // If this block was notable, update the latest-handled notable block + if is_block_notable { + EventualityDb::::set_latest_handled_notable_block(&mut txn, b); + } + txn.commit(); } // Run dependents if we successfully checked any blocks - Ok(iterated) + Ok(made_progress) } } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 3e828fcb1..27395d790 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -7,6 +7,7 @@ use serai_db::{Get, DbTxn, Db}; use serai_primitives::{NetworkId, Coin, Amount}; use serai_in_instructions_primitives::Batch; +use serai_coins_primitives::OutInstructionWithBalance; use primitives::{task::*, Address, ReceivedOutput, Block}; @@ -15,7 +16,7 @@ mod lifetime; // Database schema definition and associated functions. mod db; -use db::ScannerGlobalDb; +use db::{ScannerGlobalDb, SubstrateToEventualityDb}; // Task to index the blockchain, ensuring we don't reorganize finalized blocks. mod index; // Scans blocks for received coins. @@ -147,7 +148,7 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { /// The dust threshold for the specified coin. /// - /// This MUST be constant. Serai MJUST NOT create internal outputs worth less than this. 
This + /// This MUST be constant. Serai MUST NOT create internal outputs worth less than this. This /// SHOULD be a value worth handling at a human level. fn dust(&self, coin: Coin) -> Amount; } @@ -195,6 +196,40 @@ pub trait Scheduler: 'static + Send { txn: &mut impl DbTxn, update: SchedulerUpdate, ) -> HashMap, Vec>>; + + /// Fulfill a series of payments, yielding the Eventualities now to be scanned for. + /// + /// Any Eventualities returned by this function must include an output-to-self (such as a Branch + /// or Change), unless they descend from a transaction returned by this function which satisfies + /// that requirement. + /// + /// The `Vec` used as the key in the returned HashMap should be the encoded key the + /// Eventualities are for. + /* + We need an output-to-self so we can detect a block with an Eventuality completion with regards + to Burns, forcing us to ensure we have accumulated all the Burns we should by the time we + handle that block. We explicitly don't require children have this requirement as by detecting + the first resolution, we ensure we'll accumulate the Burns (therefore becoming aware of the + childrens' Eventualities, enabling recognizing their resolutions). + + This carve out enables the following: + + ------------------ Fulfillment TX ---------------------- + | Primary Output | ---------------> | New Primary Output | + ------------------ | ---------------------- + | + | ------------------------------ + |------> | Branching Output for Burns | + ------------------------------ + + Without wasting pointless Change outputs on every transaction (as there's a single parent which + has an output-to-self). + */ + fn fulfill( + &mut self, + txn: &mut impl DbTxn, + payments: Vec, + ) -> HashMap, Vec>>; } /// A representation of a scanner. @@ -242,6 +277,8 @@ impl Scanner { /// /// This means this block was ordered on Serai in relation to `Burn` events, and all validators /// have achieved synchrony on it. + /// + /// The calls to this function must be ordered with regards to `queue_burns`. pub fn acknowledge_block( &mut self, mut txn: impl DbTxn, @@ -249,10 +286,23 @@ impl Scanner { key_to_activate: Option>, ) { log::info!("acknowledging block {block_number}"); + assert!( ScannerGlobalDb::::is_block_notable(&txn, block_number), "acknowledging a block which wasn't notable" ); + if let Some(prior_highest_acknowledged_block) = + ScannerGlobalDb::::highest_acknowledged_block(&txn) + { + assert!(block_number > prior_highest_acknowledged_block, "acknowledging blocks out-of-order"); + for b in (prior_highest_acknowledged_block + 1) .. (block_number - 1) { + assert!( + !ScannerGlobalDb::::is_block_notable(&txn, b), + "skipped acknowledging a block which was notable" + ); + } + } + ScannerGlobalDb::::set_highest_acknowledged_block(&mut txn, block_number); if let Some(key_to_activate) = key_to_activate { ScannerGlobalDb::::queue_key(&mut txn, block_number + S::WINDOW_LENGTH, key_to_activate); @@ -268,13 +318,38 @@ impl Scanner { /// Queue Burns. /// - /// The scanner only updates the scheduler with new outputs upon acknowledging a block. We can - /// safely queue Burns so long as they're only actually added once we've handled the outputs from - /// the block acknowledged prior to their queueing. - pub fn queue_burns(&mut self, txn: &mut impl DbTxn, burns: Vec<()>) { + /// The scanner only updates the scheduler with new outputs upon acknowledging a block. The + /// ability to fulfill Burns, and therefore their order, is dependent on the current output + /// state. 
This immediately sets a bound that this function is ordered with regards to + /// `acknowledge_block`. + /* + The fact Burns can be queued during any Substrate block is problematic. The scanner is allowed + to scan anything within the window set by the Eventuality task. The Eventuality task is allowed + to handle all blocks until it reaches a block needing acknowledgement. + + This means we may queue Burns when the latest acknowledged block is 1, yet we've already + scanned 101. Such Burns may complete back in block 2, and we simply wouldn't have noticed due + to not having yet generated the Eventualities. + + We solve this by mandating all transactions made as the result of an Eventuality include a + output-to-self worth at least `N::DUST`. If that occurs, the scanner will force a consensus + protocol on block 2. Accordingly, we won't scan all the way to block 101 (missing the + resolution of the Eventuality) as we'll obtain synchrony on block 2 and all Burns queued prior + to it. + + Another option would be to re-check historical blocks, yet this would potentially redo an + unbounded amount of work. It would also not allow us to safely detect if received outputs were + in fact the result of Eventualities or not. + + Another option would be to schedule Burns after the next-acknowledged block, yet this would add + latency and likely practically require we add regularly scheduled notable blocks (which may be + unnecessary). + */ + pub fn queue_burns(&mut self, txn: &mut impl DbTxn, burns: &Vec) { let queue_as_of = ScannerGlobalDb::::highest_acknowledged_block(txn) .expect("queueing Burns yet never acknowledged a block"); - todo!("TODO") + + SubstrateToEventualityDb::send_burns(txn, queue_as_of, burns) } } From 497d7fb131e351a8f53293597c99a8c6b4221a05 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 29 Aug 2024 12:49:35 -0400 Subject: [PATCH 043/179] Clarify output-to-self to output-to-Serai There's only the requirement it's to an active key which is being reported for. --- processor/scanner/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 27395d790..77bed7fc5 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -199,14 +199,14 @@ pub trait Scheduler: 'static + Send { /// Fulfill a series of payments, yielding the Eventualities now to be scanned for. /// - /// Any Eventualities returned by this function must include an output-to-self (such as a Branch + /// Any Eventualities returned by this function must include an output-to-Serai (such as a Branch /// or Change), unless they descend from a transaction returned by this function which satisfies /// that requirement. /// /// The `Vec` used as the key in the returned HashMap should be the encoded key the /// Eventualities are for. /* - We need an output-to-self so we can detect a block with an Eventuality completion with regards + We need an output-to-Serai so we can detect a block with an Eventuality completion with regards to Burns, forcing us to ensure we have accumulated all the Burns we should by the time we handle that block. We explicitly don't require children have this requirement as by detecting the first resolution, we ensure we'll accumulate the Burns (therefore becoming aware of the @@ -223,7 +223,7 @@ pub trait Scheduler: 'static + Send { ------------------------------ Without wasting pointless Change outputs on every transaction (as there's a single parent which - has an output-to-self). 
+ has an output-to-Serai, the new primary output). */ fn fulfill( &mut self, @@ -332,7 +332,7 @@ impl Scanner { to not having yet generated the Eventualities. We solve this by mandating all transactions made as the result of an Eventuality include a - output-to-self worth at least `N::DUST`. If that occurs, the scanner will force a consensus + output-to-Serai worth at least `DUST`. If that occurs, the scanner will force a consensus protocol on block 2. Accordingly, we won't scan all the way to block 101 (missing the resolution of the Eventuality) as we'll obtain synchrony on block 2 and all Burns queued prior to it. From ca52e6a18862fbc4ee5d8d2a7d94cd9463876682 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 29 Aug 2024 14:57:43 -0400 Subject: [PATCH 044/179] Add helper to intake Eventualities --- processor/scanner/src/eventuality/mod.rs | 80 ++++++++++++------------ processor/scanner/src/lib.rs | 1 - 2 files changed, 40 insertions(+), 41 deletions(-) diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 38176ed46..7b5e3eed0 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use group::GroupEncoding; use serai_db::{Get, DbTxn, Db}; @@ -10,7 +12,7 @@ use crate::{ OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, SubstrateToEventualityDb, ScanToEventualityDb, }, - BlockExt, ScannerFeed, KeyFor, SchedulerUpdate, Scheduler, sort_outputs, + BlockExt, ScannerFeed, KeyFor, EventualityFor, SchedulerUpdate, Scheduler, sort_outputs, scan::{next_to_scan_for_outputs_block, queue_output_until_block}, }; @@ -28,6 +30,29 @@ pub(crate) fn latest_scannable_block(getter: &impl Get) -> Optio .map(|b| b + S::WINDOW_LENGTH - 1) } +/// Intake a set of Eventualities into the DB. +/// +/// The HashMap is keyed by the key these Eventualities are for. +fn intake_eventualities( + txn: &mut impl DbTxn, + to_intake: HashMap, Vec>>, +) { + for (key, new_eventualities) in to_intake { + let key = { + let mut key_repr = as GroupEncoding>::Repr::default(); + assert_eq!(key.len(), key_repr.as_ref().len()); + key_repr.as_mut().copy_from_slice(&key); + KeyFor::::from_bytes(&key_repr).unwrap() + }; + + let mut eventualities = EventualityDb::::eventualities(txn, key); + for new_eventuality in new_eventualities { + eventualities.insert(new_eventuality); + } + EventualityDb::::set_eventualities(txn, key, &eventualities); + } +} + /* When we scan a block, we receive outputs. 
When this block is acknowledged, we accumulate those outputs into some scheduler, potentially causing certain transactions to begin their signing @@ -106,22 +131,7 @@ impl> EventualityTask { intaked_any = true; let new_eventualities = self.scheduler.fulfill(&mut txn, burns); - - // TODO: De-duplicate this with below instance via a helper function - for (key, new_eventualities) in new_eventualities { - let key = { - let mut key_repr = as GroupEncoding>::Repr::default(); - assert_eq!(key.len(), key_repr.as_ref().len()); - key_repr.as_mut().copy_from_slice(&key); - KeyFor::::from_bytes(&key_repr).unwrap() - }; - - let mut eventualities = EventualityDb::::eventualities(&txn, key); - for new_eventuality in new_eventualities { - eventualities.insert(new_eventuality); - } - EventualityDb::::set_eventualities(&mut txn, key, &eventualities); - } + intake_eventualities::(&mut txn, new_eventualities); } txn.commit(); } @@ -310,30 +320,20 @@ impl> ContinuallyRan for EventualityTas } // Update the scheduler - let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns }; - scheduler_update.outputs.sort_by(sort_outputs); - scheduler_update.forwards.sort_by(sort_outputs); - scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output)); - // Intake the new Eventualities - let new_eventualities = self.scheduler.update(&mut txn, scheduler_update); - for (key, new_eventualities) in new_eventualities { - let key = { - let mut key_repr = as GroupEncoding>::Repr::default(); - assert_eq!(key.len(), key_repr.as_ref().len()); - key_repr.as_mut().copy_from_slice(&key); - KeyFor::::from_bytes(&key_repr).unwrap() - }; - - keys - .iter() - .find(|serai_key| serai_key.key == key) - .expect("queueing eventuality for key which isn't active"); - - let mut eventualities = EventualityDb::::eventualities(&txn, key); - for new_eventuality in new_eventualities { - eventualities.insert(new_eventuality); + { + let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns }; + scheduler_update.outputs.sort_by(sort_outputs); + scheduler_update.forwards.sort_by(sort_outputs); + scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output)); + // Intake the new Eventualities + let new_eventualities = self.scheduler.update(&mut txn, scheduler_update); + for key in new_eventualities.keys() { + keys + .iter() + .find(|serai_key| serai_key.key.to_bytes().as_ref() == key.as_slice()) + .expect("intaking Eventuality for key which isn't active"); } - EventualityDb::::set_eventualities(&mut txn, key, &eventualities); + intake_eventualities::(&mut txn, new_eventualities); } // Now that we've intaked any Eventualities caused, check if we're retiring any keys diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 77bed7fc5..5f7e44a28 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -242,7 +242,6 @@ impl Scanner { /// Create a new scanner. /// /// This will begin its execution, spawning several asynchronous tasks. - // TODO: Take start_time and binary search here? 
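The `intake_eventualities` helper introduced above rebuilds each key from its encoded bytes via `GroupEncoding`. That round-trip as a standalone, non-panicking library helper, assuming only the `group` crate (the processor asserts instead, since it only reads keys it previously wrote):

```rust
use group::GroupEncoding;

/// Decode a group element from bytes, returning None on a length mismatch or
/// an invalid encoding.
fn decode_key<G: GroupEncoding>(bytes: &[u8]) -> Option<G> {
  let mut repr = G::Repr::default();
  // The encoding is fixed-length, so the byte slice must match exactly.
  if bytes.len() != repr.as_ref().len() {
    return None;
  }
  repr.as_mut().copy_from_slice(bytes);
  // from_bytes yields a CtOption; lower it to Option for callers.
  Option::from(G::from_bytes(&repr))
}
```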
pub async fn new( db: impl Db, feed: S, From b610135f3a77e4fee36338dacf559edf7d73fcf9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 29 Aug 2024 15:26:04 -0400 Subject: [PATCH 045/179] Add key management to the scheduler --- processor/scanner/src/eventuality/db.rs | 23 ++++++++++++++++- processor/scanner/src/eventuality/mod.rs | 14 +++++++++- processor/scanner/src/lib.rs | 33 +++++++++++++++++++----- 3 files changed, 62 insertions(+), 8 deletions(-) diff --git a/processor/scanner/src/eventuality/db.rs b/processor/scanner/src/eventuality/db.rs index f810ba2f6..da8a3024c 100644 --- a/processor/scanner/src/eventuality/db.rs +++ b/processor/scanner/src/eventuality/db.rs @@ -1,12 +1,17 @@ use core::marker::PhantomData; use scale::Encode; +use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db}; use primitives::{EncodableG, Eventuality, EventualityTracker}; use crate::{ScannerFeed, KeyFor, EventualityFor}; +// The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. +trait Borshy: BorshSerialize + BorshDeserialize {} +impl Borshy for T {} + create_db!( ScannerEventuality { // The next block to check for resolving eventualities @@ -15,6 +20,8 @@ create_db!( LatestHandledNotableBlock: () -> u64, SerializedEventualities: (key: K) -> Vec, + + RetiredKey: (block_number: u64) -> K, } ); @@ -51,7 +58,6 @@ impl EventualityDb { } SerializedEventualities::set(txn, EncodableG(key), &serialized); } - pub(crate) fn eventualities( getter: &impl Get, key: KeyFor, @@ -66,4 +72,19 @@ impl EventualityDb { } res } + + pub(crate) fn retire_key(txn: &mut impl DbTxn, block_number: u64, key: KeyFor) { + assert!( + RetiredKey::get::>>(txn, block_number).is_none(), + "retiring multiple keys within the same block" + ); + RetiredKey::set(txn, block_number, &EncodableG(key)); + } + pub(crate) fn take_retired_key(txn: &mut impl DbTxn, block_number: u64) -> Option> { + let res = RetiredKey::get::>>(txn, block_number).map(|res| res.0); + if res.is_some() { + RetiredKey::del::>>(txn, block_number); + } + res + } } diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 7b5e3eed0..c5f93789f 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -248,6 +248,11 @@ impl> ContinuallyRan for EventualityTas let mut outputs = received_external_outputs; for key in &keys { + // If this is the key's activation block, activate it + if key.activation_block_number == b { + self.scheduler.activate_key(&mut txn, key.key); + } + let completed_eventualities = { let mut eventualities = EventualityDb::::eventualities(&txn, key.key); let completed_eventualities = block.check_for_eventuality_resolutions(&mut eventualities); @@ -349,11 +354,18 @@ impl> ContinuallyRan for EventualityTas // Retire this key `WINDOW_LENGTH` blocks in the future to ensure the scan task never // has a malleable view of the keys. 
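`take_retired_key` above implements a one-shot slot: reading consumes the value, so the scheduler is told to retire a key exactly once (a mechanism this series removes again in the following patch, in favor of retiring directly). The same take semantics over an in-memory map (illustrative; the real version runs inside a `DbTxn` with encoded keys):

```rust
use std::collections::HashMap;

struct RetiredKeys {
  by_block: HashMap<u64, [u8; 32]>,
}

impl RetiredKeys {
  fn retire_key(&mut self, block_number: u64, key: [u8; 32]) {
    // At most one key may retire per block.
    assert!(
      self.by_block.insert(block_number, key).is_none(),
      "retiring multiple keys within the same block"
    );
  }

  // Taking removes the entry, so a second call for the same block yields None.
  fn take_retired_key(&mut self, block_number: u64) -> Option<[u8; 32]> {
    self.by_block.remove(&block_number)
  }
}

fn main() {
  let mut retired = RetiredKeys { by_block: HashMap::new() };
  retired.retire_key(5, [0; 32]);
  assert_eq!(retired.take_retired_key(5), Some([0; 32]));
  assert_eq!(retired.take_retired_key(5), None);
}
```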
- ScannerGlobalDb::::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key); + let retire_at = b + S::WINDOW_LENGTH; + ScannerGlobalDb::::retire_key(&mut txn, retire_at, key.key); + EventualityDb::::retire_key(&mut txn, retire_at, key.key); } } } + // If we retired any key at this block, retire it within the scheduler + if let Some(key) = EventualityDb::::take_retired_key(&mut txn, b) { + self.scheduler.retire_key(&mut txn, key); + } + // Update the next-to-check block EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, next_to_check); diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 5f7e44a28..d90ca08e9 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -137,6 +137,12 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { Ok(block) } + /// The dust threshold for the specified coin. + /// + /// This MUST be constant. Serai MUST NOT create internal outputs worth less than this. This + /// SHOULD be a value worth handling at a human level. + fn dust(&self, coin: Coin) -> Amount; + /// The cost to aggregate an input as of the specified block. /// /// This is defined as the transaction fee for a 2-input, 1-output transaction. @@ -145,12 +151,6 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { coin: Coin, reference_block: &Self::Block, ) -> Result; - - /// The dust threshold for the specified coin. - /// - /// This MUST be constant. Serai MUST NOT create internal outputs worth less than this. This - /// SHOULD be a value worth handling at a human level. - fn dust(&self, coin: Coin) -> Amount; } type KeyFor = <::Block as Block>::Key; @@ -187,6 +187,27 @@ pub struct SchedulerUpdate { /// The object responsible for accumulating outputs and planning new transactions. pub trait Scheduler: 'static + Send { + /// Activate a key. + /// + /// This SHOULD setup any necessary database structures. This SHOULD NOT cause the new key to + /// be used as the primary key. The multisig rotation time clearly establishes its steps. + fn activate_key(&mut self, txn: &mut impl DbTxn, key: KeyFor); + + /// Flush all outputs within a retiring key to the new key. + /// + /// When a key is activated, the existing multisig should retain its outputs and utility for a + /// certain time period. With `flush_key`, all outputs should be directed towards fulfilling some + /// obligation or the `new_key`. Every output MUST be connected to an Eventuality. If a key no + /// longer has active Eventualities, it MUST be able to be retired. + // TODO: Call this + fn flush_key(&mut self, txn: &mut impl DbTxn, retiring_key: KeyFor, new_key: KeyFor); + + /// Retire a key as it'll no longer be used. + /// + /// Any key retired MUST NOT still have outputs associated with it. This SHOULD be a NOP other + /// than any assertions and database cleanup. + fn retire_key(&mut self, txn: &mut impl DbTxn, key: KeyFor); + /// Accumulate outputs into the scheduler, yielding the Eventualities now to be scanned for. 
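A sketch of the lifecycle contract the `activate_key`/`flush_key`/`retire_key` trio establishes, as a test-double scheduler that only tracks which keys are live and panics on misuse (a simplification: keys are byte arrays, outputs a count, and flushing moves everything to the new key rather than toward obligations):

```rust
use std::collections::HashMap;

// Test double tracking the activate -> flush -> retire lifecycle.
#[derive(Default)]
struct MockScheduler {
  // Key -> number of outputs still accumulated under it.
  outputs: HashMap<[u8; 32], usize>,
}

impl MockScheduler {
  fn activate_key(&mut self, key: [u8; 32]) {
    assert!(self.outputs.insert(key, 0).is_none(), "key activated twice");
  }

  fn flush_key(&mut self, retiring_key: [u8; 32], new_key: [u8; 32]) {
    // Move everything from the retiring key to the new key.
    let flushed = self.outputs.remove(&retiring_key).expect("flushing inactive key");
    self.outputs.insert(retiring_key, 0);
    *self.outputs.get_mut(&new_key).expect("flushing to inactive key") += flushed;
  }

  fn retire_key(&mut self, key: [u8; 32]) {
    // Retired keys must no longer have outputs associated with them.
    assert_eq!(self.outputs.remove(&key), Some(0), "retired key still had outputs");
  }
}

fn main() {
  let mut scheduler = MockScheduler::default();
  let (old, new) = ([0; 32], [1; 32]);
  scheduler.activate_key(old);
  *scheduler.outputs.get_mut(&old).unwrap() += 3;
  scheduler.activate_key(new);
  scheduler.flush_key(old, new);
  scheduler.retire_key(old);
  assert_eq!(scheduler.outputs[&new], 3);
}
```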
/// /// The `Vec` used as the key in the returned HashMap should be the encoded key the From 7db8bd9a8f310562fedf405dc583a3cc55d4d700 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 29 Aug 2024 16:27:00 -0400 Subject: [PATCH 046/179] Call flush_key --- processor/scanner/src/db.rs | 36 +++++++++++++++++--- processor/scanner/src/eventuality/mod.rs | 9 ++++- processor/scanner/src/lifetime.rs | 43 +++++++++++++++--------- spec/processor/Multisig Rotation.md | 3 +- 4 files changed, 68 insertions(+), 23 deletions(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index a6272eeb8..20aa29995 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -11,7 +11,8 @@ use serai_coins_primitives::OutInstructionWithBalance; use primitives::{EncodableG, Address, ReceivedOutput}; use crate::{ - lifetime::LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, Return, + lifetime::{LifetimeStage, Lifetime}, + ScannerFeed, KeyFor, AddressFor, OutputFor, Return, scan::next_to_scan_for_outputs_block, }; @@ -30,6 +31,7 @@ pub(crate) struct SeraiKey { pub(crate) stage: LifetimeStage, pub(crate) activation_block_number: u64, pub(crate) block_at_which_reporting_starts: u64, + pub(crate) block_at_which_forwarding_starts: Option, } pub(crate) struct OutputWithInInstruction { @@ -82,7 +84,7 @@ create_db!( /* A block is notable if one of three conditions are met: - 1) We activated a key within this block. + 1) We activated a key within this block (or explicitly forward to an activated key). 2) We retired a key within this block. 3) We received outputs within this block. @@ -120,9 +122,32 @@ impl ScannerGlobalDb { // TODO: Panic if we've ever seen this key before - // Push the key + // Fetch the existing keys let mut keys: Vec>>> = ActiveKeys::get(txn).unwrap_or(vec![]); + + // If this new key retires a key, mark the block at which forwarding explicitly occurs notable + // This lets us obtain synchrony over the transactions we'll make to accomplish this + if let Some(key_retired_by_this) = keys.last() { + NotableBlock::set( + txn, + Lifetime::calculate::( + // The 'current block number' used for this calculation + activation_block_number, + // The activation block of the key we're getting the lifetime of + key_retired_by_this.activation_block_number, + // The activation block of the key which will retire this key + Some(activation_block_number), + ) + .block_at_which_forwarding_starts + .expect( + "didn't calculate the block forwarding starts at despite passing the next key's info", + ), + &(), + ); + } + + // Push and save the next key keys.push(SeraiKeyDbEntry { activation_block_number, key: EncodableG(key) }); ActiveKeys::set(txn, &keys); } @@ -185,8 +210,8 @@ impl ScannerGlobalDb { if block_number < raw_keys[i].activation_block_number { continue; } - let (stage, block_at_which_reporting_starts) = - LifetimeStage::calculate_stage_and_reporting_start_block::( + let Lifetime { stage, block_at_which_reporting_starts, block_at_which_forwarding_starts } = + Lifetime::calculate::( block_number, raw_keys[i].activation_block_number, raw_keys.get(i + 1).map(|key| key.activation_block_number), @@ -196,6 +221,7 @@ impl ScannerGlobalDb { stage, activation_block_number: raw_keys[i].activation_block_number, block_at_which_reporting_starts, + block_at_which_forwarding_starts, }); } assert!(keys.len() <= 2, "more than two keys active"); diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index c5f93789f..002131cc6 100644 --- 
a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -341,8 +341,15 @@ impl> ContinuallyRan for EventualityTas intake_eventualities::(&mut txn, new_eventualities); } - // Now that we've intaked any Eventualities caused, check if we're retiring any keys for key in &keys { + // If this is the block at which forwarding starts for this key, flush it + // We do this after we issue the above update for any efficiencies gained by doing so + if key.block_at_which_forwarding_starts == Some(b) { + assert!(key.key != keys.last().unwrap().key); + self.scheduler.flush_key(&mut txn, key.key, keys.last().unwrap().key); + } + + // Now that we've intaked any Eventualities caused, check if we're retiring any keys if key.stage == LifetimeStage::Finishing { let eventualities = EventualityDb::::eventualities(&txn, key.key); // TODO: This assumes the Scheduler is empty diff --git a/processor/scanner/src/lifetime.rs b/processor/scanner/src/lifetime.rs index 09df7a37c..e15c0f55c 100644 --- a/processor/scanner/src/lifetime.rs +++ b/processor/scanner/src/lifetime.rs @@ -35,17 +35,25 @@ pub(crate) enum LifetimeStage { Finishing, } -impl LifetimeStage { - /// Get the stage of its lifetime this multisig is in, and the block at which we start reporting - /// outputs to it. +/// The lifetime of the multisig, including various block numbers. +pub(crate) struct Lifetime { + pub(crate) stage: LifetimeStage, + pub(crate) block_at_which_reporting_starts: u64, + // This is only Some if the next key's activation block number is passed to calculate, and the + // stage is at least `LifetimeStage::Active.` + pub(crate) block_at_which_forwarding_starts: Option, +} + +impl Lifetime { + /// Get the lifetime of this multisig. /// /// Panics if the multisig being calculated for isn't actually active and a variety of other /// insane cases. 
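Before the reworked `calculate` below: the staging arithmetic, including the new forwarding boundary, can be checked concretely. A compressed model of the boundaries it computes, using toy constants standing in for `ScannerFeed`'s (`CONFIRMATIONS = 10`, `TEN_MINUTES = 1` block); this is a worked example, not the function itself:

```rust
// Toy constants standing in for ScannerFeed's associated constants.
const CONFIRMATIONS: u64 = 10;
const TEN_MINUTES: u64 = 1;

// The stage boundaries as computed by Lifetime::calculate.
fn boundaries(activation: u64, next_activation: u64) -> (u64, u64, u64, u64) {
  // Reporting starts once the activation is deeply confirmed.
  let reporting_starts = activation + CONFIRMATIONS + TEN_MINUTES;
  // The new key stops being merely pending at the same offset past its activation.
  let using_new_for_change_starts = next_activation + CONFIRMATIONS + TEN_MINUTES;
  // A further CONFIRMATIONS later, forwarding to the new key starts.
  let forwarding_starts = using_new_for_change_starts + CONFIRMATIONS;
  // Forwarding lasts six hours (6 * 6 ten-minute periods), then Finishing.
  let finishing_starts = forwarding_starts + (6 * 6 * TEN_MINUTES);
  (reporting_starts, using_new_for_change_starts, forwarding_starts, finishing_starts)
}

fn main() {
  // Old key activates at block 100, its successor at block 1000.
  let (reporting, change, forwarding, finishing) = boundaries(100, 1000);
  assert_eq!((reporting, change, forwarding, finishing), (111, 1011, 1021, 1057));
}
```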
- pub(crate) fn calculate_stage_and_reporting_start_block( + pub(crate) fn calculate( block_number: u64, activation_block_number: u64, next_keys_activation_block_number: Option, - ) -> (Self, u64) { + ) -> Self { assert!( activation_block_number >= block_number, "calculating lifetime stage for an inactive multisig" @@ -55,14 +63,14 @@ impl LifetimeStage { let active_yet_not_reporting_end_block = activation_block_number + S::CONFIRMATIONS + S::TEN_MINUTES; // The exclusive end block is the inclusive start block - let reporting_start_block = active_yet_not_reporting_end_block; + let block_at_which_reporting_starts = active_yet_not_reporting_end_block; if block_number < active_yet_not_reporting_end_block { - return (LifetimeStage::ActiveYetNotReporting, reporting_start_block); + return Lifetime { stage: LifetimeStage::ActiveYetNotReporting, block_at_which_reporting_starts, block_at_which_forwarding_starts: None }; } let Some(next_keys_activation_block_number) = next_keys_activation_block_number else { // If there is no next multisig, this is the active multisig - return (LifetimeStage::Active, reporting_start_block); + return Lifetime { stage: LifetimeStage::Active, block_at_which_reporting_starts, block_at_which_forwarding_starts: None }; }; assert!( @@ -70,19 +78,22 @@ impl LifetimeStage { "next set of keys activated before this multisig activated" ); - // If the new multisig is still having its activation block finalized on-chain, this multisig - // is still active (step 3) let new_active_yet_not_reporting_end_block = next_keys_activation_block_number + S::CONFIRMATIONS + S::TEN_MINUTES; + let new_active_and_used_for_change_end_block = + new_active_yet_not_reporting_end_block + S::CONFIRMATIONS; + // The exclusive end block is the inclusive start block + let block_at_which_forwarding_starts = Some(new_active_and_used_for_change_end_block); + + // If the new multisig is still having its activation block finalized on-chain, this multisig + // is still active (step 3) if block_number < new_active_yet_not_reporting_end_block { - return (LifetimeStage::Active, reporting_start_block); + return Lifetime { stage: LifetimeStage::Active, block_at_which_reporting_starts, block_at_which_forwarding_starts }; } // Step 4 details a further CONFIRMATIONS - let new_active_and_used_for_change_end_block = - new_active_yet_not_reporting_end_block + S::CONFIRMATIONS; if block_number < new_active_and_used_for_change_end_block { - return (LifetimeStage::UsingNewForChange, reporting_start_block); + return Lifetime { stage: LifetimeStage::UsingNewForChange, block_at_which_reporting_starts, block_at_which_forwarding_starts }; } // Step 5 details a further 6 hours @@ -90,10 +101,10 @@ impl LifetimeStage { let new_active_and_forwarded_to_end_block = new_active_and_used_for_change_end_block + (6 * 6 * S::TEN_MINUTES); if block_number < new_active_and_forwarded_to_end_block { - return (LifetimeStage::Forwarding, reporting_start_block); + return Lifetime { stage: LifetimeStage::Forwarding, block_at_which_reporting_starts, block_at_which_forwarding_starts }; } // Step 6 - (LifetimeStage::Finishing, reporting_start_block) + Lifetime { stage: LifetimeStage::Finishing, block_at_which_reporting_starts, block_at_which_forwarding_starts } } } diff --git a/spec/processor/Multisig Rotation.md b/spec/processor/Multisig Rotation.md index ff5c3d286..916ce56b4 100644 --- a/spec/processor/Multisig Rotation.md +++ b/spec/processor/Multisig Rotation.md @@ -102,7 +102,8 @@ The following timeline is established: 5) For the next 6 hours, 
all non-`Branch` outputs received are immediately forwarded to the new multisig. Only external transactions to the new multisig - are included in `Batch`s. + are included in `Batch`s. Any outputs not yet transferred as change are + explicitly transferred. The new multisig infers the `InInstruction`, and refund address, for forwarded `External` outputs via reading what they were for the original From 06d0e4925411cc330578f476c20ba6732aa51f2b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 29 Aug 2024 17:37:45 -0400 Subject: [PATCH 047/179] Pass the lifetime information to the scheduler Enables it to decide which keys to use for fulfillment/change. --- processor/scanner/src/eventuality/db.rs | 22 ------- processor/scanner/src/eventuality/mod.rs | 80 +++++++++++++++--------- processor/scanner/src/lib.rs | 14 ++++- processor/scanner/src/lifetime.rs | 40 +++++++++--- 4 files changed, 95 insertions(+), 61 deletions(-) diff --git a/processor/scanner/src/eventuality/db.rs b/processor/scanner/src/eventuality/db.rs index da8a3024c..2bd020258 100644 --- a/processor/scanner/src/eventuality/db.rs +++ b/processor/scanner/src/eventuality/db.rs @@ -1,17 +1,12 @@ use core::marker::PhantomData; use scale::Encode; -use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db}; use primitives::{EncodableG, Eventuality, EventualityTracker}; use crate::{ScannerFeed, KeyFor, EventualityFor}; -// The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. -trait Borshy: BorshSerialize + BorshDeserialize {} -impl Borshy for T {} - create_db!( ScannerEventuality { // The next block to check for resolving eventualities @@ -20,8 +15,6 @@ create_db!( LatestHandledNotableBlock: () -> u64, SerializedEventualities: (key: K) -> Vec, - - RetiredKey: (block_number: u64) -> K, } ); @@ -72,19 +65,4 @@ impl EventualityDb { } res } - - pub(crate) fn retire_key(txn: &mut impl DbTxn, block_number: u64, key: KeyFor) { - assert!( - RetiredKey::get::>>(txn, block_number).is_none(), - "retiring multiple keys within the same block" - ); - RetiredKey::set(txn, block_number, &EncodableG(key)); - } - pub(crate) fn take_retired_key(txn: &mut impl DbTxn, block_number: u64) -> Option> { - let res = RetiredKey::get::>>(txn, block_number).map(|res| res.0); - if res.is_some() { - RetiredKey::del::>>(txn, block_number); - } - res - } } diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 002131cc6..400c5690d 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -9,7 +9,7 @@ use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Eventuality, use crate::{ lifetime::LifetimeStage, db::{ - OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, SubstrateToEventualityDb, + SeraiKey, OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, SubstrateToEventualityDb, ScanToEventualityDb, }, BlockExt, ScannerFeed, KeyFor, EventualityFor, SchedulerUpdate, Scheduler, sort_outputs, @@ -115,6 +115,34 @@ impl> EventualityTask { Self { db, feed, scheduler } } + fn keys_and_keys_with_stages( + &self, + block_number: u64, + ) -> (Vec>>, Vec<(KeyFor, LifetimeStage)>) { + /* + This is proper as the keys for the next-to-scan block (at most `WINDOW_LENGTH` ahead, + which is `<= CONFIRMATIONS`) will be the keys to use here, with only minor edge cases. + + This may include a key which has yet to activate by our perception. We can simply drop + those. 
+ + This may not include a key which has retired by the next-to-scan block. This task is the + one which decides when to retire a key, and when it marks a key to be retired, it is done + with it. Accordingly, it's not an issue if such a key was dropped. + + This also may include a key we've retired which has yet to officially retire. That's fine as + we'll do nothing with it, and the Scheduler traits document this behavior. + */ + assert!(S::WINDOW_LENGTH <= S::CONFIRMATIONS); + let mut keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db) + .expect("scanning for a blockchain without any keys set"); + // Since the next-to-scan block is ahead of us, drop keys which have yet to actually activate + keys.retain(|key| block_number <= key.activation_block_number); + let keys_with_stages = keys.iter().map(|key| (key.key, key.stage)).collect::>(); + + (keys, keys_with_stages) + } + // Returns a boolean of if we intaked any Burns. fn intake_burns(&mut self) -> bool { let mut intaked_any = false; @@ -123,6 +151,11 @@ impl> EventualityTask { if let Some(latest_handled_notable_block) = EventualityDb::::latest_handled_notable_block(&self.db) { + // We always intake Burns per this block as it's the block we have consensus on + // We would have a consensus failure if some thought the change should be the old key and + // others the new key + let (_keys, keys_with_stages) = self.keys_and_keys_with_stages(latest_handled_notable_block); + let mut txn = self.db.txn(); // Drain the entire channel while let Some(burns) = @@ -130,7 +163,7 @@ impl> EventualityTask { { intaked_any = true; - let new_eventualities = self.scheduler.fulfill(&mut txn, burns); + let new_eventualities = self.scheduler.fulfill(&mut txn, &keys_with_stages, burns); intake_eventualities::(&mut txn, new_eventualities); } txn.commit(); @@ -154,6 +187,7 @@ impl> ContinuallyRan for EventualityTas let mut made_progress = false; // Start by intaking any Burns we have sitting around + // It's important we run this regardless of if we have a new block to handle made_progress |= self.intake_burns(); /* @@ -206,8 +240,8 @@ impl> ContinuallyRan for EventualityTas // Since this block is notable, ensure we've intaked all the Burns preceding it // We can know with certainty that the channel is fully populated at this time since we've - // acknowledged a newer block (so we've handled the state up to this point and new state - // will be for the newer block) + // acknowledged a newer block (so we've handled the state up to this point and any new + // state will be for the newer block) #[allow(unused_assignments)] { made_progress |= self.intake_burns(); @@ -221,22 +255,7 @@ impl> ContinuallyRan for EventualityTas log::debug!("checking eventuality completions in block: {} ({b})", hex::encode(block.id())); - /* - This is proper as the keys for the next to scan block (at most `WINDOW_LENGTH` ahead, - which is `<= CONFIRMATIONS`) will be the keys to use here, with only minor edge cases. - - This may include a key which has yet to activate by our perception. We can simply drop - those. - - This may not include a key which has retired by the next-to-scan block. This task is the - one which decides when to retire a key, and when it marks a key to be retired, it is done - with it. Accordingly, it's not an issue if such a key was dropped. 
- */ - let mut keys = - ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db) - .expect("scanning for a blockchain without any keys set"); - // Since the next-to-scan block is ahead of us, drop keys which have yet to actually activate - keys.retain(|key| b <= key.activation_block_number); + let (keys, keys_with_stages) = self.keys_and_keys_with_stages(b); let mut txn = self.db.txn(); @@ -331,7 +350,8 @@ impl> ContinuallyRan for EventualityTas scheduler_update.forwards.sort_by(sort_outputs); scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output)); // Intake the new Eventualities - let new_eventualities = self.scheduler.update(&mut txn, scheduler_update); + let new_eventualities = + self.scheduler.update(&mut txn, &keys_with_stages, scheduler_update); for key in new_eventualities.keys() { keys .iter() @@ -345,7 +365,10 @@ impl> ContinuallyRan for EventualityTas // If this is the block at which forwarding starts for this key, flush it // We do this after we issue the above update for any efficiencies gained by doing so if key.block_at_which_forwarding_starts == Some(b) { - assert!(key.key != keys.last().unwrap().key); + assert!( + key.key != keys.last().unwrap().key, + "key which was forwarding was the last key (which has no key after it to forward to)" + ); self.scheduler.flush_key(&mut txn, key.key, keys.last().unwrap().key); } @@ -361,18 +384,15 @@ impl> ContinuallyRan for EventualityTas // Retire this key `WINDOW_LENGTH` blocks in the future to ensure the scan task never // has a malleable view of the keys. - let retire_at = b + S::WINDOW_LENGTH; - ScannerGlobalDb::::retire_key(&mut txn, retire_at, key.key); - EventualityDb::::retire_key(&mut txn, retire_at, key.key); + ScannerGlobalDb::::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key); + + // We tell the scheduler to retire it now as we're done with it, and this fn doesn't + // require it be called in a canonical order + self.scheduler.retire_key(&mut txn, key.key); } } } - // If we retired any key at this block, retire it within the scheduler - if let Some(key) = EventualityDb::::take_retired_key(&mut txn, b) { - self.scheduler.retire_key(&mut txn, key); - } - // Update the next-to-check block EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, next_to_check); diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index d90ca08e9..2cbae096d 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -13,6 +13,7 @@ use primitives::{task::*, Address, ReceivedOutput, Block}; // Logic for deciding where in its lifetime a multisig is. mod lifetime; +pub use lifetime::LifetimeStage; // Database schema definition and associated functions. mod db; @@ -205,16 +206,22 @@ pub trait Scheduler: 'static + Send { /// Retire a key as it'll no longer be used. /// /// Any key retired MUST NOT still have outputs associated with it. This SHOULD be a NOP other - /// than any assertions and database cleanup. + /// than any assertions and database cleanup. This MUST NOT be expected to be called in any + /// particular order relative to other calls. fn retire_key(&mut self, txn: &mut impl DbTxn, key: KeyFor); /// Accumulate outputs into the scheduler, yielding the Eventualities now to be scanned for. /// + /// `active_keys` is the list of active keys, potentially including a key on which we've already + /// called `retire_key`. If so, its stage will be `Finishing` and no further operations will + /// be expected for it. Nonetheless, it may be present.
+ /// + /// The `Vec` used as the key in the returned HashMap should be the encoded key the /// Eventualities are for. fn update( &mut self, txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], update: SchedulerUpdate, ) -> HashMap, Vec>>; @@ -224,6 +231,10 @@ pub trait Scheduler: 'static + Send { /// or Change), unless they descend from a transaction returned by this function which satisfies /// that requirement. /// + /// `active_keys` is the list of active keys, potentially including a key on which we've already + /// called `retire_key`. If so, its stage will be `Finishing` and no further operations will + /// be expected for it. Nonetheless, it may be present. + /// /// The `Vec` used as the key in the returned HashMap should be the encoded key the /// Eventualities are for. /* @@ -249,6 +260,7 @@ pub trait Scheduler: 'static + Send { fn fulfill( &mut self, txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], payments: Vec, ) -> HashMap, Vec>>; } diff --git a/processor/scanner/src/lifetime.rs b/processor/scanner/src/lifetime.rs index e15c0f55c..bef6af8b4 100644 --- a/processor/scanner/src/lifetime.rs +++ b/processor/scanner/src/lifetime.rs @@ -6,8 +6,8 @@ use crate::ScannerFeed; /// rotation process. Steps 7-8 regard a multisig which isn't retiring yet has retired, and /// accordingly, no longer exists, so they are not modelled here (as this only models active /// multisigs. Inactive multisigs aren't represented in the first place). -#[derive(PartialEq)] -pub(crate) enum LifetimeStage { +#[derive(Clone, Copy, PartialEq)] +pub enum LifetimeStage { /// A new multisig, once active, shouldn't actually start receiving coins until several blocks /// later. If any UI is premature in sending to this multisig, we delay to report the outputs to /// prevent some DoS concerns.
@@ -65,12 +65,20 @@ impl Lifetime { // The exclusive end block is the inclusive start block let block_at_which_reporting_starts = active_yet_not_reporting_end_block; if block_number < active_yet_not_reporting_end_block { - return Lifetime { stage: LifetimeStage::ActiveYetNotReporting, block_at_which_reporting_starts, block_at_which_forwarding_starts: None }; + return Lifetime { + stage: LifetimeStage::ActiveYetNotReporting, + block_at_which_reporting_starts, + block_at_which_forwarding_starts: None, + }; } let Some(next_keys_activation_block_number) = next_keys_activation_block_number else { // If there is no next multisig, this is the active multisig - return Lifetime { stage: LifetimeStage::Active, block_at_which_reporting_starts, block_at_which_forwarding_starts: None }; + return Lifetime { + stage: LifetimeStage::Active, + block_at_which_reporting_starts, + block_at_which_forwarding_starts: None, + }; }; assert!( @@ -88,12 +96,20 @@ impl Lifetime { // If the new multisig is still having its activation block finalized on-chain, this multisig // is still active (step 3) if block_number < new_active_yet_not_reporting_end_block { - return Lifetime { stage: LifetimeStage::Active, block_at_which_reporting_starts, block_at_which_forwarding_starts }; + return Lifetime { + stage: LifetimeStage::Active, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + }; } // Step 4 details a further CONFIRMATIONS if block_number < new_active_and_used_for_change_end_block { - return Lifetime { stage: LifetimeStage::UsingNewForChange, block_at_which_reporting_starts, block_at_which_forwarding_starts }; + return Lifetime { + stage: LifetimeStage::UsingNewForChange, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + }; } // Step 5 details a further 6 hours @@ -101,10 +117,18 @@ impl Lifetime { let new_active_and_forwarded_to_end_block = new_active_and_used_for_change_end_block + (6 * 6 * S::TEN_MINUTES); if block_number < new_active_and_forwarded_to_end_block { - return Lifetime { stage: LifetimeStage::Forwarding, block_at_which_reporting_starts, block_at_which_forwarding_starts }; + return Lifetime { + stage: LifetimeStage::Forwarding, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + }; } // Step 6 - Lifetime { stage: LifetimeStage::Finishing, block_at_which_reporting_starts, block_at_which_forwarding_starts } + Lifetime { + stage: LifetimeStage::Finishing, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + } } } From d1852f974005c96446c5d9b323480ec8e3b13ee5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 29 Aug 2024 21:35:22 -0400 Subject: [PATCH 048/179] Add sanity checks we haven't prior reported an InInstruction for/accumulated an output --- processor/scanner/src/eventuality/db.rs | 19 +++++- processor/scanner/src/eventuality/mod.rs | 20 +++++- processor/scanner/src/lib.rs | 79 ------------------------ processor/scanner/src/scan/db.rs | 20 +++++- processor/scanner/src/scan/mod.rs | 30 ++++++++- 5 files changed, 80 insertions(+), 88 deletions(-) diff --git a/processor/scanner/src/eventuality/db.rs b/processor/scanner/src/eventuality/db.rs index 2bd020258..3e5088d1c 100644 --- a/processor/scanner/src/eventuality/db.rs +++ b/processor/scanner/src/eventuality/db.rs @@ -3,9 +3,9 @@ use core::marker::PhantomData; use scale::Encode; use serai_db::{Get, DbTxn, create_db}; -use primitives::{EncodableG, Eventuality, EventualityTracker}; +use primitives::{EncodableG, ReceivedOutput, Eventuality, EventualityTracker}; -use 
crate::{ScannerFeed, KeyFor, EventualityFor}; +use crate::{ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor}; create_db!( ScannerEventuality { @@ -15,6 +15,8 @@ create_db!( LatestHandledNotableBlock: () -> u64, SerializedEventualities: (key: K) -> Vec, + + AccumulatedOutput: (id: &[u8]) -> (), } ); @@ -65,4 +67,17 @@ impl EventualityDb { } res } + + pub(crate) fn prior_accumulated_output( + getter: &impl Get, + id: & as ReceivedOutput, AddressFor>>::Id, + ) -> bool { + AccumulatedOutput::get(getter, id.as_ref()).is_some() + } + pub(crate) fn accumulated_output( + txn: &mut impl DbTxn, + id: & as ReceivedOutput, AddressFor>>::Id, + ) { + AccumulatedOutput::set(txn, id.as_ref(), &()); + } } diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 400c5690d..43f6b7842 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -12,7 +12,8 @@ use crate::{ SeraiKey, OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, SubstrateToEventualityDb, ScanToEventualityDb, }, - BlockExt, ScannerFeed, KeyFor, EventualityFor, SchedulerUpdate, Scheduler, sort_outputs, + BlockExt, ScannerFeed, KeyFor, OutputFor, EventualityFor, SchedulerUpdate, Scheduler, + sort_outputs, scan::{next_to_scan_for_outputs_block, queue_output_until_block}, }; @@ -349,6 +350,22 @@ impl> ContinuallyRan for EventualityTas scheduler_update.outputs.sort_by(sort_outputs); scheduler_update.forwards.sort_by(sort_outputs); scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output)); + + // Sanity check we've never accumulated these outputs before + { + let a: core::slice::Iter<'_, OutputFor> = scheduler_update.outputs.iter(); + let b: core::slice::Iter<'_, OutputFor> = scheduler_update.forwards.iter(); + let c = scheduler_update.returns.iter().map(|output_to_return| &output_to_return.output); + + for output in a.chain(b).chain(c) { + assert!( + !EventualityDb::::prior_accumulated_output(&txn, &output.id()), + "prior accumulated an output with this ID" + ); + EventualityDb::::accumulated_output(&mut txn, &output.id()); + } + } + // Intake the new Eventualities let new_eventualities = self.scheduler.update(&mut txn, &keys_with_stages, scheduler_update); @@ -375,7 +392,6 @@ impl> ContinuallyRan for EventualityTas // Now that we've intaked any Eventualities caused, check if we're retiring any keys if key.stage == LifetimeStage::Finishing { let eventualities = EventualityDb::::eventualities(&txn, key.key); - // TODO: This assumes the Scheduler is empty if eventualities.active_eventualities.is_empty() { log::info!( "key {} has finished and is being retired", diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 2cbae096d..7c6466ff5 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -200,7 +200,6 @@ pub trait Scheduler: 'static + Send { /// certain time period. With `flush_key`, all outputs should be directed towards fulfilling some /// obligation or the `new_key`. Every output MUST be connected to an Eventuality. If a key no /// longer has active Eventualities, it MUST be able to be retired. - // TODO: Call this fn flush_key(&mut self, txn: &mut impl DbTxn, retiring_key: KeyFor, new_key: KeyFor); /// Retire a key as it'll no longer be used. 
@@ -384,81 +383,3 @@ impl Scanner { SubstrateToEventualityDb::send_burns(txn, queue_as_of, burns) } } - -/* -#[derive(Clone, Debug)] -struct ScannerGlobalDb(PhantomData, PhantomData); -impl ScannerGlobalDb { - fn seen_key(id: &>::Id) -> Vec { - Self::scanner_key(b"seen", id) - } - fn seen(getter: &G, id: &>::Id) -> bool { - getter.get(Self::seen_key(id)).is_some() - } - - fn save_scanned_block(txn: &mut D::Transaction<'_>, block: usize) -> Vec { - let id = Self::block(txn, block); // It may be None for the first key rotated to - let outputs = - if let Some(id) = id.as_ref() { Self::outputs(txn, id).unwrap_or(vec![]) } else { vec![] }; - - // Mark all the outputs from this block as seen - for output in &outputs { - txn.put(Self::seen_key(&output.id()), b""); - } - - txn.put(Self::scanned_block_key(), u64::try_from(block).unwrap().to_le_bytes()); - - // Return this block's outputs so they can be pruned from the RAM cache - outputs - } -} - - // Panic if we've already seen these outputs - for output in &outputs { - let id = output.id(); - info!( - "block {} had output {} worth {:?}", - hex::encode(&block_id), - hex::encode(&id), - output.balance(), - ); - - // On Bitcoin, the output ID should be unique for a given chain - // On Monero, it's trivial to make an output sharing an ID with another - // We should only scan outputs with valid IDs however, which will be unique - - /* - The safety of this code must satisfy the following conditions: - 1) seen is not set for the first occurrence - 2) seen is set for any future occurrence - - seen is only written to after this code completes. Accordingly, it cannot be set - before the first occurrence UNLESSS it's set, yet the last scanned block isn't. - They are both written in the same database transaction, preventing this. - - As for future occurrences, the RAM entry ensures they're handled properly even if - the database has yet to be set. - - On reboot, which will clear the RAM, if seen wasn't set, neither was latest scanned - block. Accordingly, this will scan from some prior block, re-populating the RAM. - - If seen was set, then this will be successfully read. - - There's also no concern ram_outputs was pruned, yet seen wasn't set, as pruning - from ram_outputs will acquire a write lock (preventing this code from acquiring - its own write lock and running), and during its holding of the write lock, it - commits the transaction setting seen and the latest scanned block. - - This last case isn't true. Committing seen/latest_scanned_block happens after - relinquishing the write lock. - - TODO2: Only update ram_outputs after committing the TXN in question. 
- */ - let seen = ScannerGlobalDb::::seen(&db, &id); - let id = id.as_ref().to_vec(); - if seen || scanner.ram_outputs.contains(&id) { - panic!("scanned an output multiple times"); - } - scanner.ram_outputs.insert(id); - } -*/ diff --git a/processor/scanner/src/scan/db.rs b/processor/scanner/src/scan/db.rs index 6df84df18..44023bc86 100644 --- a/processor/scanner/src/scan/db.rs +++ b/processor/scanner/src/scan/db.rs @@ -2,7 +2,9 @@ use core::marker::PhantomData; use serai_db::{Get, DbTxn, create_db}; -use crate::{db::OutputWithInInstruction, ScannerFeed}; +use primitives::ReceivedOutput; + +use crate::{db::OutputWithInInstruction, ScannerFeed, KeyFor, AddressFor, OutputFor}; create_db!( ScannerScan { @@ -10,6 +12,8 @@ create_db!( NextToScanForOutputsBlock: () -> u64, SerializedQueuedOutputs: (block_number: u64) -> Vec, + + ReportedInInstructionForOutput: (id: &[u8]) -> (), } ); @@ -38,7 +42,6 @@ impl ScanDb { } res } - pub(crate) fn queue_output_until_block( txn: &mut impl DbTxn, queue_for_block: u64, @@ -49,4 +52,17 @@ impl ScanDb { output.write(&mut outputs).unwrap(); SerializedQueuedOutputs::set(txn, queue_for_block, &outputs); } + + pub(crate) fn prior_reported_in_instruction_for_output( + getter: &impl Get, + id: & as ReceivedOutput, AddressFor>>::Id, + ) -> bool { + ReportedInInstructionForOutput::get(getter, id.as_ref()).is_some() + } + pub(crate) fn reported_in_instruction_for_output( + txn: &mut impl DbTxn, + id: & as ReceivedOutput, AddressFor>>::Id, + ) { + ReportedInInstructionForOutput::set(txn, id.as_ref(), &()); + } } diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index 59d0f197c..f76adb001 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -149,8 +149,8 @@ impl ContinuallyRan for ScanTask { queued_outputs }; for queued_output in queued_outputs { + in_instructions.push((queued_output.output.id(), queued_output.in_instruction)); scan_data.received_external_outputs.push(queued_output.output); - in_instructions.push(queued_output.in_instruction); } // We subtract the cost to aggregate from some outputs we scan @@ -297,13 +297,37 @@ impl ContinuallyRan for ScanTask { // Ensures we didn't miss a `continue` above assert!(matches!(key.stage, LifetimeStage::Active | LifetimeStage::UsingNewForChange)); - scan_data.received_external_outputs.push(output_with_in_instruction.output.clone()); - in_instructions.push(output_with_in_instruction.in_instruction); + in_instructions.push(( + output_with_in_instruction.output.id(), + output_with_in_instruction.in_instruction, + )); + scan_data.received_external_outputs.push(output_with_in_instruction.output); } } + // Sort the InInstructions by the output ID + in_instructions.sort_by(|(output_id_a, _), (output_id_b, _)| { + use core::cmp::{Ordering, Ord}; + let res = output_id_a.as_ref().cmp(output_id_b.as_ref()); + assert!(res != Ordering::Equal, "two outputs within a collection had the same ID"); + res + }); + // Check we haven't prior reported an InInstruction for this output + // This is a sanity check which is intended to prevent multiple instances of sriXYZ on-chain + // due to a single output + for (id, _) in &in_instructions { + assert!( + !ScanDb::::prior_reported_in_instruction_for_output(&txn, id), + "prior reported an InInstruction for an output with this ID" + ); + ScanDb::::reported_in_instruction_for_output(&mut txn, id); + } + // Reformat the InInstructions to just the InInstructions + let in_instructions = + in_instructions.into_iter().map(|(_id, 
in_instruction)| in_instruction).collect::>(); // Send the InInstructions to the report task ScanToReportDb::::send_in_instructions(&mut txn, b, in_instructions); + // Send the scan data to the eventuality task ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); // Update the next to scan block From 8c3a37c30cb918acc396afe97e9d7f14f433f6c4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 29 Aug 2024 21:47:25 -0400 Subject: [PATCH 049/179] Add note on why LifetimeStage is monotonic --- processor/scanner/src/eventuality/mod.rs | 46 +++++++++++++++++------- 1 file changed, 33 insertions(+), 13 deletions(-) diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 43f6b7842..7db188dec 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -351,31 +351,51 @@ impl> ContinuallyRan for EventualityTas scheduler_update.forwards.sort_by(sort_outputs); scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output)); - // Sanity check we've never accumulated these outputs before - { + let empty = { let a: core::slice::Iter<'_, OutputFor> = scheduler_update.outputs.iter(); let b: core::slice::Iter<'_, OutputFor> = scheduler_update.forwards.iter(); let c = scheduler_update.returns.iter().map(|output_to_return| &output_to_return.output); + let mut all_outputs = a.chain(b).chain(c).peekable(); + + // If we received any output, sanity check this block is notable + let empty = all_outputs.peek().is_none(); + if !empty { + assert!(is_block_notable, "accumulating output(s) in non-notable block"); + } - for output in a.chain(b).chain(c) { + // Sanity check we've never accumulated these outputs before + for output in all_outputs { assert!( !EventualityDb::::prior_accumulated_output(&txn, &output.id()), "prior accumulated an output with this ID" ); EventualityDb::::accumulated_output(&mut txn, &output.id()); } - } - // Intake the new Eventualities - let new_eventualities = - self.scheduler.update(&mut txn, &keys_with_stages, scheduler_update); - for key in new_eventualities.keys() { - keys - .iter() - .find(|serai_key| serai_key.key.to_bytes().as_ref() == key.as_slice()) - .expect("intaking Eventuality for key which isn't active"); + empty + }; + + if !empty { + // Accumulate the outputs + /* + This uses the `keys_with_stages` for the current block, yet this block is notable. + Accordingly, all future intaked Burns will use at least this block when determining + what LifetimeStage a key is. That makes the LifetimeStage monotonically incremented. If + this block wasn't notable, we'd potentially intake Burns with the LifetimeStage + determined off an earlier block than this (enabling an earlier LifetimeStage to be used + after a later one was already used). 
+ */ + let new_eventualities = + self.scheduler.update(&mut txn, &keys_with_stages, scheduler_update); + // Intake the new Eventualities + for key in new_eventualities.keys() { + keys + .iter() + .find(|serai_key| serai_key.key.to_bytes().as_ref() == key.as_slice()) + .expect("intaking Eventuality for key which isn't active"); + } + intake_eventualities::(&mut txn, new_eventualities); + } } for key in &keys { From 7dc567dbcca8ea64ff3b758d7bc8c4929dfbf0f2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 29 Aug 2024 21:58:56 -0400 Subject: [PATCH 050/179] Have the Eventuality task drop outputs which aren't ours and aren't worth it to aggregate We could drop these entirely, yet there's some degree of utility to be able to add coins to Serai in this manner. --- processor/scanner/src/eventuality/mod.rs | 28 ++++++++++++++++++++++-- processor/scanner/src/scan/mod.rs | 5 ++++- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 7db188dec..9068769be 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::{HashSet, HashMap}; use group::GroupEncoding; @@ -288,7 +288,6 @@ impl> ContinuallyRan for EventualityTas let mut non_external_outputs = block.scan_for_outputs(key.key); non_external_outputs.retain(|output| output.kind() != OutputType::External); // Drop any outputs less than the dust limit - // TODO: Either further filter to outputs we made or also check cost_to_aggregate non_external_outputs.retain(|output| { let balance = output.balance(); balance.amount.0 >= self.feed.dust(balance.coin).0 @@ -315,6 +314,31 @@ impl> ContinuallyRan for EventualityTas .retain(|output| completed_eventualities.contains_key(&output.transaction_id())); } + // Finally, for non-External outputs we didn't make, we check they're worth more than the + // cost to aggregate them to avoid some profitable spam attacks by malicious miners + { + // Fetch and cache the costs to aggregate as this call may be expensive + let coins = + non_external_outputs.iter().map(|output| output.balance().coin).collect::>(); + let mut costs_to_aggregate = HashMap::new(); + for coin in coins { + costs_to_aggregate.insert( + coin, + self.feed.cost_to_aggregate(coin, &block).await.map_err(|e| { + format!("EventualityTask couldn't fetch cost to aggregate {coin:?} at {b}: {e:?}") + })?, + ); + } + + // Only retain our outputs/outputs sufficiently worthwhile + non_external_outputs.retain(|output| { + completed_eventualities.contains_key(&output.transaction_id()) || { + let balance = output.balance(); + balance.amount.0 >= (2 * costs_to_aggregate[&balance.coin].0) + } + }); + } + // Now, we iterate over all Forwarded outputs and queue their InInstructions for output in non_external_outputs.iter().filter(|output| output.kind() == OutputType::Forwarded) diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index f76adb001..405861ba1 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -226,7 +226,10 @@ impl ContinuallyRan for ScanTask { costs_to_aggregate.entry(balance.coin) { e.insert(self.feed.cost_to_aggregate(balance.coin, &block).await.map_err(|e| { - format!("couldn't fetch cost to aggregate {:?} at {b}: {e:?}", balance.coin) + format!( "ScanTask couldn't fetch cost to aggregate {:?} at {b}: {e:?}", 
balance.coin + ) })?); } let cost_to_aggregate = costs_to_aggregate[&balance.coin]; From e888b97eb2ad2089a0a4f6d8a664366f4032bbd4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 29 Aug 2024 23:47:43 -0400 Subject: [PATCH 051/179] Check a queued key has never been queued before Re-queueing should only happen with a malicious supermajority and breaks indexing by the key. --- processor/scanner/src/db.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 20aa29995..6630c0a3f 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -72,6 +72,8 @@ impl OutputWithInInstruction { create_db!( ScannerGlobal { + QueuedKey: (key: K) -> (), + ActiveKeys: () -> Vec>, RetireAt: (key: K) -> u64, @@ -120,7 +122,10 @@ impl ScannerGlobalDb { // Set the block which has a key activate as notable NotableBlock::set(txn, activation_block_number, &()); - // TODO: Panic if we've ever seen this key before + // Check this key has never been queued before + // This should only happen if a malicious supermajority collude, and breaks indexing by the key + assert!(QueuedKey::get(txn, EncodableG(key)).is_none(), "key being queued was prior queued"); + QueuedKey::set(txn, EncodableG(key), &()); // Fetch the existing keys let mut keys: Vec>>> = From d441d6f6b729ab15ce116ad03167690eff50a065 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 30 Aug 2024 00:11:00 -0400 Subject: [PATCH 052/179] Impl ScanData serialization in the DB --- processor/scanner/src/db.rs | 84 ++++++++++++++++++++++++++++++------ processor/scanner/src/lib.rs | 15 ++++++- 2 files changed, 85 insertions(+), 14 deletions(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 6630c0a3f..698bf5468 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -1,5 +1,5 @@ use core::marker::PhantomData; -use std::io; +use std::io::{self, Read, Write}; use scale::{Encode, Decode, IoReader}; use borsh::{BorshSerialize, BorshDeserialize}; @@ -301,15 +301,9 @@ pub(crate) struct ReceiverScanData { pub(crate) returns: Vec>, } -#[derive(BorshSerialize, BorshDeserialize)] -pub(crate) struct SerializedScanData { - pub(crate) block_number: u64, - pub(crate) data: Vec, -} - db_channel! 
{ ScannerScanEventuality { - ScannedBlock: (empty_key: ()) -> SerializedScanData, + ScannedBlock: (empty_key: ()) -> Vec, } } @@ -328,6 +322,8 @@ impl ScanToEventualityDb { } /* + TODO + SerializedForwardedOutputsIndex: (block_number: u64) -> Vec, SerializedForwardedOutput: (output_id: &[u8]) -> Vec, @@ -352,18 +348,80 @@ impl ScanToEventualityDb { } */ - ScannedBlock::send(txn, (), todo!("TODO")); + let mut buf = vec![]; + buf.write_all(&data.block_number.to_le_bytes()).unwrap(); + buf + .write_all(&u32::try_from(data.received_external_outputs.len()).unwrap().to_le_bytes()) + .unwrap(); + for output in &data.received_external_outputs { + output.write(&mut buf).unwrap(); + } + buf.write_all(&u32::try_from(data.forwards.len()).unwrap().to_le_bytes()).unwrap(); + for output_with_in_instruction in &data.forwards { + // Only write the output, as we saved the InInstruction above as needed + output_with_in_instruction.output.write(&mut buf).unwrap(); + } + buf.write_all(&u32::try_from(data.returns.len()).unwrap().to_le_bytes()).unwrap(); + for output in &data.returns { + output.write(&mut buf).unwrap(); + } + ScannedBlock::send(txn, (), &buf); } - pub(crate) fn recv_scan_data(txn: &mut impl DbTxn, block_number: u64) -> ReceiverScanData { + pub(crate) fn recv_scan_data( + txn: &mut impl DbTxn, + expected_block_number: u64, + ) -> ReceiverScanData { let data = ScannedBlock::try_recv(txn, ()).expect("receiving data for a scanned block not yet sent"); + let mut data = data.as_slice(); + + let block_number = { + let mut block_number = [0; 8]; + data.read_exact(&mut block_number).unwrap(); + u64::from_le_bytes(block_number) + }; assert_eq!( - block_number, data.block_number, + block_number, expected_block_number, "received data for a scanned block distinct than expected" ); - let data = &data.data; - todo!("TODO") + let received_external_outputs = { + let mut len = [0; 4]; + data.read_exact(&mut len).unwrap(); + let len = usize::try_from(u32::from_le_bytes(len)).unwrap(); + + let mut received_external_outputs = Vec::with_capacity(len); + for _ in 0 .. len { + received_external_outputs.push(OutputFor::::read(&mut data).unwrap()); + } + received_external_outputs + }; + + let forwards = { + let mut len = [0; 4]; + data.read_exact(&mut len).unwrap(); + let len = usize::try_from(u32::from_le_bytes(len)).unwrap(); + + let mut forwards = Vec::with_capacity(len); + for _ in 0 .. len { + forwards.push(OutputFor::::read(&mut data).unwrap()); + } + forwards + }; + + let returns = { + let mut len = [0; 4]; + data.read_exact(&mut len).unwrap(); + let len = usize::try_from(u32::from_le_bytes(len)).unwrap(); + + let mut returns = Vec::with_capacity(len); + for _ in 0 .. 
len { + returns.push(Return::::read(&mut data).unwrap()); + } + returns + }; + + ReceiverScanData { block_number, received_external_outputs, forwards, returns } } } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 7c6466ff5..927fc145d 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -1,5 +1,5 @@ use core::{marker::PhantomData, fmt::Debug}; -use std::collections::HashMap; +use std::{io, collections::HashMap}; use group::GroupEncoding; @@ -179,6 +179,19 @@ pub struct Return { output: OutputFor, } +impl Return { + pub(crate) fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.address.write(writer)?; + self.output.write(writer) + } + + pub(crate) fn read(reader: &mut impl io::Read) -> io::Result { + let address = AddressFor::::read(reader)?; + let output = OutputFor::::read(reader)?; + Ok(Return { address, output }) + } +} + /// An update for the scheduler. pub struct SchedulerUpdate { outputs: Vec>, From 7283a53b09d3199708b3631c750d06ec22771135 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 30 Aug 2024 00:11:31 -0400 Subject: [PATCH 053/179] Remove unused ID -> number lookup --- processor/scanner/src/index/db.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/processor/scanner/src/index/db.rs b/processor/scanner/src/index/db.rs index a46d6fa64..9254f9bcb 100644 --- a/processor/scanner/src/index/db.rs +++ b/processor/scanner/src/index/db.rs @@ -4,8 +4,6 @@ create_db!( ScannerIndex { // A lookup of a block's number to its ID BlockId: (number: u64) -> [u8; 32], - // A lookup of a block's ID to its number - BlockNumber: (id: [u8; 32]) -> u64, // The latest finalized block to appear on the blockchain LatestFinalizedBlock: () -> u64, @@ -16,14 +14,10 @@ pub(crate) struct IndexDb; impl IndexDb { pub(crate) fn set_block(txn: &mut impl DbTxn, number: u64, id: [u8; 32]) { BlockId::set(txn, number, &id); - BlockNumber::set(txn, id, &number); } pub(crate) fn block_id(getter: &impl Get, number: u64) -> Option<[u8; 32]> { BlockId::get(getter, number) } - pub(crate) fn block_number(getter: &impl Get, id: [u8; 32]) -> Option { - BlockNumber::get(getter, id) - } pub(crate) fn set_latest_finalized_block(txn: &mut impl DbTxn, latest_finalized_block: u64) { LatestFinalizedBlock::set(txn, &latest_finalized_block); From d5434b0848aa37f41ecde8e0490e893de0a2d682 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 30 Aug 2024 00:20:34 -0400 Subject: [PATCH 054/179] Route the DB w.r.t. 
forwarded outputs' information --- processor/scanner/src/db.rs | 47 +++++++++++------------- processor/scanner/src/eventuality/mod.rs | 1 + 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 698bf5468..cc86afebb 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -101,6 +101,8 @@ create_db!( */ // This collapses from `bool` to `()`, using if the value was set for true and false otherwise NotableBlock: (number: u64) -> (), + + SerializedForwardedOutput: (id: &[u8]) -> Vec, } ); @@ -267,7 +269,15 @@ impl ScannerGlobalDb { getter: &impl Get, output: & as ReceivedOutput, AddressFor>>::Id, ) -> Option<(Option>, InInstructionWithBalance)> { - todo!("TODO") + let buf = SerializedForwardedOutput::get(getter, output.as_ref())?; + let mut buf = buf.as_slice(); + + let mut opt = [0xff]; + buf.read_exact(&mut opt).unwrap(); + assert!((opt[0] == 0) || (opt[0] == 1)); + + let address = (opt[0] == 1).then(|| AddressFor::::read(&mut buf).unwrap()); + Some((address, InInstructionWithBalance::decode(&mut IoReader(buf)).unwrap())) } } @@ -321,32 +331,19 @@ impl ScanToEventualityDb { NotableBlock::set(txn, block_number, &()); } - /* - TODO - - SerializedForwardedOutputsIndex: (block_number: u64) -> Vec, - SerializedForwardedOutput: (output_id: &[u8]) -> Vec, - - pub(crate) fn save_output_being_forwarded( - txn: &mut impl DbTxn, - block_forwarded_from: u64, - output: &OutputWithInInstruction, - ) { - let mut buf = Vec::with_capacity(128); - output.write(&mut buf).unwrap(); - - let id = output.output.id(); - - // Save this to an index so we can later fetch all outputs to forward - let mut forwarded_outputs = SerializedForwardedOutputsIndex::get(txn, block_forwarded_from) - .unwrap_or(Vec::with_capacity(32)); - forwarded_outputs.extend(id.as_ref()); - SerializedForwardedOutputsIndex::set(txn, block_forwarded_from, &forwarded_outputs); + // Save all the forwarded outputs' data + for forward in &data.forwards { + let mut buf = vec![]; + if let Some(address) = &forward.return_address { + buf.write_all(&[1]).unwrap(); + address.write(&mut buf).unwrap(); + } else { + buf.write_all(&[0]).unwrap(); + } + forward.in_instruction.encode_to(&mut buf); - // Save the output itself - SerializedForwardedOutput::set(txn, id.as_ref(), &buf); + SerializedForwardedOutput::set(txn, forward.output.id().as_ref(), &buf); } - */ let mut buf = vec![]; buf.write_all(&data.block_number.to_le_bytes()).unwrap(); diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 9068769be..3be7f3ce9 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -116,6 +116,7 @@ impl> EventualityTask { Self { db, feed, scheduler } } + #[allow(clippy::type_complexity)] fn keys_and_keys_with_stages( &self, block_number: u64, From 41b5c9b7a7ff5efced90007b0b1a6f6f5c0fbb31 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 30 Aug 2024 01:19:29 -0400 Subject: [PATCH 055/179] Have acknowledge_block take in the results of the InInstructions executed If any failed, the scanner now creates a Burn for the return. 
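As a minimal sketch of the mapping this commit introduces, under simplified stand-in types (`burns_for_failures` and the plain `u64` amount are illustrative, not items from the patch, while `ReturnInformation` loosely mirrors the struct added to `report/db.rs`):

  struct ReturnInformation<Address> {
    address: Address,
    amount: u64,
  }

  // Pair each InInstruction's success flag with the return information saved when
  // the Batch was reported. Failures with a known return address become Burns
  // refunding the sender; successes (and failures without an address) are skipped.
  fn burns_for_failures<Address>(
    in_instruction_succeededs: Vec<bool>,
    return_information: Vec<Option<ReturnInformation<Address>>>,
  ) -> Vec<(Address, u64)> {
    assert_eq!(in_instruction_succeededs.len(), return_information.len());
    let mut burns = vec![];
    for (succeeded, information) in
      in_instruction_succeededs.into_iter().zip(return_information)
    {
      if succeeded {
        continue;
      }
      if let Some(ReturnInformation { address, amount }) = information {
        burns.push((address, amount));
      }
    }
    burns
  }

The diff below persists this return information per Batch at report time, performs this zip within `acknowledge_block`, and sends the resulting Burns as stemming from the acknowledged block.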
--- processor/primitives/src/output.rs | 2 +- processor/scanner/src/db.rs | 59 +++++++++++++++++++++++----- processor/scanner/src/lib.rs | 43 ++++++++++++++++++-- processor/scanner/src/report/db.rs | 61 ++++++++++++++++++++++++++++- processor/scanner/src/report/mod.rs | 51 ++++++++++++++++++------ processor/scanner/src/scan/mod.rs | 18 +++++++-- 6 files changed, 203 insertions(+), 31 deletions(-) diff --git a/processor/primitives/src/output.rs b/processor/primitives/src/output.rs index 777b2c524..9a3009407 100644 --- a/processor/primitives/src/output.rs +++ b/processor/primitives/src/output.rs @@ -8,7 +8,7 @@ use serai_primitives::{ExternalAddress, Balance}; use crate::Id; /// An address on the external network. -pub trait Address: Send + Sync + TryFrom { +pub trait Address: Send + Sync + Into + TryFrom { /// Write this address. fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; /// Read an address. diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index cc86afebb..f45d29664 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -47,11 +47,7 @@ impl OutputWithInInstruction { let mut opt = [0xff]; reader.read_exact(&mut opt)?; assert!((opt[0] == 0) || (opt[0] == 1)); - if opt[0] == 0 { - None - } else { - Some(AddressFor::::read(reader)?) - } + (opt[0] == 1).then(|| AddressFor::::read(reader)).transpose()? }; let in_instruction = InInstructionWithBalance::decode(&mut IoReader(reader)).map_err(io::Error::other)?; @@ -422,10 +418,39 @@ impl ScanToEventualityDb { } } +pub(crate) struct Returnable { + pub(crate) return_address: Option>, + pub(crate) in_instruction: InInstructionWithBalance, +} + +impl Returnable { + fn read(reader: &mut impl io::Read) -> io::Result { + let mut opt = [0xff]; + reader.read_exact(&mut opt).unwrap(); + assert!((opt[0] == 0) || (opt[0] == 1)); + + let return_address = (opt[0] == 1).then(|| AddressFor::::read(reader)).transpose()?; + + let in_instruction = + InInstructionWithBalance::decode(&mut IoReader(reader)).map_err(io::Error::other)?; + Ok(Returnable { return_address, in_instruction }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + if let Some(return_address) = &self.return_address { + writer.write_all(&[1])?; + return_address.write(writer)?; + } else { + writer.write_all(&[0])?; + } + self.in_instruction.encode_to(writer); + Ok(()) + } +} + #[derive(BorshSerialize, BorshDeserialize)] struct BlockBoundInInstructions { block_number: u64, - in_instructions: Vec, + returnable_in_instructions: Vec, } db_channel! 
{ @@ -439,22 +464,36 @@ impl ScanToReportDb { pub(crate) fn send_in_instructions( txn: &mut impl DbTxn, block_number: u64, - in_instructions: Vec, + returnable_in_instructions: &[Returnable], ) { - InInstructions::send(txn, (), &BlockBoundInInstructions { block_number, in_instructions }); + let mut buf = vec![]; + for returnable_in_instruction in returnable_in_instructions { + returnable_in_instruction.write(&mut buf).unwrap(); + } + InInstructions::send( + txn, + (), + &BlockBoundInInstructions { block_number, returnable_in_instructions: buf }, + ); } pub(crate) fn recv_in_instructions( txn: &mut impl DbTxn, block_number: u64, - ) -> Vec { + ) -> Vec> { let data = InInstructions::try_recv(txn, ()) .expect("receiving InInstructions for a scanned block not yet sent"); assert_eq!( block_number, data.block_number, "received InInstructions for a scanned block distinct than expected" ); - data.in_instructions + let mut buf = data.returnable_in_instructions.as_slice(); + + let mut returnable_in_instructions = vec![]; + while !buf.is_empty() { + returnable_in_instructions.push(Returnable::read(&mut buf).unwrap()); + } + returnable_in_instructions } } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 927fc145d..93ed961db 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -7,7 +7,7 @@ use serai_db::{Get, DbTxn, Db}; use serai_primitives::{NetworkId, Coin, Amount}; use serai_in_instructions_primitives::Batch; -use serai_coins_primitives::OutInstructionWithBalance; +use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance}; use primitives::{task::*, Address, ReceivedOutput, Block}; @@ -327,6 +327,8 @@ impl Scanner { &mut self, mut txn: impl DbTxn, block_number: u64, + batch_id: u32, + in_instruction_succeededs: Vec, key_to_activate: Option>, ) { log::info!("acknowledging block {block_number}"); @@ -338,8 +340,12 @@ impl Scanner { if let Some(prior_highest_acknowledged_block) = ScannerGlobalDb::::highest_acknowledged_block(&txn) { - assert!(block_number > prior_highest_acknowledged_block, "acknowledging blocks out-of-order"); - for b in (prior_highest_acknowledged_block + 1) .. (block_number - 1) { + // If a single block produced multiple Batches, the block number won't increment + assert!( + block_number >= prior_highest_acknowledged_block, + "acknowledging blocks out-of-order" + ); + for b in (prior_highest_acknowledged_block + 1) .. 
block_number { assert!( !ScannerGlobalDb::::is_block_notable(&txn, b), "skipped acknowledging a block which was notable" @@ -352,6 +358,37 @@ impl Scanner { ScannerGlobalDb::::queue_key(&mut txn, block_number + S::WINDOW_LENGTH, key_to_activate); } + // Return the balances for any InInstructions which failed to execute + { + let return_information = report::take_return_information::(&mut txn, batch_id) + .expect("didn't save the return information for Batch we published"); + assert_eq!( + in_instruction_succeededs.len(), + return_information.len(), + "amount of InInstruction succeededs differed from amount of return information saved" + ); + + // We map these into standard Burns + let mut returns = vec![]; + for (succeeded, return_information) in + in_instruction_succeededs.into_iter().zip(return_information) + { + if succeeded { + continue; + } + + if let Some(report::ReturnInformation { address, balance }) = return_information { + returns.push(OutInstructionWithBalance { + instruction: OutInstruction { address: address.into(), data: None }, + balance, + }); + } + } + // We send them as stemming from this block + // TODO: These should be handled with any Burns from this block + SubstrateToEventualityDb::send_burns(&mut txn, block_number, &returns); + } + // Commit the txn txn.commit(); // Run the Eventuality task since we've advanced it diff --git a/processor/scanner/src/report/db.rs b/processor/scanner/src/report/db.rs index 2fd98d4be..4c96a360f 100644 --- a/processor/scanner/src/report/db.rs +++ b/processor/scanner/src/report/db.rs @@ -1,16 +1,34 @@ +use core::marker::PhantomData; +use std::io::{Read, Write}; + +use scale::{Encode, Decode, IoReader}; use serai_db::{Get, DbTxn, create_db}; +use serai_primitives::Balance; + +use primitives::Address; + +use crate::{ScannerFeed, AddressFor}; + create_db!( ScannerReport { // The next block to potentially report NextToPotentiallyReportBlock: () -> u64, // The next Batch ID to use NextBatchId: () -> u32, + + // The return addresses for the InInstructions within a Batch + SerializedReturnAddresses: (batch: u32) -> Vec, } ); -pub(crate) struct ReportDb; -impl ReportDb { +pub(crate) struct ReturnInformation { + pub(crate) address: AddressFor, + pub(crate) balance: Balance, +} + +pub(crate) struct ReportDb(PhantomData); +impl ReportDb { pub(crate) fn set_next_to_potentially_report_block( txn: &mut impl DbTxn, next_to_potentially_report_block: u64, @@ -26,4 +44,43 @@ impl ReportDb { NextBatchId::set(txn, &(id + 1)); id } + + pub(crate) fn save_return_information( + txn: &mut impl DbTxn, + id: u32, + return_information: &Vec>>, + ) { + let mut buf = Vec::with_capacity(return_information.len() * (32 + 1 + 8)); + for return_information in return_information { + if let Some(ReturnInformation { address, balance }) = return_information { + buf.write_all(&[1]).unwrap(); + address.write(&mut buf).unwrap(); + balance.encode_to(&mut buf); + } else { + buf.write_all(&[0]).unwrap(); + } + } + SerializedReturnAddresses::set(txn, id, &buf); + } + pub(crate) fn take_return_information( + txn: &mut impl DbTxn, + id: u32, + ) -> Option>>> { + let buf = SerializedReturnAddresses::get(txn, id)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / (32 + 1 + 8)); + while !buf.is_empty() { + let mut opt = [0xff]; + buf.read_exact(&mut opt).unwrap(); + assert!((opt[0] == 0) || (opt[0] == 1)); + + res.push((opt[0] == 1).then(|| { + let address = AddressFor::::read(&mut buf).unwrap(); + let balance = Balance::decode(&mut IoReader(&mut 
buf)).unwrap(); + ReturnInformation { address, balance } + })); + } + Some(res) + } } diff --git a/processor/scanner/src/report/mod.rs b/processor/scanner/src/report/mod.rs index b789ea58b..8ac2c06b4 100644 --- a/processor/scanner/src/report/mod.rs +++ b/processor/scanner/src/report/mod.rs @@ -8,15 +8,23 @@ use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; use primitives::task::ContinuallyRan; use crate::{ - db::{ScannerGlobalDb, ScanToReportDb}, + db::{Returnable, ScannerGlobalDb, ScanToReportDb}, index, scan::next_to_scan_for_outputs_block, ScannerFeed, BatchPublisher, }; mod db; +pub(crate) use db::ReturnInformation; use db::ReportDb; +pub(crate) fn take_return_information( + txn: &mut impl DbTxn, + id: u32, +) -> Option>>> { + ReportDb::::take_return_information(txn, id) +} + /* This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion. @@ -33,10 +41,10 @@ pub(crate) struct ReportTask { impl ReportTask { pub(crate) fn new(mut db: D, batch_publisher: B, start_block: u64) -> Self { - if ReportDb::next_to_potentially_report_block(&db).is_none() { + if ReportDb::::next_to_potentially_report_block(&db).is_none() { // Initialize the DB let mut txn = db.txn(); - ReportDb::set_next_to_potentially_report_block(&mut txn, start_block); + ReportDb::::set_next_to_potentially_report_block(&mut txn, start_block); txn.commit(); } @@ -64,7 +72,7 @@ impl ContinuallyRan for ReportTask::next_to_potentially_report_block(&self.db) .expect("ReportTask run before writing the start block"); for b in next_to_potentially_report ..= highest_reportable { @@ -81,32 +89,53 @@ impl ContinuallyRan for ReportTask::acquire_batch_id(&mut txn); // start with empty batch let mut batches = vec![Batch { network, id: batch_id, block: BlockHash(block_hash), instructions: vec![] }]; + // We also track the return information for the InInstructions within a Batch in case they + // error + let mut return_information = vec![vec![]]; + + for Returnable { return_address, in_instruction } in in_instructions { + let balance = in_instruction.balance; - for instruction in in_instructions { let batch = batches.last_mut().unwrap(); - batch.instructions.push(instruction); + batch.instructions.push(in_instruction); // check if batch is over-size if batch.encode().len() > MAX_BATCH_SIZE { // pop the last instruction so it's back in size - let instruction = batch.instructions.pop().unwrap(); + let in_instruction = batch.instructions.pop().unwrap(); // bump the id for the new batch - batch_id = ReportDb::acquire_batch_id(&mut txn); + batch_id = ReportDb::::acquire_batch_id(&mut txn); // make a new batch with this instruction included batches.push(Batch { network, id: batch_id, block: BlockHash(block_hash), - instructions: vec![instruction], + instructions: vec![in_instruction], }); + // Since we're allocating a new batch, allocate a new set of return addresses for it + return_information.push(vec![]); } + + // For the set of return addresses for the InInstructions for the batch we just pushed + // onto, push this InInstruction's return address + return_information + .last_mut() + .unwrap() + .push(return_address.map(|address| ReturnInformation { address, balance })); + } + + // Save the return addresses to the database + assert_eq!(batches.len(), return_information.len()); + for (batch, return_information) in batches.iter().zip(&return_information) { + assert_eq!(batch.instructions.len(), return_information.len()); + ReportDb::::save_return_information(&mut txn, batch.id, return_information); } 
for batch in batches { @@ -119,7 +148,7 @@ impl ContinuallyRan for ReportTask::set_next_to_potentially_report_block(&mut txn, b + 1); txn.commit(); } diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index 405861ba1..4d6ca16e8 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -13,7 +13,8 @@ use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Block}; use crate::{ lifetime::LifetimeStage, db::{ - OutputWithInInstruction, SenderScanData, ScannerGlobalDb, ScanToReportDb, ScanToEventualityDb, + OutputWithInInstruction, Returnable, SenderScanData, ScannerGlobalDb, ScanToReportDb, + ScanToEventualityDb, }, BlockExt, ScannerFeed, AddressFor, OutputFor, Return, sort_outputs, eventuality::latest_scannable_block, @@ -149,7 +150,13 @@ impl ContinuallyRan for ScanTask { queued_outputs }; for queued_output in queued_outputs { - in_instructions.push((queued_output.output.id(), queued_output.in_instruction)); + in_instructions.push(( + queued_output.output.id(), + Returnable { + return_address: queued_output.return_address, + in_instruction: queued_output.in_instruction, + }, + )); scan_data.received_external_outputs.push(queued_output.output); } @@ -302,7 +309,10 @@ impl ContinuallyRan for ScanTask { in_instructions.push(( output_with_in_instruction.output.id(), - output_with_in_instruction.in_instruction, + Returnable { + return_address: output_with_in_instruction.return_address, + in_instruction: output_with_in_instruction.in_instruction, + }, )); scan_data.received_external_outputs.push(output_with_in_instruction.output); } @@ -329,7 +339,7 @@ impl ContinuallyRan for ScanTask { let in_instructions = in_instructions.into_iter().map(|(_id, in_instruction)| in_instruction).collect::>(); // Send the InInstructions to the report task - ScanToReportDb::::send_in_instructions(&mut txn, b, in_instructions); + ScanToReportDb::::send_in_instructions(&mut txn, b, &in_instructions); // Send the scan data to the eventuality task ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); From 5b1ccaf7459191e40831ec2321a5f602919668d1 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 30 Aug 2024 01:33:40 -0400 Subject: [PATCH 056/179] Replace acknowledge_block with acknowledge_batch --- processor/scanner/src/lib.rs | 44 +++++++++++++++++++++-------- processor/scanner/src/report/db.rs | 13 ++++++++- processor/scanner/src/report/mod.rs | 11 ++++++-- 3 files changed, 53 insertions(+), 15 deletions(-) diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 93ed961db..f92002d6a 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -317,21 +317,33 @@ impl Scanner { Self { eventuality_handle, _S: PhantomData } } - /// Acknowledge a block. + /// Acknowledge a Batch having been published on Serai. /// - /// This means this block was ordered on Serai in relation to `Burn` events, and all validators - /// have achieved synchrony on it. + /// This means the specified Batch was ordered on Serai in relation to Burn events, and all + /// validators have achieved synchrony on it. + /// + /// `in_instruction_succeededs` is the result of executing each InInstruction within this batch, + /// true if it succeeded and false if it did not (and did not cause any state changes on Serai). + /// + /// `burns` is a list of Burns to queue with the acknowledgement of this Batch for efficiency's + /// sake. 
Any Burns passed here MUST NOT be passed into any other call of `acknowledge_batch` nor + /// `queue_burns`. Doing so will cause them to be executed multiple times. /// /// The calls to this function must be ordered with regards to `queue_burns`. - pub fn acknowledge_block( + pub fn acknowledge_batch( &mut self, mut txn: impl DbTxn, - block_number: u64, batch_id: u32, in_instruction_succeededs: Vec, + mut burns: Vec, key_to_activate: Option>, ) { - log::info!("acknowledging block {block_number}"); + log::info!("acknowledging batch {batch_id}"); + + // TODO: We need to take all of these arguments and send them to a task + // Then, when we do have this block number, we need to execute this function + let block_number = report::take_block_number_for_batch::(&mut txn, batch_id) + .expect("didn't have the block number for a Batch"); assert!( ScannerGlobalDb::::is_block_notable(&txn, block_number), @@ -369,7 +381,6 @@ impl Scanner { ); // We map these into standard Burns - let mut returns = vec![]; for (succeeded, return_information) in in_instruction_succeededs.into_iter().zip(return_information) { @@ -378,15 +389,18 @@ impl Scanner { } if let Some(report::ReturnInformation { address, balance }) = return_information { - returns.push(OutInstructionWithBalance { + burns.push(OutInstructionWithBalance { instruction: OutInstruction { address: address.into(), data: None }, balance, }); } } - // We send them as stemming from this block - // TODO: These should be handled with any Burns from this block - SubstrateToEventualityDb::send_burns(&mut txn, block_number, &returns); + } + + if !burns.is_empty() { + // We send these Burns as stemming from this block we just acknowledged + // This causes them to be acted on after we accumulate the outputs from this block + SubstrateToEventualityDb::send_burns(&mut txn, block_number, &burns); } // Commit the txn @@ -402,7 +416,9 @@ impl Scanner { /// The scanner only updates the scheduler with new outputs upon acknowledging a block. The /// ability to fulfill Burns, and therefore their order, is dependent on the current output /// state. This immediately sets a bound that this function is ordered with regards to - /// `acknowledge_block`. + /// `acknowledge_batch`. + /// + /// The Burns specified here MUST NOT also be passed to `acknowledge_batch`. /* The fact Burns can be queued during any Substrate block is problematic. The scanner is allowed to scan anything within the window set by the Eventuality task. The Eventuality task is allowed @@ -427,6 +443,10 @@ impl Scanner { unnecessary). 
*/ pub fn queue_burns(&mut self, txn: &mut impl DbTxn, burns: &Vec) { + if burns.is_empty() { + return; + } + let queue_as_of = ScannerGlobalDb::::highest_acknowledged_block(txn) .expect("queueing Burns yet never acknowledged a block"); diff --git a/processor/scanner/src/report/db.rs b/processor/scanner/src/report/db.rs index 4c96a360f..baff66354 100644 --- a/processor/scanner/src/report/db.rs +++ b/processor/scanner/src/report/db.rs @@ -17,6 +17,9 @@ create_db!( // The next Batch ID to use NextBatchId: () -> u32, + // The block number which caused a batch + BlockNumberForBatch: (batch: u32) -> u64, + // The return addresses for the InInstructions within a Batch SerializedReturnAddresses: (batch: u32) -> Vec, } @@ -39,12 +42,19 @@ impl ReportDb { NextToPotentiallyReportBlock::get(getter) } - pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 { + pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn, block_number: u64) -> u32 { let id = NextBatchId::get(txn).unwrap_or(0); NextBatchId::set(txn, &(id + 1)); + BlockNumberForBatch::set(txn, id, &block_number); id } + pub(crate) fn take_block_number_for_batch(txn: &mut impl DbTxn, id: u32) -> Option { + let block_number = BlockNumberForBatch::get(txn, id)?; + BlockNumberForBatch::del(txn, id); + Some(block_number) + } + pub(crate) fn save_return_information( txn: &mut impl DbTxn, id: u32, @@ -67,6 +77,7 @@ impl ReportDb { id: u32, ) -> Option>>> { let buf = SerializedReturnAddresses::get(txn, id)?; + SerializedReturnAddresses::del(txn, id); let mut buf = buf.as_slice(); let mut res = Vec::with_capacity(buf.len() / (32 + 1 + 8)); diff --git a/processor/scanner/src/report/mod.rs b/processor/scanner/src/report/mod.rs index 8ac2c06b4..ba851713b 100644 --- a/processor/scanner/src/report/mod.rs +++ b/processor/scanner/src/report/mod.rs @@ -25,6 +25,13 @@ pub(crate) fn take_return_information( ReportDb::::take_return_information(txn, id) } +pub(crate) fn take_block_number_for_batch( + txn: &mut impl DbTxn, + id: u32, +) -> Option { + ReportDb::::take_block_number_for_batch(txn, id) +} + /* This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion. @@ -89,7 +96,7 @@ impl ContinuallyRan for ReportTask::acquire_batch_id(&mut txn); + let mut batch_id = ReportDb::::acquire_batch_id(&mut txn, b); // start with empty batch let mut batches = @@ -110,7 +117,7 @@ impl ContinuallyRan for ReportTask::acquire_batch_id(&mut txn); + batch_id = ReportDb::::acquire_batch_id(&mut txn, b); // make a new batch with this instruction included batches.push(Batch { From 68c46a6e8fabaf9594ea5d06d1ad4dbb62079471 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 30 Aug 2024 02:27:22 -0400 Subject: [PATCH 057/179] Don't have `acknowledge_batch` immediately run `acknowledge_batch` can only be run if we know what the Batch should be. If we don't know what the Batch should be, we have to block until we do. Specifically, we need the block number associated with the Batch. Instead of blocking over the Scanner API, the Scanner API now solely queues actions. A new task intakes those actions once we can. This ensures we can intake the entire Substrate chain, even if our daemon for the external network is stalled at its genesis block. All of this for the block number alone seems ridiculous. To go from the block hash in the Batch to the block number without this task, we'd at least need the index task to be up to date (still requiring blocking or an API returning ephemeral errors). 
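In sketch form, the pattern is a queue of actions plus a task which drains it once prerequisites are met. A minimal stand-in follows (in-memory here, whereas the actual code persists actions to the database so they survive restarts; these names are illustrative, not the real API):

```rust
use std::collections::VecDeque;

// The actions the Scanner API queues instead of executing inline.
enum Action {
  AcknowledgeBatch { batch_id: u32 },
  QueueBurns(Vec<u64>),
}

struct SubstrateTask {
  actions: VecDeque<Action>,
}

impl SubstrateTask {
  // The public API solely queues; it never blocks on the block number.
  fn queue(&mut self, action: Action) {
    self.actions.push_back(action);
  }

  // Drain whichever actions are now executable, returning if progress was made.
  fn run_iteration(&mut self, block_number_for: impl Fn(u32) -> Option<u64>) -> bool {
    let mut made_progress = false;
    while let Some(action) = self.actions.front() {
      if let Action::AcknowledgeBatch { batch_id } = action {
        // Without the block number for this Batch, we can't handle it yet.
        // Stop here so actions are always handled in order.
        if block_number_for(*batch_id).is_none() {
          break;
        }
      }
      let _action = self.actions.pop_front().unwrap();
      // ... handle the action ...
      made_progress = true;
    }
    made_progress
  }
}
```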
--- processor/scanner/src/lib.rs | 111 +++++------------ processor/scanner/src/substrate/db.rs | 89 ++++++++++++++ processor/scanner/src/substrate/mod.rs | 162 +++++++++++++++++++++++++ 3 files changed, 282 insertions(+), 80 deletions(-) create mode 100644 processor/scanner/src/substrate/db.rs create mode 100644 processor/scanner/src/substrate/mod.rs diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index f92002d6a..53bb9030d 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -7,7 +7,7 @@ use serai_db::{Get, DbTxn, Db}; use serai_primitives::{NetworkId, Coin, Amount}; use serai_in_instructions_primitives::Batch; -use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance}; +use serai_coins_primitives::OutInstructionWithBalance; use primitives::{task::*, Address, ReceivedOutput, Block}; @@ -17,15 +17,16 @@ pub use lifetime::LifetimeStage; // Database schema definition and associated functions. mod db; -use db::{ScannerGlobalDb, SubstrateToEventualityDb}; // Task to index the blockchain, ensuring we don't reorganize finalized blocks. mod index; // Scans blocks for received coins. mod scan; +/// Task which reports Batches to Substrate. +mod report; +/// Task which handles events from Substrate once we can. +mod substrate; /// Check blocks for transactions expected to eventually occur. mod eventuality; -/// Task which reports `Batch`s to Substrate. -mod report; pub(crate) fn sort_outputs>( a: &O, @@ -280,7 +281,7 @@ pub trait Scheduler: 'static + Send { /// A representation of a scanner. #[allow(non_snake_case)] pub struct Scanner { - eventuality_handle: RunNowHandle, + substrate_handle: RunNowHandle, _S: PhantomData, } impl Scanner { @@ -297,24 +298,29 @@ impl Scanner { let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await; let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block); let report_task = report::ReportTask::<_, S, _>::new(db.clone(), batch_publisher, start_block); + let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone()); let eventuality_task = eventuality::EventualityTask::new(db, feed, scheduler, start_block); let (_index_handle, index_run) = RunNowHandle::new(); let (scan_handle, scan_run) = RunNowHandle::new(); let (report_handle, report_run) = RunNowHandle::new(); + let (substrate_handle, substrate_run) = RunNowHandle::new(); let (eventuality_handle, eventuality_run) = RunNowHandle::new(); // Upon indexing a new block, scan it tokio::spawn(index_task.continually_run(index_run, vec![scan_handle.clone()])); // Upon scanning a block, report it tokio::spawn(scan_task.continually_run(scan_run, vec![report_handle])); - // Upon reporting a block, we do nothing + // Upon reporting a block, we do nothing (as the burden is on Substrate which won't be + // immediately ready) tokio::spawn(report_task.continually_run(report_run, vec![])); + // Upon handling an event from Substrate, we run the Eventuality task (as it's what's affected) + tokio::spawn(substrate_task.continually_run(substrate_run, vec![eventuality_handle])); // Upon handling the Eventualities in a block, we run the scan task as we've advanced the // window it's allowed to scan tokio::spawn(eventuality_task.continually_run(eventuality_run, vec![scan_handle])); - Self { eventuality_handle, _S: PhantomData } + Self { substrate_handle, _S: PhantomData } } /// Acknowledge a Batch having been published on Serai.
@@ -335,80 +341,23 @@ impl Scanner { mut txn: impl DbTxn, batch_id: u32, in_instruction_succeededs: Vec, - mut burns: Vec, + burns: Vec, key_to_activate: Option>, ) { log::info!("acknowledging batch {batch_id}"); - // TODO: We need to take all of these arguments and send them to a task - // Then, when we do have this block number, we need to execute this function - let block_number = report::take_block_number_for_batch::(&mut txn, batch_id) - .expect("didn't have the block number for a Batch"); - - assert!( - ScannerGlobalDb::::is_block_notable(&txn, block_number), - "acknowledging a block which wasn't notable" + // Queue acknowledging this block via the Substrate task + substrate::queue_acknowledge_batch::( + &mut txn, + batch_id, + in_instruction_succeededs, + burns, + key_to_activate, ); - if let Some(prior_highest_acknowledged_block) = - ScannerGlobalDb::::highest_acknowledged_block(&txn) - { - // If a single block produced multiple Batches, the block number won't increment - assert!( - block_number >= prior_highest_acknowledged_block, - "acknowledging blocks out-of-order" - ); - for b in (prior_highest_acknowledged_block + 1) .. block_number { - assert!( - !ScannerGlobalDb::::is_block_notable(&txn, b), - "skipped acknowledging a block which was notable" - ); - } - } - - ScannerGlobalDb::::set_highest_acknowledged_block(&mut txn, block_number); - if let Some(key_to_activate) = key_to_activate { - ScannerGlobalDb::::queue_key(&mut txn, block_number + S::WINDOW_LENGTH, key_to_activate); - } - - // Return the balances for any InInstructions which failed to execute - { - let return_information = report::take_return_information::(&mut txn, batch_id) - .expect("didn't save the return information for Batch we published"); - assert_eq!( - in_instruction_succeededs.len(), - return_information.len(), - "amount of InInstruction succeededs differed from amount of return information saved" - ); - - // We map these into standard Burns - for (succeeded, return_information) in - in_instruction_succeededs.into_iter().zip(return_information) - { - if succeeded { - continue; - } - - if let Some(report::ReturnInformation { address, balance }) = return_information { - burns.push(OutInstructionWithBalance { - instruction: OutInstruction { address: address.into(), data: None }, - balance, - }); - } - } - } - - if !burns.is_empty() { - // We send these Burns as stemming from this block we just acknowledged - // This causes them to be acted on after we accumulate the outputs from this block - SubstrateToEventualityDb::send_burns(&mut txn, block_number, &burns); - } - - // Commit the txn + // Commit this txn so this data is flushed txn.commit(); - // Run the Eventuality task since we've advanced it - // We couldn't successfully do this if that txn was still floating around, uncommitted - // The execution of this task won't actually have more work until the txn is committed - self.eventuality_handle.run_now(); + // Then run the Substrate task + self.substrate_handle.run_now(); } /// Queue Burns. @@ -442,14 +391,16 @@ impl Scanner { latency and likely practically require we add regularly scheduled notable blocks (which may be unnecessary). 
*/ - pub fn queue_burns(&mut self, txn: &mut impl DbTxn, burns: &Vec) { + pub fn queue_burns(&mut self, mut txn: impl DbTxn, burns: Vec) { if burns.is_empty() { return; } - let queue_as_of = ScannerGlobalDb::::highest_acknowledged_block(txn) - .expect("queueing Burns yet never acknowledged a block"); - - SubstrateToEventualityDb::send_burns(txn, queue_as_of, burns) + // Queue queueing these burns via the Substrate task + substrate::queue_queue_burns::(&mut txn, burns); + // Commit this txn so this data is flushed + txn.commit(); + // Then run the Substrate task + self.substrate_handle.run_now(); } } diff --git a/processor/scanner/src/substrate/db.rs b/processor/scanner/src/substrate/db.rs new file mode 100644 index 000000000..697897c25 --- /dev/null +++ b/processor/scanner/src/substrate/db.rs @@ -0,0 +1,89 @@ +use core::marker::PhantomData; + +use group::GroupEncoding; + +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, create_db, db_channel}; + +use serai_coins_primitives::OutInstructionWithBalance; + +use crate::{ScannerFeed, KeyFor}; + +#[derive(BorshSerialize, BorshDeserialize)] +struct AcknowledgeBatchEncodable { + batch_id: u32, + in_instruction_succeededs: Vec, + burns: Vec, + key_to_activate: Option>, +} + +#[derive(BorshSerialize, BorshDeserialize)] +enum ActionEncodable { + AcknowledgeBatch(AcknowledgeBatchEncodable), + QueueBurns(Vec), +} + +pub(crate) struct AcknowledgeBatch { + pub(crate) batch_id: u32, + pub(crate) in_instruction_succeededs: Vec, + pub(crate) burns: Vec, + pub(crate) key_to_activate: Option>, +} + +pub(crate) enum Action { + AcknowledgeBatch(AcknowledgeBatch), + QueueBurns(Vec), +} + +db_channel!( + ScannerSubstrate { + Actions: (empty_key: ()) -> ActionEncodable, + } +); + +pub(crate) struct SubstrateDb(PhantomData); +impl SubstrateDb { + pub(crate) fn queue_acknowledge_batch( + txn: &mut impl DbTxn, + batch_id: u32, + in_instruction_succeededs: Vec, + burns: Vec, + key_to_activate: Option>, + ) { + Actions::send( + txn, + (), + &ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { + batch_id, + in_instruction_succeededs, + burns, + key_to_activate: key_to_activate.map(|key| key.to_bytes().as_ref().to_vec()), + }), + ); + } + pub(crate) fn queue_queue_burns(txn: &mut impl DbTxn, burns: Vec) { + Actions::send(txn, (), &ActionEncodable::QueueBurns(burns)); + } + + pub(crate) fn next_action(txn: &mut impl DbTxn) -> Option> { + let action_encodable = Actions::try_recv(txn, ())?; + Some(match action_encodable { + ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { + batch_id, + in_instruction_succeededs, + burns, + key_to_activate, + }) => Action::AcknowledgeBatch(AcknowledgeBatch { + batch_id, + in_instruction_succeededs, + burns, + key_to_activate: key_to_activate.map(|key| { + let mut repr = as GroupEncoding>::Repr::default(); + repr.as_mut().copy_from_slice(&key); + KeyFor::::from_bytes(&repr).unwrap() + }), + }), + ActionEncodable::QueueBurns(burns) => Action::QueueBurns(burns), + }) + } +} diff --git a/processor/scanner/src/substrate/mod.rs b/processor/scanner/src/substrate/mod.rs new file mode 100644 index 000000000..4feb85d58 --- /dev/null +++ b/processor/scanner/src/substrate/mod.rs @@ -0,0 +1,162 @@ +use core::marker::PhantomData; + +use serai_db::{DbTxn, Db}; + +use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance}; + +use primitives::task::ContinuallyRan; +use crate::{ + db::{ScannerGlobalDb, SubstrateToEventualityDb}, + report, ScannerFeed, KeyFor, +}; + +mod db; +use db::*; + +pub(crate) 
fn queue_acknowledge_batch( + txn: &mut impl DbTxn, + batch_id: u32, + in_instruction_succeededs: Vec, + burns: Vec, + key_to_activate: Option>, +) { + SubstrateDb::::queue_acknowledge_batch( + txn, + batch_id, + in_instruction_succeededs, + burns, + key_to_activate, + ) +} +pub(crate) fn queue_queue_burns( + txn: &mut impl DbTxn, + burns: Vec, +) { + SubstrateDb::::queue_queue_burns(txn, burns) +} + +/* + When Serai acknowledges a Batch, we can only handle it once we've scanned the chain and generated + the same Batch ourselves. This takes the `acknowledge_batch`, `queue_burns` arguments and sits on + them until we're able to process them. +*/ +#[allow(non_snake_case)] +pub(crate) struct SubstrateTask { + db: D, + _S: PhantomData, +} + +impl SubstrateTask { + pub(crate) fn new(db: D) -> Self { + Self { db, _S: PhantomData } + } +} + +#[async_trait::async_trait] +impl ContinuallyRan for SubstrateTask { + async fn run_iteration(&mut self) -> Result { + let mut made_progress = false; + loop { + // Fetch the next action to handle + let mut txn = self.db.txn(); + let Some(action) = SubstrateDb::::next_action(&mut txn) else { + drop(txn); + return Ok(made_progress); + }; + + match action { + Action::AcknowledgeBatch(AcknowledgeBatch { + batch_id, + in_instruction_succeededs, + mut burns, + key_to_activate, + }) => { + // Check if we have the information for this batch + let Some(block_number) = report::take_block_number_for_batch::(&mut txn, batch_id) + else { + // If we don't, drop this txn (restoring the action to the database) + drop(txn); + return Ok(made_progress); + }; + + // Mark we made progress and handle this + made_progress = true; + + assert!( + ScannerGlobalDb::::is_block_notable(&txn, block_number), + "acknowledging a block which wasn't notable" + ); + if let Some(prior_highest_acknowledged_block) = + ScannerGlobalDb::::highest_acknowledged_block(&txn) + { + // If a single block produced multiple Batches, the block number won't increment + assert!( + block_number >= prior_highest_acknowledged_block, + "acknowledging blocks out-of-order" + ); + for b in (prior_highest_acknowledged_block + 1) .. 
block_number { + assert!( + !ScannerGlobalDb::::is_block_notable(&txn, b), + "skipped acknowledging a block which was notable" + ); + } + } + + ScannerGlobalDb::::set_highest_acknowledged_block(&mut txn, block_number); + if let Some(key_to_activate) = key_to_activate { + ScannerGlobalDb::::queue_key( + &mut txn, + block_number + S::WINDOW_LENGTH, + key_to_activate, + ); + } + + // Return the balances for any InInstructions which failed to execute + { + let return_information = report::take_return_information::(&mut txn, batch_id) + .expect("didn't save the return information for Batch we published"); + assert_eq!( + in_instruction_succeededs.len(), + return_information.len(), + "amount of InInstruction succeededs differed from amount of return information saved" + ); + + // We map these into standard Burns + for (succeeded, return_information) in + in_instruction_succeededs.into_iter().zip(return_information) + { + if succeeded { + continue; + } + + if let Some(report::ReturnInformation { address, balance }) = return_information { + burns.push(OutInstructionWithBalance { + instruction: OutInstruction { address: address.into(), data: None }, + balance, + }); + } + } + } + + if !burns.is_empty() { + // We send these Burns as stemming from this block we just acknowledged + // This causes them to be acted on after we accumulate the outputs from this block + SubstrateToEventualityDb::send_burns(&mut txn, block_number, &burns); + } + } + + Action::QueueBurns(burns) => { + // We can instantly handle this so long as we've handled all prior actions + made_progress = true; + + let queue_as_of = ScannerGlobalDb::::highest_acknowledged_block(&txn) + .expect("queueing Burns yet never acknowledged a block"); + + SubstrateToEventualityDb::send_burns(&mut txn, queue_as_of, &burns); + } + } + + txn.commit(); + } + } +} From b6db456391589f19d6b325016f39896a97dddb58 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 30 Aug 2024 19:51:53 -0400 Subject: [PATCH 058/179] Add crate for the transaction-chaining Scheduler --- .github/workflows/tests.yml | 1 + Cargo.toml | 1 + deny.toml | 3 +++ .../scheduler/transaction-chaining/Cargo.toml | 22 +++++++++++++++++++ .../scheduler/transaction-chaining/LICENSE | 15 +++++++++++++ .../scheduler/transaction-chaining/README.md | 19 ++++++++++++++++ .../scheduler/transaction-chaining/src/lib.rs | 3 +++ 7 files changed, 64 insertions(+) create mode 100644 processor/scheduler/transaction-chaining/Cargo.toml create mode 100644 processor/scheduler/transaction-chaining/LICENSE create mode 100644 processor/scheduler/transaction-chaining/README.md create mode 100644 processor/scheduler/transaction-chaining/src/lib.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5032676f7..070c5b589 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -42,6 +42,7 @@ jobs: -p serai-processor-key-gen \ -p serai-processor-frost-attempt-manager \ -p serai-processor-primitives \ + -p serai-processor-transaction-chaining-scheduler \ -p serai-processor-scanner \ -p serai-processor \ -p tendermint-machine \ diff --git a/Cargo.toml b/Cargo.toml index 7ad08a517..27e5e5620 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,6 +74,7 @@ members = [ "processor/frost-attempt-manager", "processor/primitives", + "processor/scheduler/transaction-chaining", "processor/scanner", "processor", diff --git a/deny.toml b/deny.toml index ea61fcc1c..7531f3b79 100644 --- a/deny.toml +++ b/deny.toml @@ -48,6 +48,9 @@ exceptions = [ { allow = ["AGPL-3.0"], name = 
"serai-processor-messages" }, { allow = ["AGPL-3.0"], name = "serai-processor-key-gen" }, { allow = ["AGPL-3.0"], name = "serai-processor-frost-attempt-manager" }, + + { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" }, + { allow = ["AGPL-3.0"], name = "serai-processor-scanner" }, { allow = ["AGPL-3.0"], name = "serai-processor" }, { allow = ["AGPL-3.0"], name = "tributary-chain" }, diff --git a/processor/scheduler/transaction-chaining/Cargo.toml b/processor/scheduler/transaction-chaining/Cargo.toml new file mode 100644 index 000000000..360da6c51 --- /dev/null +++ b/processor/scheduler/transaction-chaining/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "serai-processor-transaction-chaining-scheduler" +version = "0.1.0" +description = "Scheduler for networks with transaction chaining for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/transaction-chaining" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale"] + +[lints] +workspace = true + +[dependencies] diff --git a/processor/scheduler/transaction-chaining/LICENSE b/processor/scheduler/transaction-chaining/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/scheduler/transaction-chaining/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/transaction-chaining/README.md b/processor/scheduler/transaction-chaining/README.md new file mode 100644 index 000000000..0788ff53c --- /dev/null +++ b/processor/scheduler/transaction-chaining/README.md @@ -0,0 +1,19 @@ +# Transaction Chaining Scheduler + +A scheduler of transactions for networks premised on the UTXO model which +support transaction chaining. Transaction chaining refers to the ability to +obtain an identifier for an output within a transaction not yet signed usable +to build and sign a transaction spending it. + +### Design + +The scheduler is designed to achieve fulfillment of all expected payments with +an `O(1)` delay (regardless of prior scheduler state), `O(log n)` time, and +`O(n)` computational complexity. + +Due to the ability to chain transactions, we can immediately plan/sign dependent +transactions. For the time/computational complexity, we use a tree to fulfill +payments. This quickly gives us the ability to make as many outputs as necessary +(regardless of per-transaction output limits) and only has the latency of +including a chain of `O(log n)` transactions on-chain. The only computational +overhead is in creating the transactions which are branches in the tree. 
diff --git a/processor/scheduler/transaction-chaining/src/lib.rs b/processor/scheduler/transaction-chaining/src/lib.rs new file mode 100644 index 000000000..3639aa043 --- /dev/null +++ b/processor/scheduler/transaction-chaining/src/lib.rs @@ -0,0 +1,3 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] From 3c436304ab0a0fb15c5122528cf59c4f6c68c9ad Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 1 Sep 2024 00:01:01 -0400 Subject: [PATCH 059/179] Add processor/scheduler/utxo/primitives Includes the necessary signing functions and the fee amortization logic. Moves transaction-chaining to utxo/transaction-chaining. --- .github/workflows/tests.yml | 1 + Cargo.lock | 14 ++ Cargo.toml | 3 +- deny.toml | 1 + .../scheduler/utxo/primitives/Cargo.toml | 25 +++ processor/scheduler/utxo/primitives/LICENSE | 15 ++ processor/scheduler/utxo/primitives/README.md | 3 + .../scheduler/utxo/primitives/src/lib.rs | 179 ++++++++++++++++++ .../transaction-chaining/Cargo.toml | 0 .../{ => utxo}/transaction-chaining/LICENSE | 0 .../{ => utxo}/transaction-chaining/README.md | 0 .../transaction-chaining/src/lib.rs | 0 12 files changed, 240 insertions(+), 1 deletion(-) create mode 100644 processor/scheduler/utxo/primitives/Cargo.toml create mode 100644 processor/scheduler/utxo/primitives/LICENSE create mode 100644 processor/scheduler/utxo/primitives/README.md create mode 100644 processor/scheduler/utxo/primitives/src/lib.rs rename processor/scheduler/{ => utxo}/transaction-chaining/Cargo.toml (100%) rename processor/scheduler/{ => utxo}/transaction-chaining/LICENSE (100%) rename processor/scheduler/{ => utxo}/transaction-chaining/README.md (100%) rename processor/scheduler/{ => utxo}/transaction-chaining/src/lib.rs (100%) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 070c5b589..33f2e8529 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -42,6 +42,7 @@ jobs: -p serai-processor-key-gen \ -p serai-processor-frost-attempt-manager \ -p serai-processor-primitives \ + -p serai-processor-utxo-scheduler-primitives \ -p serai-processor-transaction-chaining-scheduler \ -p serai-processor-scanner \ -p serai-processor \ diff --git a/Cargo.lock b/Cargo.lock index 2a9de4b9d..935e95d8f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8709,6 +8709,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "serai-processor-transaction-chaining-scheduler" +version = "0.1.0" + +[[package]] +name = "serai-processor-utxo-scheduler-primitives" +version = "0.1.0" +dependencies = [ + "async-trait", + "serai-primitives", + "serai-processor-primitives", + "serai-processor-scanner", +] + [[package]] name = "serai-reproducible-runtime-tests" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 27e5e5620..174357132 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,7 +74,8 @@ members = [ "processor/frost-attempt-manager", "processor/primitives", - "processor/scheduler/transaction-chaining", + "processor/scheduler/utxo/primitives", + "processor/scheduler/utxo/transaction-chaining", "processor/scanner", "processor", diff --git a/deny.toml b/deny.toml index 7531f3b79..fb6162449 100644 --- a/deny.toml +++ b/deny.toml @@ -49,6 +49,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-key-gen" }, { allow = ["AGPL-3.0"], name = "serai-processor-frost-attempt-manager" }, + { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler-primitives" }, { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler"
}, { allow = ["AGPL-3.0"], name = "serai-processor-scanner" }, { allow = ["AGPL-3.0"], name = "serai-processor" }, diff --git a/processor/scheduler/utxo/primitives/Cargo.toml b/processor/scheduler/utxo/primitives/Cargo.toml new file mode 100644 index 000000000..01d3db7d0 --- /dev/null +++ b/processor/scheduler/utxo/primitives/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "serai-processor-utxo-scheduler-primitives" +version = "0.1.0" +description = "Primitives for UTXO schedulers for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/utxo/primitives" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +async-trait = { version = "0.1", default-features = false } + +serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } + +primitives = { package = "serai-processor-primitives", path = "../../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../../scanner" } diff --git a/processor/scheduler/utxo/primitives/LICENSE b/processor/scheduler/utxo/primitives/LICENSE new file mode 100644 index 000000000..e091b1498 --- /dev/null +++ b/processor/scheduler/utxo/primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/utxo/primitives/README.md b/processor/scheduler/utxo/primitives/README.md new file mode 100644 index 000000000..81bc954a7 --- /dev/null +++ b/processor/scheduler/utxo/primitives/README.md @@ -0,0 +1,3 @@ +# UTXO Scheduler Primitives + +Primitives for UTXO schedulers. diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs new file mode 100644 index 000000000..61dd9d888 --- /dev/null +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -0,0 +1,179 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::fmt::Debug; + +use serai_primitives::{Coin, Amount}; + +use primitives::ReceivedOutput; +use scanner::{Payment, ScannerFeed, AddressFor, OutputFor}; + +/// An object able to plan a transaction. +#[async_trait::async_trait] +pub trait TransactionPlanner { + /// An error encountered when determining the fee rate. + /// + /// This MUST be an ephemeral error. Retrying fetching data from the blockchain MUST eventually + /// resolve without manual intervention/changing the arguments. + type EphemeralError: Debug; + + /// The type representing a fee rate to use for transactions. + type FeeRate: Clone + Copy; + + /// The type representing a planned transaction. + type PlannedTransaction; + + /// Obtain the fee rate to pay. + /// + /// This must be constant to the finalized block referenced by this block number and the coin. 
+ async fn fee_rate( + &self, + block_number: u64, + coin: Coin, + ) -> Result; + + /// Calculate the fee for a transaction with this structure. + /// + /// The fee rate, inputs, and payments will all be for the same coin. The returned fee is + /// denominated in this coin. + fn calculate_fee( + &self, + block_number: u64, + fee_rate: Self::FeeRate, + inputs: Vec>, + payments: Vec>, + change: Option>, + ) -> Amount; + + /// Plan a transaction. + /// + /// This must only require the same fee as would be returned by `calculate_fee`. The caller is + /// trusted to maintain `sum(inputs) - sum(payments) >= if change.is_some() { DUST } else { 0 }`. + /// + /// `change` will always be an address belonging to the Serai network. + fn plan( + &self, + block_number: u64, + fee_rate: Self::FeeRate, + inputs: Vec>, + payments: Vec>, + change: Option>, + ) -> Self::PlannedTransaction; + + /// Obtain a PlannedTransaction via amortizing the fee over the payments. + /// + /// `operating_costs` is accrued to if Serai faces the burden of a fee or drops inputs not worth + /// accumulating. `operating_costs` will be amortized along with this transaction's fee as + /// possible. Please see `spec/processor/UTXO Management.md` for more information. + /// + /// Returns `None` if the fee exceeded the inputs, or `Some` otherwise. + fn plan_transaction_with_fee_amortization( + &self, + operating_costs: &mut u64, + block_number: u64, + fee_rate: Self::FeeRate, + inputs: Vec>, + mut payments: Vec>, + change: Option>, + ) -> Option { + // Sanity checks + { + assert!(!inputs.is_empty()); + assert!((!payments.is_empty()) || change.is_some()); + let coin = inputs.first().unwrap().balance().coin; + for input in &inputs { + assert_eq!(coin, input.balance().coin); + } + for payment in &payments { + assert_eq!(coin, payment.balance().coin); + } + assert!( + (inputs.iter().map(|input| input.balance().amount.0).sum::() + *operating_costs) >= + payments.iter().map(|payment| payment.balance().amount.0).sum::(), + "attempted to fulfill payments without a sufficient input set" + ); + } + + let coin = inputs.first().unwrap().balance().coin; + + // Amortization + { + // Sort payments from high amount to low amount + payments.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse()); + + let mut fee = self + .calculate_fee(block_number, fee_rate, inputs.clone(), payments.clone(), change.clone()) + .0; + let mut amortized = 0; + while !payments.is_empty() { + // We need to pay the fee, and any accrued operating costs, minus what we've already + // amortized + let adjusted_fee = (*operating_costs + fee).saturating_sub(amortized); + + /* + Ideally, we wouldn't use a ceil div yet would be accurate about it. Any remainder could + be amortized over the largest outputs, which wouldn't be relevant here as we only work + with the smallest output. The issue is the theoretical edge case where all outputs have + the same value and are of the minimum value. In that case, none would be able to have the + remainder amortized as it'd cause them to need to be dropped. Using a ceil div avoids + this.
+ */ + let per_payment_fee = adjusted_fee.div_ceil(u64::try_from(payments.len()).unwrap()); + // Pop the last payment if it can't pay the fee, remaining above the dust limit as it does + if payments.last().unwrap().balance().amount.0 <= (per_payment_fee + S::dust(coin).0) { + amortized += payments.pop().unwrap().balance().amount.0; + // Recalculate the fee and try again + fee = self + .calculate_fee(block_number, fee_rate, inputs.clone(), payments.clone(), change.clone()) + .0; + continue; + } + // Break since all of these payments shouldn't be dropped + break; + } + + // If we couldn't amortize the fee over the payments, check if we even have enough to pay it + if payments.is_empty() { + // If we don't have a change output, we simply return here + // We no longer have anything to do here, nor any expectations + if change.is_none() { + None?; + } + + let inputs = inputs.iter().map(|input| input.balance().amount.0).sum::(); + // Checks not just if we can pay for it, yet that the would-be change output is at least + // dust + if inputs < (fee + S::dust(coin).0) { + // Write off these inputs + *operating_costs += inputs; + // Yet also claw back the payments we dropped, as we only lost the change + // The dropped payments will be worth less than the inputs + operating_costs we started + // with, so this shouldn't use `saturating_sub` + *operating_costs -= amortized; + None?; + } + } else { + // Since we have payments which can pay the fee we ended up with, amortize it + let adjusted_fee = (*operating_costs + fee).saturating_sub(amortized); + let per_payment_base_fee = adjusted_fee / u64::try_from(payments.len()).unwrap(); + let payments_paying_one_atomic_unit_more = + usize::try_from(adjusted_fee % u64::try_from(payments.len()).unwrap()).unwrap(); + + for (i, payment) in payments.iter_mut().enumerate() { + let per_payment_fee = + per_payment_base_fee + u64::from(u8::from(i < payments_paying_one_atomic_unit_more)); + payment.balance().amount.0 -= per_payment_fee; + amortized += per_payment_fee; + } + assert!(amortized >= (*operating_costs + fee)); + } + + // Update the amount of operating costs + *operating_costs = (*operating_costs + fee).saturating_sub(amortized); + } + + // Because we amortized, or accrued as operating costs, the fee, make the transaction + Some(self.plan(block_number, fee_rate, inputs, payments, change)) + } +} diff --git a/processor/scheduler/transaction-chaining/Cargo.toml b/processor/scheduler/utxo/transaction-chaining/Cargo.toml similarity index 100% rename from processor/scheduler/transaction-chaining/Cargo.toml rename to processor/scheduler/utxo/transaction-chaining/Cargo.toml diff --git a/processor/scheduler/transaction-chaining/LICENSE b/processor/scheduler/utxo/transaction-chaining/LICENSE similarity index 100% rename from processor/scheduler/transaction-chaining/LICENSE rename to processor/scheduler/utxo/transaction-chaining/LICENSE diff --git a/processor/scheduler/transaction-chaining/README.md b/processor/scheduler/utxo/transaction-chaining/README.md similarity index 100% rename from processor/scheduler/transaction-chaining/README.md rename to processor/scheduler/utxo/transaction-chaining/README.md diff --git a/processor/scheduler/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs similarity index 100% rename from processor/scheduler/transaction-chaining/src/lib.rs rename to processor/scheduler/utxo/transaction-chaining/src/lib.rs From 1711efb6e57f968d693dc7a6b6c21258c63c3efd Mon Sep 17 00:00:00 2001 From: Luke Parker
Date: Sun, 1 Sep 2024 00:05:08 -0400 Subject: [PATCH 060/179] Expand primitives/scanner with niceties needed for the scheduler --- processor/primitives/src/output.rs | 2 +- processor/scanner/src/eventuality/mod.rs | 10 ++-- processor/scanner/src/lib.rs | 67 +++++++++++++++++++++--- processor/scanner/src/scan/mod.rs | 4 +- processor/scanner/src/substrate/mod.rs | 5 ++ 5 files changed, 75 insertions(+), 13 deletions(-) diff --git a/processor/primitives/src/output.rs b/processor/primitives/src/output.rs index 9a3009407..d59b4fd0b 100644 --- a/processor/primitives/src/output.rs +++ b/processor/primitives/src/output.rs @@ -8,7 +8,7 @@ use serai_primitives::{ExternalAddress, Balance}; use crate::Id; /// An address on the external network. -pub trait Address: Send + Sync + Into + TryFrom { +pub trait Address: Send + Sync + Clone + Into + TryFrom { /// Write this address. fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; /// Read an address. diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 3be7f3ce9..bfc879ea0 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -12,7 +12,7 @@ use crate::{ SeraiKey, OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, SubstrateToEventualityDb, ScanToEventualityDb, }, - BlockExt, ScannerFeed, KeyFor, OutputFor, EventualityFor, SchedulerUpdate, Scheduler, + BlockExt, ScannerFeed, KeyFor, OutputFor, EventualityFor, Payment, SchedulerUpdate, Scheduler, sort_outputs, scan::{next_to_scan_for_outputs_block, queue_output_until_block}, }; @@ -165,7 +165,11 @@ impl> EventualityTask { { intaked_any = true; - let new_eventualities = self.scheduler.fulfill(&mut txn, &keys_with_stages, burns); + let new_eventualities = self.scheduler.fulfill( + &mut txn, + &keys_with_stages, + burns.into_iter().filter_map(|burn| Payment::try_from(burn).ok()).collect(), + ); intake_eventualities::(&mut txn, new_eventualities); } txn.commit(); @@ -291,7 +295,7 @@ impl> ContinuallyRan for EventualityTas // Drop any outputs less than the dust limit non_external_outputs.retain(|output| { let balance = output.balance(); - balance.amount.0 >= self.feed.dust(balance.coin).0 + balance.amount.0 >= S::dust(balance.coin).0 }); /* diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 53bb9030d..80cf96be8 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -5,7 +5,7 @@ use group::GroupEncoding; use serai_db::{Get, DbTxn, Db}; -use serai_primitives::{NetworkId, Coin, Amount}; +use serai_primitives::{NetworkId, Coin, Amount, Balance, Data}; use serai_in_instructions_primitives::Batch; use serai_coins_primitives::OutInstructionWithBalance; @@ -143,7 +143,7 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { /// /// This MUST be constant. Serai MUST NOT create internal outputs worth less than this. This /// SHOULD be a value worth handling at a human level. - fn dust(&self, coin: Coin) -> Amount; + fn dust(coin: Coin) -> Amount; /// The cost to aggregate an input as of the specified block. /// @@ -155,10 +155,14 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { ) -> Result; } -type KeyFor = <::Block as Block>::Key; -type AddressFor = <::Block as Block>::Address; -type OutputFor = <::Block as Block>::Output; -type EventualityFor = <::Block as Block>::Eventuality; +/// The key type for this ScannerFeed. +pub type KeyFor = <::Block as Block>::Key; +/// The address type for this ScannerFeed. 
+pub type AddressFor = <::Block as Block>::Address; +/// The output type for this ScannerFeed. +pub type OutputFor = <::Block as Block>::Output; +/// The eventuality type for this ScannerFeed. +pub type EventualityFor = <::Block as Block>::Eventuality; #[async_trait::async_trait] pub trait BatchPublisher: 'static + Send + Sync { @@ -200,6 +204,55 @@ pub struct SchedulerUpdate { returns: Vec>, } +impl SchedulerUpdate { + /// The outputs to accumulate. + pub fn outputs(&self) -> &[OutputFor] { + &self.outputs + } + /// The outputs to forward to the latest multisig. + pub fn forwards(&self) -> &[OutputFor] { + &self.forwards + } + /// The outputs to return. + pub fn returns(&self) -> &[Return] { + &self.returns + } +} + +/// A payment to fulfill. +#[derive(Clone)] +pub struct Payment { + address: AddressFor, + balance: Balance, + data: Option>, +} + +impl TryFrom for Payment { + type Error = (); + fn try_from(out_instruction_with_balance: OutInstructionWithBalance) -> Result { + Ok(Payment { + address: out_instruction_with_balance.instruction.address.try_into().map_err(|_| ())?, + balance: out_instruction_with_balance.balance, + data: out_instruction_with_balance.instruction.data.map(Data::consume), + }) + } +} + +impl Payment { + /// The address to pay. + pub fn address(&self) -> &AddressFor { + &self.address + } + /// The balance to transfer. + pub fn balance(&self) -> Balance { + self.balance + } + /// The data to associate with this payment. + pub fn data(&self) -> &Option> { + &self.data + } +} + /// The object responsible for accumulating outputs and planning new transactions. pub trait Scheduler: 'static + Send { /// Activate a key. @@ -274,7 +327,7 @@ pub trait Scheduler: 'static + Send { &mut self, txn: &mut impl DbTxn, active_keys: &[(KeyFor, LifetimeStage)], - payments: Vec, + payments: Vec>, ) -> HashMap, Vec>>; } diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index 4d6ca16e8..51671dc68 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -215,7 +215,7 @@ impl ContinuallyRan for ScanTask { let balance = output.balance(); // We ensure it's over the dust limit to prevent people sending 1 satoshi from causing // an invocation of a consensus/signing protocol - if balance.amount.0 >= self.feed.dust(balance.coin).0 { + if balance.amount.0 >= S::dust(balance.coin).0 { ScannerGlobalDb::::flag_notable_due_to_non_external_output(&mut txn, b); } continue; @@ -243,7 +243,7 @@ impl ContinuallyRan for ScanTask { balance.amount.0 -= 2 * cost_to_aggregate.0; // Now, check it's still past the dust threshold - if balance.amount.0 < self.feed.dust(balance.coin).0 { + if balance.amount.0 < S::dust(balance.coin).0 { continue; } diff --git a/processor/scanner/src/substrate/mod.rs b/processor/scanner/src/substrate/mod.rs index 4feb85d58..d67be9dc3 100644 --- a/processor/scanner/src/substrate/mod.rs +++ b/processor/scanner/src/substrate/mod.rs @@ -138,6 +138,11 @@ impl ContinuallyRan for SubstrateTask { } } + // Drop burns less than the dust + let burns = burns + .into_iter() + .filter(|burn| burn.balance.amount.0 >= S::dust(burn.balance.coin).0) + .collect::>(); if !burns.is_empty() { // We send these Burns as stemming from this block we just acknowledged // This causes them to be acted on after we accumulate the outputs from this block From e1a965f396cfe99876bb790171d8637c2bfd7e46 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 1 Sep 2024 01:55:04 -0400 Subject: [PATCH 061/179] Outline of the transaction-chaining scheduler 
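The property this outline builds on is that, with transaction chaining, a planned-but-unsigned transaction already determines identifiers for the outputs it will create, so a transaction spending them can be planned (and handed to the signers) without waiting for the parent to be signed or confirmed. A rough sketch, with purely illustrative types:

```rust
// Purely illustrative types: a planned transaction exposes the outputs it
// will effect, letting dependents be planned before the parent is signed.
struct PlannedOutput {
  // With transaction chaining, this identifier is known pre-signature.
  id: [u8; 36],
  amount: u64,
}

struct PlannedTx {
  outputs: Vec<PlannedOutput>,
}

// Plan a child spending one of the parent's not-yet-on-chain outputs
// immediately, rather than waiting for the parent to confirm.
fn plan_child(parent: &PlannedTx, amount: u64) -> PlannedTx {
  let _input = parent
    .outputs
    .iter()
    .find(|output| output.amount >= amount)
    .expect("parent didn't have a sufficient output");
  // ... build the child transaction spending that output, whose own outputs'
  // identifiers are in turn known immediately ...
  PlannedTx { outputs: vec![PlannedOutput { id: [0; 36], amount }] }
}
```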
--- Cargo.lock | 14 +- processor/primitives/Cargo.toml | 1 + processor/primitives/src/lib.rs | 3 + processor/primitives/src/payment.rs | 42 +++++ processor/scanner/Cargo.toml | 4 +- processor/scanner/src/eventuality/mod.rs | 9 +- processor/scanner/src/lib.rs | 50 ++---- processor/scanner/src/lifetime.rs | 2 +- .../scheduler/utxo/primitives/src/lib.rs | 50 +++--- .../utxo/transaction-chaining/Cargo.toml | 19 ++- .../utxo/transaction-chaining/LICENSE | 2 +- .../utxo/transaction-chaining/src/db.rs | 49 ++++++ .../utxo/transaction-chaining/src/lib.rs | 148 ++++++++++++++++++ 13 files changed, 321 insertions(+), 72 deletions(-) create mode 100644 processor/primitives/src/payment.rs create mode 100644 processor/scheduler/utxo/transaction-chaining/src/db.rs diff --git a/Cargo.lock b/Cargo.lock index 935e95d8f..7512f35c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8656,6 +8656,7 @@ dependencies = [ "group", "log", "parity-scale-codec", + "serai-coins-primitives", "serai-primitives", "tokio", ] @@ -8674,9 +8675,7 @@ dependencies = [ "serai-db", "serai-in-instructions-primitives", "serai-primitives", - "serai-processor-messages", "serai-processor-primitives", - "thiserror", "tokio", ] @@ -8712,6 +8711,17 @@ dependencies = [ [[package]] name = "serai-processor-transaction-chaining-scheduler" version = "0.1.0" +dependencies = [ + "borsh", + "group", + "parity-scale-codec", + "serai-coins-primitives", + "serai-db", + "serai-primitives", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-utxo-scheduler-primitives", +] [[package]] name = "serai-processor-utxo-scheduler-primitives" diff --git a/processor/primitives/Cargo.toml b/processor/primitives/Cargo.toml index 9427a6042..dd1b74ea8 100644 --- a/processor/primitives/Cargo.toml +++ b/processor/primitives/Cargo.toml @@ -22,6 +22,7 @@ async-trait = { version = "0.1", default-features = false } group = { version = "0.13", default-features = false } serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } +serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } diff --git a/processor/primitives/src/lib.rs b/processor/primitives/src/lib.rs index 7a8be2197..4e45fa8f3 100644 --- a/processor/primitives/src/lib.rs +++ b/processor/primitives/src/lib.rs @@ -21,6 +21,9 @@ pub use eventuality::*; mod block; pub use block::*; +mod payment; +pub use payment::*; + /// An ID for an output/transaction/block/etc. /// /// IDs don't need to implement `Copy`, enabling `[u8; 33]`, `[u8; 64]` to be used. IDs are still diff --git a/processor/primitives/src/payment.rs b/processor/primitives/src/payment.rs new file mode 100644 index 000000000..1bbb06048 --- /dev/null +++ b/processor/primitives/src/payment.rs @@ -0,0 +1,42 @@ +use serai_primitives::{Balance, Data}; +use serai_coins_primitives::OutInstructionWithBalance; + +use crate::Address; + +/// A payment to fulfill. 
+#[derive(Clone)] +pub struct Payment { + address: A, + balance: Balance, + data: Option>, +} + +impl TryFrom for Payment { + type Error = (); + fn try_from(out_instruction_with_balance: OutInstructionWithBalance) -> Result { + Ok(Payment { + address: out_instruction_with_balance.instruction.address.try_into().map_err(|_| ())?, + balance: out_instruction_with_balance.balance, + data: out_instruction_with_balance.instruction.data.map(Data::consume), + }) + } +} + +impl Payment { + /// Create a new Payment. + pub fn new(address: A, balance: Balance, data: Option>) -> Self { + Payment { address, balance, data } + } + /// The address to pay. + pub fn address(&self) -> &A { + &self.address + } + /// The balance to transfer. + pub fn balance(&self) -> Balance { + self.balance + } + /// The data to associate with this payment. + pub fn data(&self) -> &Option> { + &self.data + } +} diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml index e7cdef97a..c2dc31fe3 100644 --- a/processor/scanner/Cargo.toml +++ b/processor/scanner/Cargo.toml @@ -19,7 +19,6 @@ workspace = true [dependencies] # Macros async-trait = { version = "0.1", default-features = false } -thiserror = { version = "1", default-features = false } # Encoders hex = { version = "0.4", default-features = false, features = ["std"] } @@ -37,7 +36,6 @@ serai-db = { path = "../../common/db" } serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std"] } -serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std"] } +serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] } -messages = { package = "serai-processor-messages", path = "../messages" } primitives = { package = "serai-processor-primitives", path = "../primitives" } diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index bfc879ea0..83ec50ab4 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -4,7 +4,7 @@ use group::GroupEncoding; use serai_db::{Get, DbTxn, Db}; -use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Eventuality, Block}; +use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Eventuality, Block, Payment}; use crate::{ lifetime::LifetimeStage, @@ -12,7 +12,7 @@ use crate::{ SeraiKey, OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, SubstrateToEventualityDb, ScanToEventualityDb, }, - BlockExt, ScannerFeed, KeyFor, OutputFor, EventualityFor, Payment, SchedulerUpdate, Scheduler, + BlockExt, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, SchedulerUpdate, Scheduler, sort_outputs, scan::{next_to_scan_for_outputs_block, queue_output_until_block}, }; @@ -168,7 +168,10 @@ impl> EventualityTask { let new_eventualities = self.scheduler.fulfill( &mut txn, &keys_with_stages, - burns.into_iter().filter_map(|burn| Payment::try_from(burn).ok()).collect(), + burns + .into_iter() + .filter_map(|burn| Payment::>::try_from(burn).ok()) + .collect(), ); intake_eventualities::(&mut txn, new_eventualities); } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 80cf96be8..4d33d0d06 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -5,11 +5,11 @@ use group::GroupEncoding; use 
serai_db::{Get, DbTxn, Db}; -use serai_primitives::{NetworkId, Coin, Amount, Balance, Data}; +use serai_primitives::{NetworkId, Coin, Amount}; use serai_in_instructions_primitives::Batch; use serai_coins_primitives::OutInstructionWithBalance; -use primitives::{task::*, Address, ReceivedOutput, Block}; +use primitives::{task::*, Address, ReceivedOutput, Block, Payment}; // Logic for deciding where in its lifetime a multisig is. mod lifetime; @@ -195,6 +195,16 @@ impl Return { let output = OutputFor::::read(reader)?; Ok(Return { address, output }) } + + /// The address to return the output to. + pub fn address(&self) -> &AddressFor { + &self.address + } + + /// The output to return. + pub fn output(&self) -> &OutputFor { + &self.output + } } /// An update for the scheduler. @@ -219,40 +229,6 @@ impl SchedulerUpdate { } } -/// A payment to fulfill. -#[derive(Clone)] -pub struct Payment { - address: AddressFor, - balance: Balance, - data: Option>, -} - -impl TryFrom for Payment { - type Error = (); - fn try_from(out_instruction_with_balance: OutInstructionWithBalance) -> Result { - Ok(Payment { - address: out_instruction_with_balance.instruction.address.try_into().map_err(|_| ())?, - balance: out_instruction_with_balance.balance, - data: out_instruction_with_balance.instruction.data.map(Data::consume), - }) - } -} - -impl Payment { - /// The address to pay. - pub fn address(&self) -> &AddressFor { - &self.address - } - /// The balance to transfer. - pub fn balance(&self) -> Balance { - self.balance - } - /// The data to associate with this payment. - pub fn data(&self) -> &Option> { - &self.data - } -} - /// The object responsible for accumulating outputs and planning new transactions. pub trait Scheduler: 'static + Send { /// Activate a key. @@ -327,7 +303,7 @@ pub trait Scheduler: 'static + Send { &mut self, txn: &mut impl DbTxn, active_keys: &[(KeyFor, LifetimeStage)], - payments: Vec>, + payments: Vec>>, ) -> HashMap, Vec>>; } diff --git a/processor/scanner/src/lifetime.rs b/processor/scanner/src/lifetime.rs index bef6af8b4..e07f5f420 100644 --- a/processor/scanner/src/lifetime.rs +++ b/processor/scanner/src/lifetime.rs @@ -6,7 +6,7 @@ use crate::ScannerFeed; /// rotation process. Steps 7-8 regard a multisig which isn't retiring yet retired, and /// accordingly, no longer exists, so they are not modelled here (as this only models active /// multisigs. Inactive multisigs aren't represented in the first place). -#[derive(Clone, Copy, PartialEq)] +#[derive(Clone, Copy, PartialEq, Debug)] pub enum LifetimeStage { /// A new multisig, once active, shouldn't actually start receiving coins until several blocks /// later. If any UI is premature in sending to this multisig, we delay to report the outputs to diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs index 61dd9d888..2c6da97b7 100644 --- a/processor/scheduler/utxo/primitives/src/lib.rs +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -6,12 +6,12 @@ use core::fmt::Debug; use serai_primitives::{Coin, Amount}; -use primitives::ReceivedOutput; -use scanner::{Payment, ScannerFeed, AddressFor, OutputFor}; +use primitives::{ReceivedOutput, Payment}; +use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor}; /// An object able to plan a transaction. #[async_trait::async_trait] -pub trait TransactionPlanner { +pub trait TransactionPlanner: 'static + Send + Sync { /// An error encountered when determining the fee rate. /// /// This MUST be an ephemeral error. 
Retrying fetching data from the blockchain MUST eventually @@ -33,17 +33,22 @@ pub trait TransactionPlanner { coin: Coin, ) -> Result; + /// The branch address for this key of Serai's. + fn branch_address(key: KeyFor) -> AddressFor; + /// The change address for this key of Serai's. + fn change_address(key: KeyFor) -> AddressFor; + /// The forwarding address for this key of Serai's. + fn forwarding_address(key: KeyFor) -> AddressFor; + /// Calculate the fee for a transaction with this structure. /// /// The fee rate, inputs, and payments will all be for the same coin. The returned fee is /// denominated in this coin. fn calculate_fee( - &self, - block_number: u64, fee_rate: Self::FeeRate, inputs: Vec>, - payments: Vec>, - change: Option>, + payments: Vec>>, + change: Option>, ) -> Amount; /// Plan a transaction. @@ -53,12 +58,10 @@ pub trait TransactionPlanner { /// /// `change` will always be an address belonging to the Serai network. fn plan( - &self, - block_number: u64, fee_rate: Self::FeeRate, inputs: Vec>, - payments: Vec>, - change: Option>, + payments: Vec>>, + change: Option>, ) -> Self::PlannedTransaction; /// Obtain a PlannedTransaction via amortizing the fee over the payments. @@ -69,13 +72,11 @@ pub trait TransactionPlanner { /// /// Returns `None` if the fee exceeded the inputs, or `Some` otherwise. fn plan_transaction_with_fee_amortization( - &self, operating_costs: &mut u64, - block_number: u64, fee_rate: Self::FeeRate, inputs: Vec>, - mut payments: Vec>, - change: Option>, + mut payments: Vec>>, + mut change: Option>, ) -> Option { // Sanity checks { @@ -102,9 +103,7 @@ pub trait TransactionPlanner { // Sort payments from high amount to low amount payments.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse()); - let mut fee = self - .calculate_fee(block_number, fee_rate, inputs.clone(), payments.clone(), change.clone()) - .0; + let mut fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0; let mut amortized = 0; while !payments.is_empty() { // We need to pay the fee, and any accrued operating costs, minus what we've already @@ -124,9 +123,7 @@ pub trait TransactionPlanner { if payments.last().unwrap().balance().amount.0 <= (per_payment_fee + S::dust(coin).0) { amortized += payments.pop().unwrap().balance().amount.0; // Recalculate the fee and try again - fee = self - .calculate_fee(block_number, fee_rate, inputs.clone(), payments.clone(), change.clone()) - .0; + fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0; continue; } // Break since all of these payments shouldn't be dropped @@ -167,6 +164,15 @@ pub trait TransactionPlanner { amortized += per_payment_fee; } assert!(amortized >= (*operating_costs + fee)); + + // If the change is less than the dust, drop it + let would_be_change = inputs.iter().map(|input| input.balance().amount.0).sum::() - + payments.iter().map(|payment| payment.balance().amount.0).sum::() - + fee; + if would_be_change < S::dust(coin).0 { + change = None; + *operating_costs += would_be_change; + } } // Update the amount of operating costs @@ -174,6 +180,6 @@ pub trait TransactionPlanner { } // Because we amortized, or accrued as operating costs, the fee, make the transaction - Some(self.plan(block_number, fee_rate, inputs, payments, change)) + Some(Self::plan(fee_rate, inputs, payments, change)) } } diff --git a/processor/scheduler/utxo/transaction-chaining/Cargo.toml b/processor/scheduler/utxo/transaction-chaining/Cargo.toml index 360da6c51..d54d0f852 100644 ---
a/processor/scheduler/utxo/transaction-chaining/Cargo.toml +++ b/processor/scheduler/utxo/transaction-chaining/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "serai-processor-transaction-chaining-scheduler" version = "0.1.0" -description = "Scheduler for networks with transaction chaining for the Serai processor" +description = "Scheduler for UTXO networks with transaction chaining for the Serai processor" license = "AGPL-3.0-only" -repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/transaction-chaining" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/utxo/transaction-chaining" authors = ["Luke Parker "] keywords = [] edition = "2021" @@ -14,9 +14,22 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [package.metadata.cargo-machete] -ignored = ["scale"] +ignored = ["scale", "borsh"] [lints] workspace = true [dependencies] +group = { version = "0.13", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } +serai-coins-primitives = { path = "../../../../substrate/coins/primitives", default-features = false, features = ["std"] } + +serai-db = { path = "../../../../common/db" } + +primitives = { package = "serai-processor-primitives", path = "../../../primitives" } +scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../../scanner" } diff --git a/processor/scheduler/utxo/transaction-chaining/LICENSE b/processor/scheduler/utxo/transaction-chaining/LICENSE index 41d5a2616..e091b1498 100644 --- a/processor/scheduler/utxo/transaction-chaining/LICENSE +++ b/processor/scheduler/utxo/transaction-chaining/LICENSE @@ -1,6 +1,6 @@ AGPL-3.0-only license -Copyright (c) 2022-2024 Luke Parker +Copyright (c) 2024 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as diff --git a/processor/scheduler/utxo/transaction-chaining/src/db.rs b/processor/scheduler/utxo/transaction-chaining/src/db.rs new file mode 100644 index 000000000..20c574e9d --- /dev/null +++ b/processor/scheduler/utxo/transaction-chaining/src/db.rs @@ -0,0 +1,49 @@ +use core::marker::PhantomData; + +use group::GroupEncoding; + +use serai_primitives::Coin; + +use serai_db::{Get, DbTxn, create_db}; + +use primitives::ReceivedOutput; +use scanner::{ScannerFeed, KeyFor, OutputFor}; + +create_db! 
{ + TransactionChainingScheduler { + SerializedOutputs: (key: &[u8], coin: Coin) -> Vec, + } +} + +pub(crate) struct Db(PhantomData); +impl Db { + pub(crate) fn outputs( + getter: &impl Get, + key: KeyFor, + coin: Coin, + ) -> Option>> { + let buf = SerializedOutputs::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(OutputFor::::read(&mut buf).unwrap()); + } + Some(res) + } + pub(crate) fn set_outputs( + txn: &mut impl DbTxn, + key: KeyFor, + coin: Coin, + outputs: &[OutputFor], + ) { + let mut buf = Vec::with_capacity(outputs.len() * 128); + for output in outputs { + output.write(&mut buf).unwrap(); + } + SerializedOutputs::set(txn, key.to_bytes().as_ref(), coin, &buf); + } + pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { + SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin); + } +} diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index 3639aa043..636356960 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -1,3 +1,151 @@ #![cfg_attr(docsrs, feature(doc_auto_cfg))] #![doc = include_str!("../README.md")] #![deny(missing_docs)] + +use core::marker::PhantomData; +use std::collections::HashMap; + +use serai_primitives::Coin; + +use serai_db::DbTxn; + +use primitives::{ReceivedOutput, Payment}; +use scanner::{ + LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, SchedulerUpdate, + Scheduler as SchedulerTrait, +}; +use scheduler_primitives::*; + +mod db; +use db::Db; + +/// A planned transaction. +pub struct PlannedTransaction { + /// The signable transaction. + signable: T, + /// The outputs we'll receive from this. + effected_received_outputs: OutputFor, + /// The Evtnuality to watch for. + eventuality: EventualityFor, +} + +/// A scheduler of transactions for networks premised on the UTXO model which support +/// transaction chaining. +pub struct Scheduler< + S: ScannerFeed, + T, + P: TransactionPlanner>, +>(PhantomData, PhantomData, PhantomData

); + +impl>> + Scheduler +{ + fn accumulate_outputs(txn: &mut impl DbTxn, key: KeyFor, outputs: &[OutputFor]) { + // Accumulate them in memory + let mut outputs_by_coin = HashMap::with_capacity(1); + for output in outputs.iter().filter(|output| output.key() == key) { + let coin = output.balance().coin; + if let std::collections::hash_map::Entry::Vacant(e) = outputs_by_coin.entry(coin) { + e.insert(Db::::outputs(txn, key, coin).unwrap()); + } + outputs_by_coin.get_mut(&coin).unwrap().push(output.clone()); + } + + // Flush them to the database + for (coin, outputs) in outputs_by_coin { + Db::::set_outputs(txn, key, coin, &outputs); + } + } +} + +impl< + S: ScannerFeed, + T: 'static + Send + Sync, + P: TransactionPlanner>, + > SchedulerTrait for Scheduler +{ + fn activate_key(&mut self, txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + Db::::set_outputs(txn, key, *coin, &vec![]); + } + } + + fn flush_key(&mut self, txn: &mut impl DbTxn, retiring_key: KeyFor, new_key: KeyFor) { + todo!("TODO") + } + + fn retire_key(&mut self, txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, *coin).is_none()); + Db::::del_outputs(txn, key, *coin); + } + } + + fn update( + &mut self, + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + update: SchedulerUpdate, + ) -> HashMap, Vec>> { + // Accumulate all the outputs + for key in active_keys { + Self::accumulate_outputs(txn, key.0, update.outputs()); + } + + let mut fee_rates: HashMap = todo!("TODO"); + + // Create the transactions for the forwards/burns + { + let mut planned_txs = vec![]; + for forward in update.forwards() { + let forward_to_key = active_keys.last().unwrap(); + assert_eq!(forward_to_key.1, LifetimeStage::Active); + + let Some(plan) = P::plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + &mut 0, + fee_rates[&forward.balance().coin], + vec![forward.clone()], + vec![Payment::new(P::forwarding_address(forward_to_key.0), forward.balance(), None)], + None, + ) else { + continue; + }; + planned_txs.push(plan); + } + for to_return in update.returns() { + let out_instruction = + Payment::new(to_return.address().clone(), to_return.output().balance(), None); + let Some(plan) = P::plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + &mut 0, + fee_rates[&out_instruction.balance().coin], + vec![to_return.output().clone()], + vec![out_instruction], + None, + ) else { + continue; + }; + planned_txs.push(plan); + } + + // TODO: Send the transactions off for signing + // TODO: Return the eventualities + todo!("TODO") + } + } + + fn fulfill( + &mut self, + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> HashMap, Vec>> { + // TODO: Find the key to use for fulfillment + // TODO: Sort outputs and payments by amount + // TODO: For as long as we don't have sufficiently aggregated inputs to handle all payments, + // aggregate + // TODO: Create the tree for the payments + todo!("TODO") + } +} From 5e7e3e435e39d6391f2e148118c0defb3014f839 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 2 Sep 2024 16:09:52 -0400 Subject: [PATCH 062/179] Add scheduler-primitives The main benefit is whatever scheduler is in use, we now have a single API to receive TXs to sign (which is of value to the TX signer crate we'll inevitably build). 
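As a rough sketch of the intended consumption pattern, assuming serai-db's Db/DbTxn interface (the pending_transactions helper is hypothetical and not part of this commit; TransactionsToSign and try_recv are as added here):

  use group::GroupEncoding;
  use serai_db::{Db, DbTxn};

  // Illustrative only: drain every transaction queued for signing under this key.
  fn pending_transactions<D: Db, T: SignableTransaction>(
    db: &mut D,
    key: &impl GroupEncoding,
  ) -> Vec<T> {
    let mut pending = vec![];
    loop {
      let mut txn = db.txn();
      // try_recv pops the next queued transaction for this key, if any
      let Some(tx) = TransactionsToSign::<T>::try_recv(&mut txn, key) else { break };
      txn.commit();
      pending.push(tx);
    }
    pending
  }

Whichever scheduler wrote into the channel, the signer only needs this one interface.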
--- .github/workflows/tests.yml | 3 +- Cargo.lock | 12 ++++- Cargo.toml | 3 +- deny.toml | 5 +- processor/scanner/src/lib.rs | 8 +++- processor/scheduler/primitives/Cargo.toml | 25 ++++++++++ processor/scheduler/primitives/LICENSE | 15 ++++++ processor/scheduler/primitives/README.md | 3 ++ processor/scheduler/primitives/src/lib.rs | 48 +++++++++++++++++++ .../utxo/transaction-chaining/Cargo.toml | 4 +- .../utxo/transaction-chaining/src/db.rs | 26 +++++++++- .../utxo/transaction-chaining/src/lib.rs | 42 +++++++++++----- 12 files changed, 173 insertions(+), 21 deletions(-) create mode 100644 processor/scheduler/primitives/Cargo.toml create mode 100644 processor/scheduler/primitives/LICENSE create mode 100644 processor/scheduler/primitives/README.md create mode 100644 processor/scheduler/primitives/src/lib.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 33f2e8529..ca0bd4f59 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -42,9 +42,10 @@ jobs: -p serai-processor-key-gen \ -p serai-processor-frost-attempt-manager \ -p serai-processor-primitives \ + -p serai-processor-scanner \ + -p serai-processor-scheduler-primitives \ -p serai-processor-utxo-scheduler-primitives \ -p serai-processor-transaction-chaining-scheduler \ - -p serai-processor-scanner \ -p serai-processor \ -p tendermint-machine \ -p tributary-chain \ diff --git a/Cargo.lock b/Cargo.lock index 7512f35c6..6e7ced07f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8679,6 +8679,16 @@ dependencies = [ "tokio", ] +[[package]] +name = "serai-processor-scheduler-primitives" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "parity-scale-codec", + "serai-db", +] + [[package]] name = "serai-processor-tests" version = "0.1.0" @@ -8715,11 +8725,11 @@ dependencies = [ "borsh", "group", "parity-scale-codec", - "serai-coins-primitives", "serai-db", "serai-primitives", "serai-processor-primitives", "serai-processor-scanner", + "serai-processor-scheduler-primitives", "serai-processor-utxo-scheduler-primitives", ] diff --git a/Cargo.toml b/Cargo.toml index 174357132..b61cde688 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,9 +74,10 @@ members = [ "processor/frost-attempt-manager", "processor/primitives", + "processor/scanner", + "processor/scheduler/primitives", "processor/scheduler/utxo/primitives", "processor/scheduler/utxo/transaction-chaining", - "processor/scanner", "processor", "coordinator/tributary/tendermint", diff --git a/deny.toml b/deny.toml index fb6162449..2ca0ca503 100644 --- a/deny.toml +++ b/deny.toml @@ -49,9 +49,10 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-key-gen" }, { allow = ["AGPL-3.0"], name = "serai-processor-frost-attempt-manager" }, - { allow = ["AGPL-3.0"], name = "serai-processor-utxo-primitives" }, - { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" }, { allow = ["AGPL-3.0"], name = "serai-processor-scanner" }, + { allow = ["AGPL-3.0"], name = "serai-processor-scheduler-primitives" }, + { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler-primitives" }, + { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" }, { allow = ["AGPL-3.0"], name = "serai-processor" }, { allow = ["AGPL-3.0"], name = "tributary-chain" }, diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 4d33d0d06..d894f8197 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -241,8 +241,12 @@ pub trait Scheduler: 'static + Send { /// /// When a key 
is activated, the existing multisig should retain its outputs and utility for a /// certain time period. With `flush_key`, all outputs should be directed towards fulfilling some - /// obligation or the `new_key`. Every output MUST be connected to an Eventuality. If a key no - /// longer has active Eventualities, it MUST be able to be retired. + /// obligation or the `new_key`. Every output held by the retiring key MUST be connected to an + /// Eventuality. If a key no longer has active Eventualities, it MUST be able to be retired + /// without losing any coins. + /// + /// If the retiring key has any unfulfilled payments associated with it, those MUST be made + /// the responsibility of the new key. fn flush_key(&mut self, txn: &mut impl DbTxn, retiring_key: KeyFor, new_key: KeyFor); /// Retire a key as it'll no longer be used. diff --git a/processor/scheduler/primitives/Cargo.toml b/processor/scheduler/primitives/Cargo.toml new file mode 100644 index 000000000..31d738531 --- /dev/null +++ b/processor/scheduler/primitives/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "serai-processor-scheduler-primitives" +version = "0.1.0" +description = "Primitives for schedulers for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/primitives" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-db = { path = "../../../common/db" } diff --git a/processor/scheduler/primitives/LICENSE b/processor/scheduler/primitives/LICENSE new file mode 100644 index 000000000..e091b1498 --- /dev/null +++ b/processor/scheduler/primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/primitives/README.md b/processor/scheduler/primitives/README.md new file mode 100644 index 000000000..6e81249d9 --- /dev/null +++ b/processor/scheduler/primitives/README.md @@ -0,0 +1,3 @@ +# Scheduler Primitives + +Primitives for schedulers. diff --git a/processor/scheduler/primitives/src/lib.rs b/processor/scheduler/primitives/src/lib.rs new file mode 100644 index 000000000..97a00c03e --- /dev/null +++ b/processor/scheduler/primitives/src/lib.rs @@ -0,0 +1,48 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::marker::PhantomData; +use std::io; + +use group::GroupEncoding; + +use serai_db::DbTxn; + +/// A signable transaction. +pub trait SignableTransaction: 'static + Sized + Send + Sync { + /// Read a `SignableTransaction`. 
+ fn read(reader: &mut impl io::Read) -> io::Result; + /// Write a `SignableTransaction`. + fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; +} + +mod db { + use serai_db::{Get, DbTxn, create_db, db_channel}; + + db_channel! { + SchedulerPrimitives { + TransactionsToSign: (key: &[u8]) -> Vec, + } + } +} + +/// The transactions to sign, as scheduled by a Scheduler. +pub struct TransactionsToSign(PhantomData); +impl TransactionsToSign { + /// Send a transaction to sign. + pub fn send(txn: &mut impl DbTxn, key: &impl GroupEncoding, tx: &T) { + let mut buf = Vec::with_capacity(128); + tx.write(&mut buf).unwrap(); + db::TransactionsToSign::send(txn, key.to_bytes().as_ref(), &buf); + } + + /// Try to receive a transaction to sign. + pub fn try_recv(txn: &mut impl DbTxn, key: &impl GroupEncoding) -> Option { + let tx = db::TransactionsToSign::try_recv(txn, key.to_bytes().as_ref())?; + let mut tx = tx.as_slice(); + let res = T::read(&mut tx).unwrap(); + assert!(tx.is_empty()); + Some(res) + } +} diff --git a/processor/scheduler/utxo/transaction-chaining/Cargo.toml b/processor/scheduler/utxo/transaction-chaining/Cargo.toml index d54d0f852..a6b121286 100644 --- a/processor/scheduler/utxo/transaction-chaining/Cargo.toml +++ b/processor/scheduler/utxo/transaction-chaining/Cargo.toml @@ -26,10 +26,10 @@ scale = { package = "parity-scale-codec", version = "3", default-features = fals borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } -serai-coins-primitives = { path = "../../../../substrate/coins/primitives", default-features = false, features = ["std"] } serai-db = { path = "../../../../common/db" } primitives = { package = "serai-processor-primitives", path = "../../../primitives" } -scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../../primitives" } +utxo-scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" } scanner = { package = "serai-processor-scanner", path = "../../../scanner" } diff --git a/processor/scheduler/utxo/transaction-chaining/src/db.rs b/processor/scheduler/utxo/transaction-chaining/src/db.rs index 20c574e9d..f6de26d13 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/db.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/db.rs @@ -2,7 +2,7 @@ use core::marker::PhantomData; use group::GroupEncoding; -use serai_primitives::Coin; +use serai_primitives::{Coin, Amount}; use serai_db::{Get, DbTxn, create_db}; @@ -11,12 +11,23 @@ use scanner::{ScannerFeed, KeyFor, OutputFor}; create_db! 
{ TransactionChainingScheduler { + OperatingCosts: (coin: Coin) -> Amount, SerializedOutputs: (key: &[u8], coin: Coin) -> Vec, + // We should be immediately able to schedule the fulfillment of payments, yet this may not be + // possible if we're in the middle of a multisig rotation (as our output set will be split) + SerializedQueuedPayments: (key: &[u8]) > Vec, } } pub(crate) struct Db(PhantomData); impl Db { + pub(crate) fn operating_costs(getter: &impl Get, coin: Coin) -> Amount { + OperatingCosts::get(getter, coin).unwrap_or(Amount(0)) + } + pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: Coin, amount: Amount) { + OperatingCosts::set(txn, coin, &amount) + } + pub(crate) fn outputs( getter: &impl Get, key: KeyFor, @@ -46,4 +57,17 @@ impl Db { pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin); } + + pub(crate) fn queued_payments( + getter: &impl Get, + key: KeyFor, + ) -> Option>> { + todo!("TODO") + } + pub(crate) fn set_queued_payments(txn: &mut impl DbTxn, key: KeyFor, queued: Vec>) { + todo!("TODO") + } + pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor) { + SerializedQueuedPayments::del(txn, key.to_bytes().as_ref()); + } } diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index 636356960..8f21e9d62 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -5,6 +5,8 @@ use core::marker::PhantomData; use std::collections::HashMap; +use group::GroupEncoding; + use serai_primitives::Coin; use serai_db::DbTxn; @@ -15,6 +17,7 @@ use scanner::{ Scheduler as SchedulerTrait, }; use scheduler_primitives::*; +use utxo_scheduler_primitives::*; mod db; use db::Db; @@ -25,7 +28,7 @@ pub struct PlannedTransaction { signable: T, /// The outputs we'll receive from this. effected_received_outputs: OutputFor, - /// The Evtnuality to watch for. + /// The Eventuality to watch for. 
eventuality: EventualityFor, } @@ -60,13 +63,13 @@ impl>, > SchedulerTrait for Scheduler { fn activate_key(&mut self, txn: &mut impl DbTxn, key: KeyFor) { for coin in S::NETWORK.coins() { - Db::::set_outputs(txn, key, *coin, &vec![]); + Db::::set_outputs(txn, key, *coin, &[]); } } @@ -98,22 +101,27 @@ impl< { let mut planned_txs = vec![]; for forward in update.forwards() { - let forward_to_key = active_keys.last().unwrap(); - assert_eq!(forward_to_key.1, LifetimeStage::Active); + let key = forward.key(); + + assert_eq!(active_keys.len(), 2); + assert_eq!(active_keys[0].1, LifetimeStage::Forwarding); + assert_eq!(active_keys[1].1, LifetimeStage::Active); + let forward_to_key = active_keys[1].0; let Some(plan) = P::plan_transaction_with_fee_amortization( // This uses 0 for the operating costs as we don't incur any here &mut 0, fee_rates[&forward.balance().coin], vec![forward.clone()], - vec![Payment::new(P::forwarding_address(forward_to_key.0), forward.balance(), None)], + vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)], None, ) else { continue; }; - planned_txs.push(plan); + planned_txs.push((key, plan)); } for to_return in update.returns() { + let key = to_return.output().key(); let out_instruction = Payment::new(to_return.address().clone(), to_return.output().balance(), None); let Some(plan) = P::plan_transaction_with_fee_amortization( @@ -126,12 +134,24 @@ impl< ) else { continue; }; - planned_txs.push(plan); + planned_txs.push((key, plan)); + } + + let mut eventualities = HashMap::new(); + for (key, planned_tx) in planned_txs { + // Send the transactions off for signing + TransactionsToSign::::send(txn, &key, &planned_tx.signable); + + // Insert the eventualities into the result + eventualities + .entry(key.to_bytes().as_ref().to_vec()) + .or_insert(Vec::with_capacity(1)) + .push(planned_tx.eventuality); } - // TODO: Send the transactions off for signing - // TODO: Return the eventualities - todo!("TODO") + // TODO: Fulfill any payments we prior couldn't + + eventualities } } From 7a35231e4b6733f1db5f757f01c5737854ded4cf Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 2 Sep 2024 22:31:15 -0400 Subject: [PATCH 063/179] Better document the forwarded output flow --- processor/primitives/src/eventuality.rs | 7 +++++-- processor/scanner/src/eventuality/mod.rs | 15 ++++++++++----- processor/scanner/src/lib.rs | 10 ++++++++++ 3 files changed, 25 insertions(+), 7 deletions(-) diff --git a/processor/primitives/src/eventuality.rs b/processor/primitives/src/eventuality.rs index eb6cda9c2..6a52194d8 100644 --- a/processor/primitives/src/eventuality.rs +++ b/processor/primitives/src/eventuality.rs @@ -19,8 +19,11 @@ pub trait Eventuality: Sized + Send + Sync { /// identified, the full check is performed. fn lookup(&self) -> Vec; - /// The output this plan forwarded. - fn forwarded_output(&self) -> Option; + /// The output the resolution of this Eventuality was supposed to spend. + /// + /// If the resolution of this Eventuality has multiple inputs, there is no singular spent output + /// so this MUST return None. + fn singular_spent_output(&self) -> Option; /// Read an Eventuality. 
fn read(reader: &mut impl io::Read) -> io::Result; diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 83ec50ab4..98d278d9d 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -352,19 +352,24 @@ impl> ContinuallyRan for EventualityTas non_external_outputs.iter().filter(|output| output.kind() == OutputType::Forwarded) { let Some(eventuality) = completed_eventualities.get(&output.transaction_id()) else { - // Output sent to the forwarding address yet not actually forwarded + // Output sent to the forwarding address yet not one we made continue; }; - let Some(forwarded) = eventuality.forwarded_output() else { - // This was a TX made by us, yet someone burned to the forwarding address + let Some(forwarded) = eventuality.singular_spent_output() else { + // This was a TX made by us, yet someone burned to the forwarding address as it doesn't + // follow the structure of forwarding transactions continue; }; - let (return_address, in_instruction) = + let Some((return_address, in_instruction)) = ScannerGlobalDb::::return_address_and_in_instruction_for_forwarded_output( &txn, &forwarded, ) - .expect("forwarded an output yet didn't save its InInstruction to the DB"); + else { + // This was a TX made by us, coincidentally with the necessary structure, yet wasn't + // forwarding an output + continue; + }; queue_output_until_block::( &mut txn, b + S::WINDOW_LENGTH, diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index d894f8197..539bd4a7d 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -216,14 +216,24 @@ pub struct SchedulerUpdate { impl SchedulerUpdate { /// The outputs to accumulate. + /// + /// These MUST be accumulated. pub fn outputs(&self) -> &[OutputFor] { &self.outputs } + /// The outputs to forward to the latest multisig. + /// + /// These MUST be forwarded in a 1-input 1-output transaction or dropped (if the fees are too + /// high to make the forwarding transaction). pub fn forwards(&self) -> &[OutputFor] { &self.forwards } + /// The outputs to return. + /// + /// These SHOULD be returned as specified (potentially in batch). They MAY be dropped if the fees + /// are too high to make the return transaction. pub fn returns(&self) -> &[Return] { &self.returns } From 65ed943ec617fcd65751bb37f330da0d4ead1945 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 3 Sep 2024 01:04:43 -0400 Subject: [PATCH 064/179] Fix bug in the scanner regarding forwarded output amounts We'd report the amount originally received, minus 2x the cost to aggregate, regardless of the amount successfully forwarded. We should've reduced it to the amount successfully forwarded, if that was smaller, in case the cost to forward exceeded the aggregation cost.
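Concretely, with illustrative numbers: if an output of 100 was received and the cost to aggregate was 5, the InInstruction saved when forwarding reports 100 - (2 * 5) = 90. If the forwarding transaction then paid a fee of 15, the forwarded output only holds 85, so the reported amount must be min(90, 85) = 85, which is what the diff below now takes via in_instruction.balance.amount.0.min(output.balance().amount.0).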
--- processor/scanner/src/eventuality/mod.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 98d278d9d..5a7b4cca0 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -361,7 +361,7 @@ impl> ContinuallyRan for EventualityTas continue; }; - let Some((return_address, in_instruction)) = + let Some((return_address, mut in_instruction)) = ScannerGlobalDb::::return_address_and_in_instruction_for_forwarded_output( &txn, &forwarded, ) @@ -370,6 +370,14 @@ impl> ContinuallyRan for EventualityTas // forwarding an output continue; }; + + // We use the original amount, minus twice the cost to aggregate + // If the fees we paid to forward this now (less than the cost to aggregate now, yet not + // necessarily the cost to aggregate historically) caused this amount to be less, reduce + // it accordingly + in_instruction.balance.amount.0 = + in_instruction.balance.amount.0.min(output.balance().amount.0); + queue_output_until_block::( &mut txn, b + S::WINDOW_LENGTH, From 565fd59d2e9bf9314ca208bc404706cfee3c6892 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 3 Sep 2024 01:41:51 -0400 Subject: [PATCH 065/179] Add input aggregation in the transaction-chaining scheduler Also handles some other misc in it. --- Cargo.lock | 1 + .../scheduler/utxo/primitives/Cargo.toml | 1 + .../scheduler/utxo/primitives/src/lib.rs | 23 +- .../utxo/transaction-chaining/Cargo.toml | 2 +- .../utxo/transaction-chaining/src/db.rs | 20 +- .../utxo/transaction-chaining/src/lib.rs | 293 ++++++++++++++---- 6 files changed, 269 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6e7ced07f..dd1cc19e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8741,6 +8741,7 @@ dependencies = [ "serai-primitives", "serai-processor-primitives", "serai-processor-scanner", + "serai-processor-scheduler-primitives", ] [[package]] diff --git a/processor/scheduler/utxo/primitives/Cargo.toml b/processor/scheduler/utxo/primitives/Cargo.toml index 01d3db7d0..4f2499f98 100644 --- a/processor/scheduler/utxo/primitives/Cargo.toml +++ b/processor/scheduler/utxo/primitives/Cargo.toml @@ -23,3 +23,4 @@ serai-primitives = { path = "../../../../substrate/primitives", default-features primitives = { package = "serai-processor-primitives", path = "../../../primitives" } scanner = { package = "serai-processor-scanner", path = "../../../scanner" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../../primitives" } diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs index 2c6da97b7..f3e220b0b 100644 --- a/processor/scheduler/utxo/primitives/src/lib.rs +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -7,11 +7,22 @@ use core::fmt::Debug; use serai_primitives::{Coin, Amount}; use primitives::{ReceivedOutput, Payment}; -use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor}; +use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor}; +use scheduler_primitives::*; + +/// A planned transaction. +pub struct PlannedTransaction { + /// The signable transaction. + pub signable: ST, + /// The Eventuality to watch for. + pub eventuality: EventualityFor, + /// The auxilliary data for this transaction. + pub auxilliary: A, +} /// An object able to plan a transaction. 
#[async_trait::async_trait] -pub trait TransactionPlanner: 'static + Send + Sync { +pub trait TransactionPlanner: 'static + Send + Sync { /// An error encountered when determining the fee rate. /// /// This MUST be an ephemeral error. Retrying fetching data from the blockchain MUST eventually @@ -21,8 +32,8 @@ pub trait TransactionPlanner: 'static + Send + Sync { /// The type representing a fee rate to use for transactions. type FeeRate: Clone + Copy; - /// The type representing a planned transaction. - type PlannedTransaction; + /// The type representing a signable transaction. + type SignableTransaction: SignableTransaction; /// Obtain the fee rate to pay. /// @@ -62,7 +73,7 @@ pub trait TransactionPlanner: 'static + Send + Sync { inputs: Vec>, payments: Vec>>, change: Option>, - ) -> Self::PlannedTransaction; + ) -> PlannedTransaction; /// Obtain a PlannedTransaction via amortizing the fee over the payments. /// @@ -77,7 +88,7 @@ pub trait TransactionPlanner: 'static + Send + Sync { inputs: Vec>, mut payments: Vec>>, mut change: Option>, - ) -> Option { + ) -> Option> { // Sanity checks { assert!(!inputs.is_empty()); diff --git a/processor/scheduler/utxo/transaction-chaining/Cargo.toml b/processor/scheduler/utxo/transaction-chaining/Cargo.toml index a6b121286..0b1eb155b 100644 --- a/processor/scheduler/utxo/transaction-chaining/Cargo.toml +++ b/processor/scheduler/utxo/transaction-chaining/Cargo.toml @@ -30,6 +30,6 @@ serai-primitives = { path = "../../../../substrate/primitives", default-features serai-db = { path = "../../../../common/db" } primitives = { package = "serai-processor-primitives", path = "../../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../../scanner" } scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../../primitives" } utxo-scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" } -scanner = { package = "serai-processor-scanner", path = "../../../scanner" } diff --git a/processor/scheduler/utxo/transaction-chaining/src/db.rs b/processor/scheduler/utxo/transaction-chaining/src/db.rs index f6de26d13..7d8007182 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/db.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/db.rs @@ -6,8 +6,8 @@ use serai_primitives::{Coin, Amount}; use serai_db::{Get, DbTxn, create_db}; -use primitives::ReceivedOutput; -use scanner::{ScannerFeed, KeyFor, OutputFor}; +use primitives::{Payment, ReceivedOutput}; +use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor}; create_db! { TransactionChainingScheduler { @@ -15,7 +15,7 @@ create_db! 
{ SerializedOutputs: (key: &[u8], coin: Coin) -> Vec, // We should be immediately able to schedule the fulfillment of payments, yet this may not be // possible if we're in the middle of a multisig rotation (as our output set will be split) - SerializedQueuedPayments: (key: &[u8]) > Vec, + SerializedQueuedPayments: (key: &[u8], coin: Coin) -> Vec, } } @@ -61,13 +61,19 @@ impl Db { pub(crate) fn queued_payments( getter: &impl Get, key: KeyFor, - ) -> Option>> { + coin: Coin, + ) -> Option>>> { todo!("TODO") } - pub(crate) fn set_queued_payments(txn: &mut impl DbTxn, key: KeyFor, queued: Vec>) { + pub(crate) fn set_queued_payments( + txn: &mut impl DbTxn, + key: KeyFor, + coin: Coin, + queued: &Vec>>, + ) { todo!("TODO") } - pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor) { - SerializedQueuedPayments::del(txn, key.to_bytes().as_ref()); + pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { + SerializedQueuedPayments::del(txn, key.to_bytes().as_ref(), coin); } } diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index 8f21e9d62..9e552c133 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -7,11 +7,11 @@ use std::collections::HashMap; use group::GroupEncoding; -use serai_primitives::Coin; +use serai_primitives::{Coin, Amount}; use serai_db::DbTxn; -use primitives::{ReceivedOutput, Payment}; +use primitives::{OutputType, ReceivedOutput, Payment}; use scanner::{ LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, SchedulerUpdate, Scheduler as SchedulerTrait, @@ -22,65 +22,205 @@ use utxo_scheduler_primitives::*; mod db; use db::Db; -/// A planned transaction. -pub struct PlannedTransaction { - /// The signable transaction. - signable: T, - /// The outputs we'll receive from this. - effected_received_outputs: OutputFor, - /// The Eventuality to watch for. - eventuality: EventualityFor, -} +/// The outputs which will be effected by a PlannedTransaction and received by Serai. +pub struct EffectedReceivedOutputs(Vec>); /// A scheduler of transactions for networks premised on the UTXO model which support /// transaction chaining. -pub struct Scheduler< - S: ScannerFeed, - T, - P: TransactionPlanner>, ->(PhantomData, PhantomData, PhantomData

); - -impl>> - Scheduler -{ - fn accumulate_outputs(txn: &mut impl DbTxn, key: KeyFor, outputs: &[OutputFor]) { - // Accumulate them in memory - let mut outputs_by_coin = HashMap::with_capacity(1); - for output in outputs.iter().filter(|output| output.key() == key) { - let coin = output.balance().coin; - if let std::collections::hash_map::Entry::Vacant(e) = outputs_by_coin.entry(coin) { - e.insert(Db::::outputs(txn, key, coin).unwrap()); +pub struct Scheduler>>( + PhantomData, + PhantomData

, +); + +impl>> Scheduler { + fn handle_queued_payments( + &mut self, + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + key: KeyFor, + ) -> Vec> { + let mut eventualities = vec![]; + + for coin in S::NETWORK.coins() { + // Fetch our operating costs and all our outputs + let mut operating_costs = Db::::operating_costs(txn, *coin).0; + let mut outputs = Db::::outputs(txn, key, *coin).unwrap(); + + // Fetch the queued payments + let mut payments = Db::::queued_payments(txn, key, *coin).unwrap(); + if payments.is_empty() { + continue; } - outputs_by_coin.get_mut(&coin).unwrap().push(output.clone()); - } - // Flush them to the database - for (coin, outputs) in outputs_by_coin { - Db::::set_outputs(txn, key, coin, &outputs); + // If this is our only key, our outputs and operating costs should be greater than the + // payments' value + if active_keys.len() == 1 { + // The available amount of fulfill is the amount we have plus the amount we'll reduce by + // An alternative formulation would be `outputs >= (payments - operating costs)`, but + // that'd risk underflow + let available = + operating_costs + outputs.iter().map(|output| output.balance().amount.0).sum::(); + assert!(available >= payments.iter().map(|payment| payment.balance().amount.0).sum::()); + } + + let amount_of_payments_that_can_be_handled = + |operating_costs: u64, outputs: &[_], payments: &[_]| { + let value_available = + operating_costs + outputs.iter().map(|output| output.balance().amount.0).sum::(); + + let mut can_handle = 0; + let mut value_used = 0; + for payment in payments { + value_used += payment.balance().amount.0; + if value_available < value_used { + break; + } + can_handle += 1; + } + + can_handle + }; + + // Find the set of payments we should fulfill at this time + { + // Drop to just the payments we currently have the outputs for + { + let can_handle = + amount_of_payments_that_can_be_handled(operating_costs, &outputs, &payments); + let remaining_payments = payments.drain(can_handle ..).collect::>(); + // Restore the rest to the database + Db::::set_queued_payments(txn, key, *coin, &remaining_payments); + } + let payments_value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + + // If these payments are worth less than the operating costs, immediately drop them + if payments_value <= operating_costs { + operating_costs -= payments_value; + Db::::set_operating_costs(txn, *coin, Amount(operating_costs)); + return vec![]; + } + + // We explicitly sort AFTER deciding which payments to handle so we always handle the + // oldest queued payments first (preventing any from eternally being shuffled to the back + // of the line) + payments.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0)); + } + assert!(!payments.is_empty()); + + // Find the smallest set of outputs usable to fulfill these outputs + // Size is determined by the largest output, not quantity nor aggregate value + { + // We start by sorting low to high + outputs.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0)); + + let value_needed = + payments.iter().map(|payment| payment.balance().amount.0).sum::() - operating_costs; + + let mut needed = 0; + let mut value_present = 0; + for output in &outputs { + needed += 1; + value_present += output.balance().amount.0; + if value_present >= value_needed { + break; + } + } + + // Drain, and save back to the DB, the unnecessary outputs + let remaining_outputs = outputs.drain(needed ..).collect::>(); + Db::::set_outputs(txn, key, *coin, &remaining_outputs); 
+ } + assert!(!outputs.is_empty()); + + // We now have the current operating costs, the outputs we're using, and the payments + // The database has the unused outputs/unfilfillable payments + // Actually plan/send off the transactions + + // While our set of outputs exceed the input limit, aggregate them + while outputs.len() > MAX_INPUTS { + let outputs_chunk = outputs.drain(.. MAX_INPUTS).collect::>(); + + // While we're aggregating these outputs, handle any payments we can + let payments_chunk = loop { + let can_handle = + amount_of_payments_that_can_be_handled(operating_costs, &outputs, &payments); + let payments_chunk = payments.drain(.. can_handle.min(MAX_OUTPUTS)).collect::>(); + + let payments_value = + payments_chunk.iter().map(|payment| payment.balance().amount.0).sum::(); + if payments_value <= operating_costs { + operating_costs -= payments_value; + continue; + } + break payments_chunk; + }; + + let Some(planned) = P::plan_transaction_with_fee_amortization( + &mut operating_costs, + fee_rates[coin], + outputs_chunk, + payments_chunk, + // We always use our key for the change here since we may need this change output to + // finish fulfilling these payments + Some(key), + ) else { + // We amortized all payments, and even when just trying to make the change output, these + // inputs couldn't afford their own aggregation and were written off + continue; + }; + + // Send the transactions off for signing + TransactionsToSign::::send(txn, &key, &planned.signable); + + // Push the Eventualities onto the result + eventualities.push(planned.eventuality); + + let mut effected_received_outputs = planned.auxilliary.0; + // Only handle Change so if someone burns to an External address, we don't use it here + // when the scanner will tell us to return it (without accumulating it) + effected_received_outputs.retain(|output| output.kind() == OutputType::Change); + outputs.append(&mut effected_received_outputs); + } + + // Now that we have an aggregated set of inputs, create the tree for payments + todo!("TODO"); } + + eventualities } } -impl< - S: ScannerFeed, - T: 'static + Send + Sync + SignableTransaction, - P: TransactionPlanner>, - > SchedulerTrait for Scheduler +impl>> SchedulerTrait + for Scheduler { fn activate_key(&mut self, txn: &mut impl DbTxn, key: KeyFor) { for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, *coin).is_none()); Db::::set_outputs(txn, key, *coin, &[]); + assert!(Db::::queued_payments(txn, key, *coin).is_none()); + Db::::set_queued_payments(txn, key, *coin, &vec![]); } } fn flush_key(&mut self, txn: &mut impl DbTxn, retiring_key: KeyFor, new_key: KeyFor) { - todo!("TODO") + for coin in S::NETWORK.coins() { + let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); + let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); + + let mut queued = still_queued; + queued.append(&mut new_queued); + + Db::::set_queued_payments(txn, retiring_key, *coin, &vec![]); + Db::::set_queued_payments(txn, new_key, *coin, &queued); + } } fn retire_key(&mut self, txn: &mut impl DbTxn, key: KeyFor) { for coin in S::NETWORK.coins() { - assert!(Db::::outputs(txn, key, *coin).is_none()); + assert!(Db::::outputs(txn, key, *coin).unwrap().is_empty()); Db::::del_outputs(txn, key, *coin); + assert!(Db::::queued_payments(txn, key, *coin).unwrap().is_empty()); + Db::::del_queued_payments(txn, key, *coin); } } @@ -91,12 +231,41 @@ impl< update: SchedulerUpdate, ) -> HashMap, Vec>> { // Accumulate all the outputs - for key in active_keys { - 
Self::accumulate_outputs(txn, key.0, update.outputs()); + for (key, _) in active_keys { + // Accumulate them in memory + let mut outputs_by_coin = HashMap::with_capacity(1); + for output in update.outputs().iter().filter(|output| output.key() == *key) { + match output.kind() { + OutputType::External | OutputType::Forwarded => {}, + // TODO: Only accumulate these if we haven't already, but do accumulate if not + OutputType::Branch | OutputType::Change => todo!("TODO"), + } + let coin = output.balance().coin; + if let std::collections::hash_map::Entry::Vacant(e) = outputs_by_coin.entry(coin) { + e.insert(Db::::outputs(txn, *key, coin).unwrap()); + } + outputs_by_coin.get_mut(&coin).unwrap().push(output.clone()); + } + + // Flush them to the database + for (coin, outputs) in outputs_by_coin { + Db::::set_outputs(txn, *key, coin, &outputs); + } } let mut fee_rates: HashMap = todo!("TODO"); + // Fulfill the payments we prior couldn't + let mut eventualities = HashMap::new(); + for (key, _stage) in active_keys { + eventualities.insert( + key.to_bytes().as_ref().to_vec(), + self.handle_queued_payments(txn, active_keys, *key), + ); + } + + // TODO: If this key has been flushed, forward all outputs + // Create the transactions for the forwards/burns { let mut planned_txs = vec![]; @@ -137,20 +306,14 @@ impl< planned_txs.push((key, plan)); } - let mut eventualities = HashMap::new(); for (key, planned_tx) in planned_txs { // Send the transactions off for signing - TransactionsToSign::::send(txn, &key, &planned_tx.signable); + TransactionsToSign::::send(txn, &key, &planned_tx.signable); - // Insert the eventualities into the result - eventualities - .entry(key.to_bytes().as_ref().to_vec()) - .or_insert(Vec::with_capacity(1)) - .push(planned_tx.eventuality); + // Insert the Eventualities into the result + eventualities[key.to_bytes().as_ref()].push(planned_tx.eventuality); } - // TODO: Fulfill any payments we prior couldn't - eventualities } } @@ -159,13 +322,29 @@ impl< &mut self, txn: &mut impl DbTxn, active_keys: &[(KeyFor, LifetimeStage)], - payments: Vec>>, + mut payments: Vec>>, ) -> HashMap, Vec>> { - // TODO: Find the key to use for fulfillment - // TODO: Sort outputs and payments by amount - // TODO: For as long as we don't have sufficiently aggregated inputs to handle all payments, - // aggregate - // TODO: Create the tree for the payments - todo!("TODO") + // Find the key to filfill these payments with + let fulfillment_key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + // Queue the payments for this key + for coin in S::NETWORK.coins() { + let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, *coin).unwrap(); + queued_payments + .extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned()); + Db::::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments); + } + + // Handle the queued payments + HashMap::from([( + fulfillment_key.to_bytes().as_ref().to_vec(), + self.handle_queued_payments(txn, active_keys, fulfillment_key), + )]) } } From 408420e3972039671425e2dcc55fabcdac36dae8 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 3 Sep 2024 16:42:47 -0400 Subject: [PATCH 066/179] Ensure the transaction-chaining scheduler doesn't accumulate the same output multiple times 
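With transaction chaining, a planned transaction's Branch/Change outputs are accumulated at planning time, before the scanner ever reports them on-chain. Without a guard, the scanner's later report of the same output would credit it a second time. In sketch form, the guard update() now applies (names per the diff below):

  match output.kind() {
    OutputType::External | OutputType::Forwarded => {}
    OutputType::Branch | OutputType::Change => {
      // Consume the marker set when this output was accumulated at planning
      // time, so this later report of it is skipped exactly once
      if Db::<S>::take_if_already_accumulated_output(txn, output.id()) {
        continue;
      }
    }
  }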
--- .../utxo/transaction-chaining/src/db.rs | 16 ++++++++++++++++ .../utxo/transaction-chaining/src/lib.rs | 17 +++++++++++++---- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/processor/scheduler/utxo/transaction-chaining/src/db.rs b/processor/scheduler/utxo/transaction-chaining/src/db.rs index 7d8007182..d629480f1 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/db.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/db.rs @@ -13,6 +13,7 @@ create_db! { TransactionChainingScheduler { OperatingCosts: (coin: Coin) -> Amount, SerializedOutputs: (key: &[u8], coin: Coin) -> Vec, + AlreadyAccumulatedOutput: (id: &[u8]) -> (), // We should be immediately able to schedule the fulfillment of payments, yet this may not be // possible if we're in the middle of a multisig rotation (as our output set will be split) SerializedQueuedPayments: (key: &[u8], coin: Coin) -> Vec, @@ -58,6 +59,21 @@ impl Db { SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin); } + pub(crate) fn set_already_accumulated_output( + txn: &mut impl DbTxn, + output: as ReceivedOutput, AddressFor>>::Id, + ) { + AlreadyAccumulatedOutput::set(txn, output.as_ref(), &()); + } + pub(crate) fn take_if_already_accumulated_output( + txn: &mut impl DbTxn, + output: as ReceivedOutput, AddressFor>>::Id, + ) -> bool { + let res = AlreadyAccumulatedOutput::get(txn, output.as_ref()).is_some(); + AlreadyAccumulatedOutput::del(txn, output.as_ref()); + res + } + pub(crate) fn queued_payments( getter: &impl Get, key: KeyFor, diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index 9e552c133..f74e2c2c4 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -60,7 +60,9 @@ impl>> Sched // that'd risk underflow let available = operating_costs + outputs.iter().map(|output| output.balance().amount.0).sum::(); - assert!(available >= payments.iter().map(|payment| payment.balance().amount.0).sum::()); + assert!( + available >= payments.iter().map(|payment| payment.balance().amount.0).sum::() + ); } let amount_of_payments_that_can_be_handled = @@ -179,6 +181,9 @@ impl>> Sched // Only handle Change so if someone burns to an External address, we don't use it here // when the scanner will tell us to return it (without accumulating it) effected_received_outputs.retain(|output| output.kind() == OutputType::Change); + for output in &effected_received_outputs { + Db::::set_already_accumulated_output(txn, output.id()); + } outputs.append(&mut effected_received_outputs); } @@ -236,9 +241,13 @@ impl>> Sched let mut outputs_by_coin = HashMap::with_capacity(1); for output in update.outputs().iter().filter(|output| output.key() == *key) { match output.kind() { - OutputType::External | OutputType::Forwarded => {}, - // TODO: Only accumulate these if we haven't already, but do accumulate if not - OutputType::Branch | OutputType::Change => todo!("TODO"), + OutputType::External | OutputType::Forwarded => {} + // Only accumulate these if we haven't already + OutputType::Branch | OutputType::Change => { + if Db::::take_if_already_accumulated_output(txn, output.id()) { + continue; + } + } } let coin = output.balance().coin; if let std::collections::hash_map::Entry::Vacant(e) = outputs_by_coin.entry(coin) { From fe889f941f44d56a12120379127fc8833ffcb1b7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 3 Sep 2024 18:51:27 -0400 Subject: [PATCH 067/179] Work on the tree logic in 
the transaction-chaining scheduler --- .../scheduler/utxo/primitives/src/lib.rs | 26 +- .../utxo/transaction-chaining/src/lib.rs | 272 +++++++++++------- 2 files changed, 191 insertions(+), 107 deletions(-) diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs index f3e220b0b..af8b985f9 100644 --- a/processor/scheduler/utxo/primitives/src/lib.rs +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -79,7 +79,8 @@ pub trait TransactionPlanner: 'static + Send + Sync { /// /// `operating_costs` is accrued to if Serai faces the burden of a fee or drops inputs not worth /// accumulating. `operating_costs` will be amortized along with this transaction's fee as - /// possible. Please see `spec/processor/UTXO Management.md` for more information. + /// possible, if there is a change output. Please see `spec/processor/UTXO Management.md` for + /// more information. /// /// Returns `None` if the fee exceeded the inputs, or `Some` otherwise. fn plan_transaction_with_fee_amortization( @@ -89,6 +90,12 @@ pub trait TransactionPlanner: 'static + Send + Sync { mut payments: Vec>>, mut change: Option>, ) -> Option> { + // If there's no change output, we can't recoup any operating costs we would amortize + // We also don't have any losses if the inputs are written off/the change output is reduced + let mut operating_costs_if_no_change = 0; + let operating_costs_in_effect = + if change.is_none() { &mut operating_costs_if_no_change } else { operating_costs }; + // Sanity checks { assert!(!inputs.is_empty()); @@ -101,7 +108,8 @@ pub trait TransactionPlanner: 'static + Send + Sync { assert_eq!(coin, payment.balance().coin); } assert!( - (inputs.iter().map(|input| input.balance().amount.0).sum::() + *operating_costs) >= + (inputs.iter().map(|input| input.balance().amount.0).sum::() + + *operating_costs_in_effect) >= payments.iter().map(|payment| payment.balance().amount.0).sum::(), "attempted to fulfill payments without a sufficient input set" ); @@ -119,7 +127,7 @@ pub trait TransactionPlanner: 'static + Send + Sync { while !payments.is_empty() { // We need to pay the fee, and any accrued operating costs, minus what we've already // amortized - let adjusted_fee = (*operating_costs + fee).saturating_sub(amortized); + let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized); /* Ideally, we wouldn't use a ceil div yet would be accurate about it. 
Any remainder could @@ -154,16 +162,16 @@ pub trait TransactionPlanner: 'static + Send + Sync { // dust if inputs < (fee + S::dust(coin).0) { // Write off these inputs - *operating_costs += inputs; + *operating_costs_in_effect += inputs; // Yet also claw back the payments we dropped, as we only lost the change // The dropped payments will be worth less than the inputs + operating_costs we started // with, so this shouldn't use `saturating_sub` - *operating_costs -= amortized; + *operating_costs_in_effect -= amortized; None?; } } else { // Since we have payments which can pay the fee we ended up with, amortize it - let adjusted_fee = (*operating_costs + fee).saturating_sub(amortized); + let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized); let per_payment_base_fee = adjusted_fee / u64::try_from(payments.len()).unwrap(); let payments_paying_one_atomic_unit_more = usize::try_from(adjusted_fee % u64::try_from(payments.len()).unwrap()).unwrap(); @@ -174,7 +182,7 @@ pub trait TransactionPlanner: 'static + Send + Sync { payment.balance().amount.0 -= per_payment_fee; amortized += per_payment_fee; } - assert!(amortized >= (*operating_costs + fee)); + assert!(amortized >= (*operating_costs_in_effect + fee)); // If the change is less than the dust, drop it let would_be_change = inputs.iter().map(|input| input.balance().amount.0).sum::() - @@ -182,12 +190,12 @@ pub trait TransactionPlanner: 'static + Send + Sync { fee; if would_be_change < S::dust(coin).0 { change = None; - *operating_costs += would_be_change; + *operating_costs_in_effect += would_be_change; } } // Update the amount of operating costs - *operating_costs = (*operating_costs + fee).saturating_sub(amortized); + *operating_costs_in_effect = (*operating_costs_in_effect + fee).saturating_sub(amortized); } // Because we amortized, or accrued as operating costs, the fee, make the transaction diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index f74e2c2c4..8e567e14e 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -7,7 +7,7 @@ use std::collections::HashMap; use group::GroupEncoding; -use serai_primitives::{Coin, Amount}; +use serai_primitives::{Coin, Amount, Balance}; use serai_db::DbTxn; @@ -41,12 +41,56 @@ impl>> Sched ) -> Vec> { let mut eventualities = vec![]; + let mut accumulate_outputs = |txn, outputs: Vec>| { + let mut outputs_by_key = HashMap::new(); + for output in outputs { + Db::::set_already_accumulated_output(txn, output.id()); + let coin = output.balance().coin; + outputs_by_key + .entry((output.key().to_bytes().as_ref().to_vec(), coin)) + .or_insert_with(|| (output.key(), Db::::outputs(txn, output.key(), coin).unwrap())) + .1 + .push(output); + } + for ((_key_vec, coin), (key, outputs)) in outputs_by_key { + Db::::set_outputs(txn, key, coin, &outputs); + } + }; + for coin in S::NETWORK.coins() { // Fetch our operating costs and all our outputs let mut operating_costs = Db::::operating_costs(txn, *coin).0; let mut outputs = Db::::outputs(txn, key, *coin).unwrap(); - // Fetch the queued payments + // If we have more than the maximum amount of inputs, aggregate until we don't + { + while outputs.len() > MAX_INPUTS { + let Some(planned) = P::plan_transaction_with_fee_amortization( + &mut operating_costs, + fee_rates[coin], + outputs.drain(.. 
MAX_INPUTS).collect::>(), + vec![], + Some(key_for_change), + ) else { + // We amortized all payments, and even when just trying to make the change output, these + // inputs couldn't afford their own aggregation and were written off + Db::::set_operating_costs(txn, *coin, Amount(operating_costs)); + continue; + }; + + // Send the transactions off for signing + TransactionsToSign::::send(txn, &key, &planned.signable); + // Push the Eventualities onto the result + eventualities.push(planned.eventuality); + // Accumulate the outputs + Db::set_outputs(txn, key, *coin, &outputs); + accumulate_outputs(txn, planned.auxilliary.0); + outputs = Db::outputs(txn, key, *coin).unwrap(); + } + Db::::set_operating_costs(txn, *coin, Amount(operating_costs)); + } + + // Now, handle the payments let mut payments = Db::::queued_payments(txn, key, *coin).unwrap(); if payments.is_empty() { continue; @@ -55,21 +99,24 @@ impl>> Sched // If this is our only key, our outputs and operating costs should be greater than the // payments' value if active_keys.len() == 1 { - // The available amount of fulfill is the amount we have plus the amount we'll reduce by + // The available amount to fulfill is the amount we have plus the amount we'll reduce by // An alternative formulation would be `outputs >= (payments - operating costs)`, but // that'd risk underflow - let available = + let value_available = operating_costs + outputs.iter().map(|output| output.balance().amount.0).sum::(); + assert!( - available >= payments.iter().map(|payment| payment.balance().amount.0).sum::() + value_available >= payments.iter().map(|payment| payment.balance().amount.0).sum::() ); } - let amount_of_payments_that_can_be_handled = - |operating_costs: u64, outputs: &[_], payments: &[_]| { - let value_available = - operating_costs + outputs.iter().map(|output| output.balance().amount.0).sum::(); + // Find the set of payments we should fulfill at this time + loop { + let value_available = + operating_costs + outputs.iter().map(|output| output.balance().amount.0).sum::(); + // Drop to just the payments we currently have the outputs for + { let mut can_handle = 0; let mut value_used = 0; for payment in payments { @@ -80,15 +127,6 @@ impl>> Sched can_handle += 1; } - can_handle - }; - - // Find the set of payments we should fulfill at this time - { - // Drop to just the payments we currently have the outputs for - { - let can_handle = - amount_of_payments_that_can_be_handled(operating_costs, &outputs, &payments); let remaining_payments = payments.drain(can_handle ..).collect::>(); // Restore the rest to the database Db::::set_queued_payments(txn, key, *coin, &remaining_payments); @@ -99,96 +137,132 @@ impl>> Sched if payments_value <= operating_costs { operating_costs -= payments_value; Db::::set_operating_costs(txn, *coin, Amount(operating_costs)); - return vec![]; - } - - // We explicitly sort AFTER deciding which payments to handle so we always handle the - // oldest queued payments first (preventing any from eternally being shuffled to the back - // of the line) - payments.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0)); - } - assert!(!payments.is_empty()); - // Find the smallest set of outputs usable to fulfill these outputs - // Size is determined by the largest output, not quantity nor aggregate value - { - // We start by sorting low to high - outputs.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0)); - - let value_needed = - payments.iter().map(|payment| payment.balance().amount.0).sum::() - operating_costs; - - 
let mut needed = 0; - let mut value_present = 0; - for output in &outputs { - needed += 1; - value_present += output.balance().amount.0; - if value_present >= value_needed { + // Reset payments to the queued payments + payments = Db::::queued_payments(txn, key, *coin).unwrap(); + // If there's no more payments, stop looking for which payments we should fulfill + if payments.is_empty() { break; } + + // Find which of these we should handle + continue; } - // Drain, and save back to the DB, the unnecessary outputs - let remaining_outputs = outputs.drain(needed ..).collect::>(); - Db::::set_outputs(txn, key, *coin, &remaining_outputs); + break; + } + if payments.is_empty() { + continue; + } + + // Create a tree to fulfill all of the payments + struct TreeTransaction { + payments: Vec>>, + children: Vec>, + value: u64, + } + let mut tree_transactions = vec![]; + for payments in payments.chunks(MAX_OUTPUTS) { + let value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + tree_transactions.push(TreeTransaction:: { + payments: payments.to_vec(), + children: vec![], + value, + }); } - assert!(!outputs.is_empty()); - - // We now have the current operating costs, the outputs we're using, and the payments - // The database has the unused outputs/unfilfillable payments - // Actually plan/send off the transactions - - // While our set of outputs exceed the input limit, aggregate them - while outputs.len() > MAX_INPUTS { - let outputs_chunk = outputs.drain(.. MAX_INPUTS).collect::>(); - - // While we're aggregating these outputs, handle any payments we can - let payments_chunk = loop { - let can_handle = - amount_of_payments_that_can_be_handled(operating_costs, &outputs, &payments); - let payments_chunk = payments.drain(.. can_handle.min(MAX_OUTPUTS)).collect::>(); - - let payments_value = - payments_chunk.iter().map(|payment| payment.balance().amount.0).sum::(); - if payments_value <= operating_costs { - operating_costs -= payments_value; + // While we haven't calculated a tree root, or the tree root doesn't support a change output, + // keep working + while (tree_transactions.len() != 1) || (tree_transactions[0].payments.len() == MAX_OUTPUTS) { + let mut next_tree_transactions = vec![]; + for children in tree_transactions.chunks(MAX_OUTPUTS) { + let payments = children + .iter() + .map(|child| { + Payment::new( + P::branch_address(key), + Balance { coin: *coin, amount: Amount(child.value) }, + None, + ) + }) + .collect(); + let value = children.iter().map(|child| child.value).sum(); + next_tree_transactions.push(TreeTransaction { + payments, + children: children.to_vec(), + value, + }); + } + tree_transactions = next_tree_transactions; + } + assert_eq!(tree_transactions.len(), 1); + assert!((tree_transactions.payments.len() + 1) <= MAX_OUTPUTS); + + // Create the transaction for the root of the tree + let Some(planned) = P::plan_transaction_with_fee_amortization( + &mut operating_costs, + fee_rates[coin], + outputs, + tree_transactions.payments, + Some(key_for_change), + ) else { + Db::::set_operating_costs(txn, *coin, Amount(operating_costs)); + continue; + }; + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + // We accumulate the change output, but consume the branches here + accumulate_outputs( + txn, + planned + .auxilliary + .0 + .iter() + .filter(|output| output.kind() == OutputType::Change) + .cloned() + .collect(), + ); + // Filter the outputs to the change outputs + let mut branch_outputs = planned.auxilliary.0; + 
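// A sketch, not from the patch, of the shape the tree-building loop above
// converges to. Each layer chunks the one below it MAX_OUTPUTS at a time, so
// the width shrinks geometrically and the depth is logarithmic in the payment
// count (the real loop also adds a layer whenever the root is full, keeping
// room for a change output). Standalone arithmetic with illustrative names:
fn tree_layers(payments: usize, max_outputs: usize) -> usize {
  // The leaves: one transaction per MAX_OUTPUTS payments
  let mut width = payments.div_ceil(max_outputs);
  let mut layers = 1;
  // Branch layers until a single root remains
  while width > 1 {
    width = width.div_ceil(max_outputs);
    layers += 1;
  }
  layers
}

#[test]
fn logarithmic_depth() {
  // 1,000 payments with 16-way fan-out: 63 leaf transactions, 4 branches, 1 root
  assert_eq!(tree_layers(1_000, 16), 3);
}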
branch_outputs.retain(|output| output.kind() == OutputType::Branch); + + // This is recursive, yet only recurses with logarithmic depth + let execute_tree_transaction = |branch_outputs, children| { + assert_eq!(branch_outputs.len(), children.len()); + + // Sort the branch outputs by their value + branch_outputs.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0)); + // Find the child for each branch output + // This is only done within a transaction, not across the layer, so we don't have branches + // created in transactions with less outputs (and therefore less fees) jump places with + // other branches + children.sort_by(|a, b| a.value.cmp(&b.value)); + + for (branch_output, child) in branch_outputs.into_iter().zip(children) { + assert_eq!(branch_output.kind(), OutputType::Branch); + Db::::set_already_accumulated_output(txn, branch_output.id()); + + let Some(planned) = P::plan_transaction_with_fee_amortization( + // Uses 0 as there's no operating costs to incur/amortize here + &mut 0, + fee_rates[coin], + vec![branch_output], + child.payments, + None, + ) else { + // This Branch isn't viable, so drop it (and its children) continue; + }; + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + if !child.children.is_empty() { + execute_tree_transaction(planned.auxilliary.0, child.children); } - break payments_chunk; - }; - - let Some(planned) = P::plan_transaction_with_fee_amortization( - &mut operating_costs, - fee_rates[coin], - outputs_chunk, - payments_chunk, - // We always use our key for the change here since we may need this change output to - // finish fulfilling these payments - Some(key), - ) else { - // We amortized all payments, and even when just trying to make the change output, these - // inputs couldn't afford their own aggregation and were written off - continue; - }; - - // Send the transactions off for signing - TransactionsToSign::::send(txn, &key, &planned.signable); - - // Push the Eventualities onto the result - eventualities.push(planned.eventuality); - - let mut effected_received_outputs = planned.auxilliary.0; - // Only handle Change so if someone burns to an External address, we don't use it here - // when the scanner will tell us to return it (without accumulating it) - effected_received_outputs.retain(|output| output.kind() == OutputType::Change); - for output in &effected_received_outputs { - Db::::set_already_accumulated_output(txn, output.id()); } - outputs.append(&mut effected_received_outputs); + }; + if !tree_transaction.children.is_empty() { + execute_tree_transaction(branch_outputs, tree_transaction.children); } - - // Now that we have an aggregated set of inputs, create the tree for payments - todo!("TODO"); } eventualities @@ -288,6 +362,7 @@ impl>> Sched let Some(plan) = P::plan_transaction_with_fee_amortization( // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be forwarded, we simply drop it &mut 0, fee_rates[&forward.balance().coin], vec![forward.clone()], @@ -304,6 +379,7 @@ impl>> Sched Payment::new(to_return.address().clone(), to_return.output().balance(), None); let Some(plan) = P::plan_transaction_with_fee_amortization( // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be returned, we simply drop it &mut 0, fee_rates[&out_instruction.balance().coin], vec![to_return.output().clone()], From ea519adeba731c1e9d09f0fe2cc34b84c775e36a Mon Sep 17 00:00:00 2001 From: Luke 
Parker Date: Tue, 3 Sep 2024 19:33:38 -0400 Subject: [PATCH 068/179] Near-complete version of the tree algorithm in the transaction-chaining scheduler --- processor/scanner/src/lib.rs | 2 + .../scheduler/utxo/primitives/src/lib.rs | 5 + .../utxo/transaction-chaining/src/lib.rs | 175 +++++++++++++----- 3 files changed, 137 insertions(+), 45 deletions(-) diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 539bd4a7d..1818fbf0c 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -277,6 +277,7 @@ pub trait Scheduler: 'static + Send { fn update( &mut self, txn: &mut impl DbTxn, + block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], update: SchedulerUpdate, ) -> HashMap, Vec>>; @@ -316,6 +317,7 @@ pub trait Scheduler: 'static + Send { fn fulfill( &mut self, txn: &mut impl DbTxn, + block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], payments: Vec>>, ) -> HashMap, Vec>>; diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs index af8b985f9..356192ee5 100644 --- a/processor/scheduler/utxo/primitives/src/lib.rs +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -35,6 +35,11 @@ pub trait TransactionPlanner: 'static + Send + Sync { /// The type representing a signable transaction. type SignableTransaction: SignableTransaction; + /// The maximum amount of inputs allowed in a transaction. + const MAX_INPUTS: usize; + /// The maximum amount of outputs allowed in a transaction, including the change output. + const MAX_OUTPUTS: usize; + /// Obtain the fee rate to pay. /// /// This must be constant to the finalized block referenced by this block number and the coin. diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index 8e567e14e..31c70c1e8 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -37,6 +37,7 @@ impl>> Sched &mut self, txn: &mut impl DbTxn, active_keys: &[(KeyFor, LifetimeStage)], + fee_rates: &HashMap, key: KeyFor, ) -> Vec> { let mut eventualities = vec![]; @@ -64,11 +65,11 @@ impl>> Sched // If we have more than the maximum amount of inputs, aggregate until we don't { - while outputs.len() > MAX_INPUTS { + while outputs.len() > P::MAX_INPUTS { let Some(planned) = P::plan_transaction_with_fee_amortization( &mut operating_costs, fee_rates[coin], - outputs.drain(.. MAX_INPUTS).collect::>(), + outputs.drain(.. 
P::MAX_INPUTS).collect::>(), vec![], Some(key_for_change), ) else { @@ -156,13 +157,14 @@ impl>> Sched } // Create a tree to fulfill all of the payments + #[derive(Clone)] struct TreeTransaction { payments: Vec>>, children: Vec>, value: u64, } let mut tree_transactions = vec![]; - for payments in payments.chunks(MAX_OUTPUTS) { + for payments in payments.chunks(P::MAX_OUTPUTS) { let value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); tree_transactions.push(TreeTransaction:: { payments: payments.to_vec(), @@ -172,9 +174,21 @@ impl>> Sched } // While we haven't calculated a tree root, or the tree root doesn't support a change output, // keep working - while (tree_transactions.len() != 1) || (tree_transactions[0].payments.len() == MAX_OUTPUTS) { + while (tree_transactions.len() != 1) || + (tree_transactions[0].payments.len() == P::MAX_OUTPUTS) + { let mut next_tree_transactions = vec![]; - for children in tree_transactions.chunks(MAX_OUTPUTS) { + for children in tree_transactions.chunks(P::MAX_OUTPUTS) { + // If this is the last chunk, and it doesn't need to accumulated, continue + if (children.len() < P::MAX_OUTPUTS) && + ((next_tree_transactions.len() + children.len()) < P::MAX_OUTPUTS) + { + for child in children { + next_tree_transactions.push(child.clone()); + } + continue; + } + let payments = children .iter() .map(|child| { @@ -194,40 +208,19 @@ impl>> Sched } tree_transactions = next_tree_transactions; } - assert_eq!(tree_transactions.len(), 1); - assert!((tree_transactions.payments.len() + 1) <= MAX_OUTPUTS); - - // Create the transaction for the root of the tree - let Some(planned) = P::plan_transaction_with_fee_amortization( - &mut operating_costs, - fee_rates[coin], - outputs, - tree_transactions.payments, - Some(key_for_change), - ) else { - Db::::set_operating_costs(txn, *coin, Amount(operating_costs)); - continue; - }; - TransactionsToSign::::send(txn, &key, &planned.signable); - eventualities.push(planned.eventuality); - - // We accumulate the change output, but consume the branches here - accumulate_outputs( - txn, - planned - .auxilliary - .0 - .iter() - .filter(|output| output.kind() == OutputType::Change) - .cloned() - .collect(), - ); - // Filter the outputs to the change outputs - let mut branch_outputs = planned.auxilliary.0; - branch_outputs.retain(|output| output.kind() == OutputType::Branch); // This is recursive, yet only recurses with logarithmic depth - let execute_tree_transaction = |branch_outputs, children| { + fn execute_tree_transaction< + S: ScannerFeed, + P: TransactionPlanner>, + >( + txn: &mut impl DbTxn, + fee_rate: P::FeeRate, + eventualities: &mut Vec>, + key: KeyFor, + mut branch_outputs: Vec>, + mut children: Vec>, + ) { assert_eq!(branch_outputs.len(), children.len()); // Sort the branch outputs by their value @@ -238,14 +231,57 @@ impl>> Sched // other branches children.sort_by(|a, b| a.value.cmp(&b.value)); - for (branch_output, child) in branch_outputs.into_iter().zip(children) { + for (branch_output, mut child) in branch_outputs.into_iter().zip(children) { assert_eq!(branch_output.kind(), OutputType::Branch); Db::::set_already_accumulated_output(txn, branch_output.id()); + // We need to compensate for the value of this output being less than the value of the + // payments + { + let fee_to_amortize = child.value - branch_output.balance().amount.0; + let mut amortized = 0; + 'outer: while (!child.payments.is_empty()) && (amortized < fee_to_amortize) { + let adjusted_fee = fee_to_amortize - amortized; + let payments_len = 
u64::try_from(child.payments.len()).unwrap(); + let per_payment_fee_check = adjusted_fee.div_ceil(payments_len); + + let mut i = 0; + while i < child.payments.len() { + let amount = child.payments[i].balance().amount.0; + if amount <= per_payment_fee_check { + child.payments.swap_remove(i); + child.children.swap_remove(i); + amortized += amount; + continue 'outer; + } + i += 1; + } + + // Since all payments can pay the fee, deduct accordingly + for (i, payment) in child.payments.iter_mut().enumerate() { + let Balance { coin, amount } = payment.balance(); + let mut amount = amount.0; + amount -= adjusted_fee / payments_len; + if i < usize::try_from(adjusted_fee % payments_len).unwrap() { + amount -= 1; + } + + *payment = Payment::new( + payment.address().clone(), + Balance { coin, amount: Amount(amount) }, + None, + ); + } + } + if child.payments.is_empty() { + continue; + } + } + let Some(planned) = P::plan_transaction_with_fee_amortization( // Uses 0 as there's no operating costs to incur/amortize here &mut 0, - fee_rates[coin], + fee_rate, vec![branch_output], child.payments, None, @@ -256,12 +292,59 @@ impl>> Sched TransactionsToSign::::send(txn, &key, &planned.signable); eventualities.push(planned.eventuality); if !child.children.is_empty() { - execute_tree_transaction(planned.auxilliary.0, child.children); + execute_tree_transaction::( + txn, + fee_rate, + eventualities, + key, + planned.auxilliary.0, + child.children, + ); } } + } + + assert_eq!(tree_transactions.len(), 1); + assert!((tree_transactions[0].payments.len() + 1) <= P::MAX_OUTPUTS); + + // Create the transaction for the root of the tree + let Some(planned) = P::plan_transaction_with_fee_amortization( + &mut operating_costs, + fee_rates[coin], + outputs, + tree_transactions[0].payments, + Some(key_for_change), + ) else { + Db::::set_operating_costs(txn, *coin, Amount(operating_costs)); + continue; }; - if !tree_transaction.children.is_empty() { - execute_tree_transaction(branch_outputs, tree_transaction.children); + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + // We accumulate the change output, but consume the branches here + accumulate_outputs( + txn, + planned + .auxilliary + .0 + .iter() + .filter(|output| output.kind() == OutputType::Change) + .cloned() + .collect(), + ); + // Filter the outputs to the change outputs + let mut branch_outputs = planned.auxilliary.0; + branch_outputs.retain(|output| output.kind() == OutputType::Branch); + + if !tree_transactions[0].children.is_empty() { + execute_tree_transaction::( + txn, + fee_rates[coin], + &mut eventualities, + key, + branch_outputs, + tree_transactions[0].children, + ); } } @@ -306,6 +389,7 @@ impl>> Sched fn update( &mut self, txn: &mut impl DbTxn, + block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], update: SchedulerUpdate, ) -> HashMap, Vec>> { @@ -336,14 +420,14 @@ impl>> Sched } } - let mut fee_rates: HashMap = todo!("TODO"); + let fee_rates = block.fee_rates(); // Fulfill the payments we prior couldn't let mut eventualities = HashMap::new(); for (key, _stage) in active_keys { eventualities.insert( key.to_bytes().as_ref().to_vec(), - self.handle_queued_payments(txn, active_keys, *key), + self.handle_queued_payments(txn, active_keys, fee_rates, *key), ); } @@ -406,6 +490,7 @@ impl>> Sched fn fulfill( &mut self, txn: &mut impl DbTxn, + block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], mut payments: Vec>>, ) -> HashMap, Vec>> { @@ -429,7 +514,7 @@ impl>> Sched // Handle the queued 
payments HashMap::from([( fulfillment_key.to_bytes().as_ref().to_vec(), - self.handle_queued_payments(txn, active_keys, fulfillment_key), + self.handle_queued_payments(txn, active_keys, block.fee_rates(), fulfillment_key), )]) } } From db9df8a41623c8c5e310e22e4fdc0b6d22d93b49 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 4 Sep 2024 01:44:21 -0400 Subject: [PATCH 069/179] Finish the tree logic in the transaction-chaining scheduler Also completes the DB functions, makes Scheduler never instantiated, and ensures tree roots have change outputs. --- processor/primitives/src/payment.rs | 21 + processor/scanner/src/eventuality/mod.rs | 28 +- processor/scanner/src/lib.rs | 21 +- .../scheduler/utxo/primitives/src/lib.rs | 18 +- .../utxo/transaction-chaining/src/db.rs | 21 +- .../utxo/transaction-chaining/src/lib.rs | 694 ++++++++++-------- 6 files changed, 456 insertions(+), 347 deletions(-) diff --git a/processor/primitives/src/payment.rs b/processor/primitives/src/payment.rs index 1bbb06048..bf3c918c1 100644 --- a/processor/primitives/src/payment.rs +++ b/processor/primitives/src/payment.rs @@ -1,3 +1,7 @@ +use std::io; + +use scale::{Encode, Decode, IoReader}; + use serai_primitives::{Balance, Data}; use serai_coins_primitives::OutInstructionWithBalance; @@ -27,6 +31,7 @@ impl Payment { pub fn new(address: A, balance: Balance, data: Option>) -> Self { Payment { address, balance, data } } + /// The address to pay. pub fn address(&self) -> &A { &self.address @@ -39,4 +44,20 @@ impl Payment { pub fn data(&self) -> &Option> { &self.data } + + /// Read a Payment. + pub fn read(reader: &mut impl io::Read) -> io::Result { + let address = A::read(reader)?; + let reader = &mut IoReader(reader); + let balance = Balance::decode(reader).map_err(io::Error::other)?; + let data = Option::>::decode(reader).map_err(io::Error::other)?; + Ok(Self { address, balance, data }) + } + /// Write the Payment. + pub fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.address.write(writer).unwrap(); + self.balance.encode_to(writer); + self.data.encode_to(writer); + Ok(()) + } } diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 5a7b4cca0..84670f79c 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -1,3 +1,4 @@ +use core::marker::PhantomData; use std::collections::{HashSet, HashMap}; use group::GroupEncoding; @@ -101,11 +102,11 @@ fn intake_eventualities( pub(crate) struct EventualityTask> { db: D, feed: S, - scheduler: Sch, + scheduler: PhantomData, } impl> EventualityTask { - pub(crate) fn new(mut db: D, feed: S, scheduler: Sch, start_block: u64) -> Self { + pub(crate) fn new(mut db: D, feed: S, start_block: u64) -> Self { if EventualityDb::::next_to_check_for_eventualities_block(&db).is_none() { // Initialize the DB let mut txn = db.txn(); @@ -113,7 +114,7 @@ impl> EventualityTask { txn.commit(); } - Self { db, feed, scheduler } + Self { db, feed, scheduler: PhantomData } } #[allow(clippy::type_complexity)] @@ -146,7 +147,7 @@ impl> EventualityTask { } // Returns a boolean of if we intaked any Burns. 
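// A usage sketch, not part of the patch, for the Payment::{read, write} pair
// added above. The encoding is self-delimiting (the address's own format, then
// the SCALE-encoded Balance and optional Data), so a buffer of concatenated
// payments decodes by reading until the buffer is exhausted — the loop the
// scheduler's db.rs adopts later in this patch. `read_payments` is a
// hypothetical helper; it assumes this crate's Address trait is in scope.
fn read_payments<A: Address>(mut buf: &[u8]) -> std::io::Result<Vec<Payment<A>>> {
  let mut res = vec![];
  while !buf.is_empty() {
    // Each call consumes exactly one Payment's encoding from the front
    res.push(Payment::read(&mut buf)?);
  }
  Ok(res)
}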
- fn intake_burns(&mut self) -> bool { + async fn intake_burns(&mut self) -> Result { let mut intaked_any = false; // If we've handled an notable block, we may have Burns being queued with it as the reference @@ -158,6 +159,8 @@ impl> EventualityTask { // others the new key let (_keys, keys_with_stages) = self.keys_and_keys_with_stages(latest_handled_notable_block); + let block = self.feed.block_by_number(&self.db, latest_handled_notable_block).await?; + let mut txn = self.db.txn(); // Drain the entire channel while let Some(burns) = @@ -165,8 +168,9 @@ impl> EventualityTask { { intaked_any = true; - let new_eventualities = self.scheduler.fulfill( + let new_eventualities = Sch::fulfill( &mut txn, + &block, &keys_with_stages, burns .into_iter() @@ -178,7 +182,7 @@ impl> EventualityTask { txn.commit(); } - intaked_any + Ok(intaked_any) } } @@ -197,7 +201,7 @@ impl> ContinuallyRan for EventualityTas // Start by intaking any Burns we have sitting around // It's important we run this regardless of if we have a new block to handle - made_progress |= self.intake_burns(); + made_progress |= self.intake_burns().await?; /* Eventualities increase upon one of two cases: @@ -253,7 +257,7 @@ impl> ContinuallyRan for EventualityTas // state will be for the newer block) #[allow(unused_assignments)] { - made_progress |= self.intake_burns(); + made_progress |= self.intake_burns().await?; } } @@ -278,7 +282,7 @@ impl> ContinuallyRan for EventualityTas for key in &keys { // If this is the key's activation block, activate it if key.activation_block_number == b { - self.scheduler.activate_key(&mut txn, key.key); + Sch::activate_key(&mut txn, key.key); } let completed_eventualities = { @@ -431,7 +435,7 @@ impl> ContinuallyRan for EventualityTas after a later one was already used). */ let new_eventualities = - self.scheduler.update(&mut txn, &keys_with_stages, scheduler_update); + Sch::update(&mut txn, &block, &keys_with_stages, scheduler_update); // Intake the new Eventualities for key in new_eventualities.keys() { keys @@ -451,7 +455,7 @@ impl> ContinuallyRan for EventualityTas key.key != keys.last().unwrap().key, "key which was forwarding was the last key (which has no key after it to forward to)" ); - self.scheduler.flush_key(&mut txn, key.key, keys.last().unwrap().key); + Sch::flush_key(&mut txn, &block, key.key, keys.last().unwrap().key); } // Now that we've intaked any Eventualities caused, check if we're retiring any keys @@ -469,7 +473,7 @@ impl> ContinuallyRan for EventualityTas // We tell the scheduler to retire it now as we're done with it, and this fn doesn't // require it be called with a canonical order - self.scheduler.retire_key(&mut txn, key.key); + Sch::retire_key(&mut txn, key.key); } } } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 1818fbf0c..8ecb731f5 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -163,6 +163,8 @@ pub type AddressFor = <::Block as Block>::Address; pub type OutputFor = <::Block as Block>::Output; /// The eventuality type for this ScannerFeed. pub type EventualityFor = <::Block as Block>::Eventuality; +/// The block type for this ScannerFeed. +pub type BlockFor = ::Block; #[async_trait::async_trait] pub trait BatchPublisher: 'static + Send + Sync { @@ -245,7 +247,7 @@ pub trait Scheduler: 'static + Send { /// /// This SHOULD setup any necessary database structures. This SHOULD NOT cause the new key to /// be used as the primary key. The multisig rotation time clearly establishes its steps. 
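// A minimal sketch, not from the patch, of the pattern this commit moves the
// scheduler towards: every method becomes an associated function, so the
// scheduler contributes behavior (not state) and is never instantiated. The
// trait and task below are illustrative stand-ins, not the real API.
use core::marker::PhantomData;

trait StatelessScheduler: 'static + Send {
  fn activate_key(key: [u8; 32]);
}

struct Task<Sch: StatelessScheduler> {
  // The scheduler only exists at the type level, hence PhantomData
  _scheduler: PhantomData<Sch>,
}

impl<Sch: StatelessScheduler> Task<Sch> {
  fn on_activation(&self, key: [u8; 32]) {
    // Statically dispatched; no `self.scheduler` field is required
    Sch::activate_key(key);
  }
}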
- fn activate_key(&mut self, txn: &mut impl DbTxn, key: KeyFor); + fn activate_key(txn: &mut impl DbTxn, key: KeyFor); /// Flush all outputs within a retiring key to the new key. /// @@ -257,14 +259,20 @@ pub trait Scheduler: 'static + Send { /// /// If the retiring key has any unfulfilled payments associated with it, those MUST be made /// the responsibility of the new key. - fn flush_key(&mut self, txn: &mut impl DbTxn, retiring_key: KeyFor, new_key: KeyFor); + // TODO: This needs to return a HashMap for the eventualities + fn flush_key( + txn: &mut impl DbTxn, + block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ); /// Retire a key as it'll no longer be used. /// /// Any key retired MUST NOT still have outputs associated with it. This SHOULD be a NOP other /// than any assertions and database cleanup. This MUST NOT be expected to be called in a fashion /// ordered to any other calls. - fn retire_key(&mut self, txn: &mut impl DbTxn, key: KeyFor); + fn retire_key(txn: &mut impl DbTxn, key: KeyFor); /// Accumulate outputs into the scheduler, yielding the Eventualities now to be scanned for. /// @@ -275,7 +283,6 @@ pub trait Scheduler: 'static + Send { /// The `Vec` used as the key in the returned HashMap should be the encoded key the /// Eventualities are for. fn update( - &mut self, txn: &mut impl DbTxn, block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], @@ -315,7 +322,6 @@ pub trait Scheduler: 'static + Send { has an output-to-Serai, the new primary output). */ fn fulfill( - &mut self, txn: &mut impl DbTxn, block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], @@ -333,18 +339,17 @@ impl Scanner { /// Create a new scanner. /// /// This will begin its execution, spawning several asynchronous tasks. - pub async fn new( + pub async fn new>( db: impl Db, feed: S, batch_publisher: impl BatchPublisher, - scheduler: impl Scheduler, start_block: u64, ) -> Self { let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await; let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block); let report_task = report::ReportTask::<_, S, _>::new(db.clone(), batch_publisher, start_block); let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone()); - let eventuality_task = eventuality::EventualityTask::new(db, feed, scheduler, start_block); + let eventuality_task = eventuality::EventualityTask::<_, _, Sch>::new(db, feed, start_block); let (_index_handle, index_run) = RunNowHandle::new(); let (scan_handle, scan_run) = RunNowHandle::new(); diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs index 356192ee5..81d5ebd70 100644 --- a/processor/scheduler/utxo/primitives/src/lib.rs +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -2,12 +2,10 @@ #![doc = include_str!("../README.md")] #![deny(missing_docs)] -use core::fmt::Debug; - use serai_primitives::{Coin, Amount}; use primitives::{ReceivedOutput, Payment}; -use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor}; +use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor}; use scheduler_primitives::*; /// A planned transaction. @@ -23,12 +21,6 @@ pub struct PlannedTransaction { /// An object able to plan a transaction. #[async_trait::async_trait] pub trait TransactionPlanner: 'static + Send + Sync { - /// An error encountered when determining the fee rate. - /// - /// This MUST be an ephemeral error. 
Retrying fetching data from the blockchain MUST eventually - /// resolve without manual intervention/changing the arguments. - type EphemeralError: Debug; - /// The type representing a fee rate to use for transactions. type FeeRate: Clone + Copy; @@ -42,12 +34,8 @@ pub trait TransactionPlanner: 'static + Send + Sync { /// Obtain the fee rate to pay. /// - /// This must be constant to the finalized block referenced by this block number and the coin. - async fn fee_rate( - &self, - block_number: u64, - coin: Coin, - ) -> Result; + /// This must be constant to the block and coin. + fn fee_rate(block: &BlockFor, coin: Coin) -> Self::FeeRate; /// The branch address for this key of Serai's. fn branch_address(key: KeyFor) -> AddressFor; diff --git a/processor/scheduler/utxo/transaction-chaining/src/db.rs b/processor/scheduler/utxo/transaction-chaining/src/db.rs index d629480f1..697f1009e 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/db.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/db.rs @@ -61,13 +61,13 @@ impl Db { pub(crate) fn set_already_accumulated_output( txn: &mut impl DbTxn, - output: as ReceivedOutput, AddressFor>>::Id, + output: & as ReceivedOutput, AddressFor>>::Id, ) { AlreadyAccumulatedOutput::set(txn, output.as_ref(), &()); } pub(crate) fn take_if_already_accumulated_output( txn: &mut impl DbTxn, - output: as ReceivedOutput, AddressFor>>::Id, + output: & as ReceivedOutput, AddressFor>>::Id, ) -> bool { let res = AlreadyAccumulatedOutput::get(txn, output.as_ref()).is_some(); AlreadyAccumulatedOutput::del(txn, output.as_ref()); @@ -79,15 +79,26 @@ impl Db { key: KeyFor, coin: Coin, ) -> Option>>> { - todo!("TODO") + let buf = SerializedQueuedPayments::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(Payment::read(&mut buf).unwrap()); + } + Some(res) } pub(crate) fn set_queued_payments( txn: &mut impl DbTxn, key: KeyFor, coin: Coin, - queued: &Vec>>, + queued: &[Payment>], ) { - todo!("TODO") + let mut buf = Vec::with_capacity(queued.len() * 128); + for queued in queued { + queued.write(&mut buf).unwrap(); + } + SerializedQueuedPayments::set(txn, key.to_bytes().as_ref(), coin, &buf); } pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { SerializedQueuedPayments::del(txn, key.to_bytes().as_ref(), coin); diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index 31c70c1e8..7359a87cc 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -13,8 +13,8 @@ use serai_db::DbTxn; use primitives::{OutputType, ReceivedOutput, Payment}; use scanner::{ - LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, SchedulerUpdate, - Scheduler as SchedulerTrait, + LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor, + SchedulerUpdate, Scheduler as SchedulerTrait, }; use scheduler_primitives::*; use utxo_scheduler_primitives::*; @@ -22,6 +22,114 @@ use utxo_scheduler_primitives::*; mod db; use db::Db; +#[derive(Clone)] +enum TreeTransaction { + Leaves { payments: Vec>>, value: u64 }, + Branch { children: Vec, value: u64 }, +} +impl TreeTransaction { + fn children(&self) -> usize { + match self { + Self::Leaves { payments, .. } => payments.len(), + Self::Branch { children, .. 
} => children.len(), + } + } + fn value(&self) -> u64 { + match self { + Self::Leaves { value, .. } | Self::Branch { value, .. } => *value, + } + } + fn payments( + &self, + coin: Coin, + branch_address: &AddressFor, + input_value: u64, + ) -> Option>>> { + // Fetch the amounts for the payments we'll make + let mut amounts: Vec<_> = match self { + Self::Leaves { payments, .. } => { + payments.iter().map(|payment| Some(payment.balance().amount.0)).collect() + } + Self::Branch { children, .. } => children.iter().map(|child| Some(child.value())).collect(), + }; + + // We need to reduce them so their sum is our input value + assert!(input_value <= self.value()); + let amount_to_amortize = self.value() - input_value; + + // If any payments won't survive the reduction, set them to None + let mut amortized = 0; + 'outer: while amounts.iter().any(Option::is_some) && (amortized < amount_to_amortize) { + let adjusted_fee = amount_to_amortize - amortized; + let amounts_len = + u64::try_from(amounts.iter().filter(|amount| amount.is_some()).count()).unwrap(); + let per_payment_fee_check = adjusted_fee.div_ceil(amounts_len); + + // Check each amount to see if it's not viable + let mut i = 0; + while i < amounts.len() { + if let Some(amount) = amounts[i] { + if amount.saturating_sub(per_payment_fee_check) < S::dust(coin).0 { + amounts[i] = None; + amortized += amount; + // If this amount wasn't viable, re-run with the new fee/amortization amounts + continue 'outer; + } + } + i += 1; + } + + // Now that we have the payments which will survive, reduce them + for (i, amount) in amounts.iter_mut().enumerate() { + if let Some(amount) = amount { + *amount -= adjusted_fee / amounts_len; + if i < usize::try_from(adjusted_fee % amounts_len).unwrap() { + *amount -= 1; + } + } + } + break; + } + + // Now that we have the reduced amounts, create the payments + let payments: Vec<_> = match self { + Self::Leaves { payments, .. } => { + payments + .iter() + .zip(amounts) + .filter_map(|(payment, amount)| { + amount.map(|amount| { + // The existing payment, with the new amount + Payment::new( + payment.address().clone(), + Balance { coin, amount: Amount(amount) }, + payment.data().clone(), + ) + }) + }) + .collect() + } + Self::Branch { .. } => { + amounts + .into_iter() + .filter_map(|amount| { + amount.map(|amount| { + // A branch output with the new amount + Payment::new(branch_address.clone(), Balance { coin, amount: Amount(amount) }, None) + }) + }) + .collect() + } + }; + + // Use None for vec![] so we never actually use vec![] + if payments.is_empty() { + None?; + } + Some(payments) + } +} + /// The outputs which will be effected by a PlannedTransaction and received by Serai. 
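// A standalone sketch of the amortization rule TreeTransaction::payments
// implements above, with plain u64 amounts and an explicit dust bound standing
// in for S::dust(coin). A payment which can't survive its share of the
// shortfall is dropped outright, its whole value absorbing part of the
// shortfall; the survivors then split the rest, remainder first. (Ordering is
// simplified here; the real code keeps dropped slots as None to preserve the
// pairing with child transactions.)
fn amortize_shortfall(amounts: &mut Vec<u64>, shortfall: u64, dust: u64) {
  let mut amortized = 0;
  'outer: while !amounts.is_empty() && (amortized < shortfall) {
    let remaining = shortfall - amortized;
    let n = u64::try_from(amounts.len()).unwrap();
    let per_payment = remaining.div_ceil(n);

    // Drop the first payment which would fall below dust, then re-run with the
    // reduced payment set and updated shortfall
    for i in 0 .. amounts.len() {
      if amounts[i].saturating_sub(per_payment) < dust {
        amortized += amounts.swap_remove(i);
        continue 'outer;
      }
    }

    // Every survivor can pay: deduct the even share, with the remainder taken
    // one unit each from the first (remaining % n) payments
    for (i, amount) in amounts.iter_mut().enumerate() {
      *amount -= remaining / n;
      if i < usize::try_from(remaining % n).unwrap() {
        *amount -= 1;
      }
    }
    break;
  }
}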
pub struct EffectedReceivedOutputs(Vec>); @@ -33,319 +141,315 @@ pub struct Scheduler>> Scheduler { - fn handle_queued_payments( - &mut self, + fn accumulate_outputs(txn: &mut impl DbTxn, outputs: Vec>, from_scanner: bool) { + let mut outputs_by_key = HashMap::new(); + for output in outputs { + if !from_scanner { + // Since this isn't being reported by the scanner, flag it so when the scanner does report + // it, we don't accumulate it again + Db::::set_already_accumulated_output(txn, &output.id()); + } else if Db::::take_if_already_accumulated_output(txn, &output.id()) { + continue; + } + + let coin = output.balance().coin; + outputs_by_key + // Index by key and coin + .entry((output.key().to_bytes().as_ref().to_vec(), coin)) + // If we haven't accumulated here prior, read the outputs from the database + .or_insert_with(|| (output.key(), Db::::outputs(txn, output.key(), coin).unwrap())) + .1 + .push(output); + } + // Write the outputs back to the database + for ((_key_vec, coin), (key, outputs)) in outputs_by_key { + Db::::set_outputs(txn, key, coin, &outputs); + } + } + + fn aggregate_inputs( txn: &mut impl DbTxn, - active_keys: &[(KeyFor, LifetimeStage)], - fee_rates: &HashMap, + block: &BlockFor, + key_for_change: KeyFor, key: KeyFor, + coin: Coin, ) -> Vec> { let mut eventualities = vec![]; - let mut accumulate_outputs = |txn, outputs: Vec>| { - let mut outputs_by_key = HashMap::new(); - for output in outputs { - Db::::set_already_accumulated_output(txn, output.id()); - let coin = output.balance().coin; - outputs_by_key - .entry((output.key().to_bytes().as_ref().to_vec(), coin)) - .or_insert_with(|| (output.key(), Db::::outputs(txn, output.key(), coin).unwrap())) - .1 - .push(output); - } - for ((_key_vec, coin), (key, outputs)) in outputs_by_key { - Db::::set_outputs(txn, key, coin, &outputs); - } - }; + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let mut outputs = Db::::outputs(txn, key, coin).unwrap(); + while outputs.len() > P::MAX_INPUTS { + let to_aggregate = outputs.drain(.. P::MAX_INPUTS).collect::>(); + Db::::set_outputs(txn, key, coin, &outputs); - for coin in S::NETWORK.coins() { - // Fetch our operating costs and all our outputs - let mut operating_costs = Db::::operating_costs(txn, *coin).0; - let mut outputs = Db::::outputs(txn, key, *coin).unwrap(); + let Some(planned) = P::plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + to_aggregate, + vec![], + Some(key_for_change), + ) else { + continue; + }; - // If we have more than the maximum amount of inputs, aggregate until we don't - { - while outputs.len() > P::MAX_INPUTS { - let Some(planned) = P::plan_transaction_with_fee_amortization( - &mut operating_costs, - fee_rates[coin], - outputs.drain(.. 
P::MAX_INPUTS).collect::>(), - vec![], - Some(key_for_change), - ) else { - // We amortized all payments, and even when just trying to make the change output, these - // inputs couldn't afford their own aggregation and were written off - Db::::set_operating_costs(txn, *coin, Amount(operating_costs)); - continue; - }; + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + Self::accumulate_outputs(txn, planned.auxilliary.0, false); - // Send the transactions off for signing - TransactionsToSign::::send(txn, &key, &planned.signable); - // Push the Eventualities onto the result - eventualities.push(planned.eventuality); - // Accumulate the outputs - Db::set_outputs(txn, key, *coin, &outputs); - accumulate_outputs(txn, planned.auxilliary.0); - outputs = Db::outputs(txn, key, *coin).unwrap(); + // Reload the outputs for the next loop iteration + outputs = Db::::outputs(txn, key, coin).unwrap(); + } + + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + eventualities + } + + fn fulfillable_payments( + txn: &mut impl DbTxn, + operating_costs: &mut u64, + key: KeyFor, + coin: Coin, + value_of_outputs: u64, + ) -> Vec>> { + // Fetch all payments for this key + let mut payments = Db::::queued_payments(txn, key, coin).unwrap(); + if payments.is_empty() { + return vec![]; + } + + loop { + // inputs must be >= (payments - operating costs) + // Accordingly, (inputs + operating costs) must be >= payments + let value_fulfillable = value_of_outputs + *operating_costs; + + // Drop to just the payments we can currently fulfill + { + let mut can_handle = 0; + let mut value_used = 0; + for payment in &payments { + value_used += payment.balance().amount.0; + if value_fulfillable < value_used { + break; + } + can_handle += 1; } - Db::::set_operating_costs(txn, *coin, Amount(operating_costs)); + + let remaining_payments = payments.drain(can_handle ..).collect::>(); + // Restore the rest to the database + Db::::set_queued_payments(txn, key, coin, &remaining_payments); } - // Now, handle the payments - let mut payments = Db::::queued_payments(txn, key, *coin).unwrap(); - if payments.is_empty() { + // If these payments are worth less than the operating costs, immediately drop them + let payments_value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + if payments_value <= *operating_costs { + *operating_costs -= payments_value; + Db::::set_operating_costs(txn, coin, Amount(*operating_costs)); + + // Reset payments to the queued payments + payments = Db::::queued_payments(txn, key, coin).unwrap(); + // If there's no more payments, stop looking for which payments we should fulfill + if payments.is_empty() { + return vec![]; + } + // Find which of these we should handle continue; } - // If this is our only key, our outputs and operating costs should be greater than the - // payments' value - if active_keys.len() == 1 { - // The available amount to fulfill is the amount we have plus the amount we'll reduce by - // An alternative formulation would be `outputs >= (payments - operating costs)`, but - // that'd risk underflow - let value_available = - operating_costs + outputs.iter().map(|output| output.balance().amount.0).sum::(); - - assert!( - value_available >= payments.iter().map(|payment| payment.balance().amount.0).sum::() - ); - } + return payments; + } + } - // Find the set of payments we should fulfill at this time - loop { - let value_available = - operating_costs + outputs.iter().map(|output| output.balance().amount.0).sum::(); - 
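// A sketch, not part of the patch, of how the aggregation loop above
// converges. Each pass consumes MAX_INPUTS outputs and, on success, yields one
// change output back into the set, so the count falls by MAX_INPUTS - 1 per
// transaction until it's within the limit. Illustrative stand-alone arithmetic:
fn aggregation_transactions(mut outputs: usize, max_inputs: usize) -> usize {
  assert!(max_inputs > 1);
  let mut transactions = 0;
  while outputs > max_inputs {
    outputs -= max_inputs; // the inputs drained into this aggregation transaction
    outputs += 1; // the change output it creates
    transactions += 1;
  }
  transactions
}

#[test]
fn aggregation_converges() {
  // 40 outputs under a 16-input limit: 40 -> 25 -> 10, needing two transactions
  assert_eq!(aggregation_transactions(40, 16), 2);
}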
- // Drop to just the payments we currently have the outputs for - { - let mut can_handle = 0; - let mut value_used = 0; - for payment in payments { - value_used += payment.balance().amount.0; - if value_available < value_used { - break; - } - can_handle += 1; - } + fn step( + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + block: &BlockFor, + key: KeyFor, + ) -> Vec> { + let mut eventualities = vec![]; - let remaining_payments = payments.drain(can_handle ..).collect::>(); - // Restore the rest to the database - Db::::set_queued_payments(txn, key, *coin, &remaining_payments); - } - let payments_value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + let key_for_change = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active => active_keys[0].0, + LifetimeStage::UsingNewForChange | LifetimeStage::Forwarding | LifetimeStage::Finishing => { + active_keys[1].0 + } + }; + let branch_address = P::branch_address(key); - // If these payments are worth less than the operating costs, immediately drop them - if payments_value <= operating_costs { - operating_costs -= payments_value; - Db::::set_operating_costs(txn, *coin, Amount(operating_costs)); + 'coin: for coin in S::NETWORK.coins() { + let coin = *coin; - // Reset payments to the queued payments - payments = Db::::queued_payments(txn, key, *coin).unwrap(); - // If there's no more payments, stop looking for which payments we should fulfill - if payments.is_empty() { - break; - } + // Perform any input aggregation we should + eventualities.append(&mut Self::aggregate_inputs(txn, block, key_for_change, key, coin)); - // Find which of these we should handle - continue; - } + // Fetch the operating costs/outputs + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, key, coin).unwrap(); - break; - } + // Fetch the fulfillable payments + let payments = Self::fulfillable_payments( + txn, + &mut operating_costs, + key, + coin, + outputs.iter().map(|output| output.balance().amount.0).sum(), + ); if payments.is_empty() { continue; } - // Create a tree to fulfill all of the payments - #[derive(Clone)] - struct TreeTransaction { - payments: Vec>>, - children: Vec>, - value: u64, + // If this is our only key, we should be able to fulfill all payments + // Else, we'd be insolvent + if active_keys.len() == 1 { + assert!(Db::::queued_payments(txn, key, coin).unwrap().is_empty()); } - let mut tree_transactions = vec![]; + + // Create a tree to fulfillthe payments + // This variable is for the current layer of the tree being built + let mut tree = Vec::with_capacity(payments.len().div_ceil(P::MAX_OUTPUTS)); + + // Push the branches for the leaves (the payments out) for payments in payments.chunks(P::MAX_OUTPUTS) { let value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); - tree_transactions.push(TreeTransaction:: { - payments: payments.to_vec(), - children: vec![], - value, - }); + tree.push(TreeTransaction::::Leaves { payments: payments.to_vec(), value }); } + // While we haven't calculated a tree root, or the tree root doesn't support a change output, // keep working - while (tree_transactions.len() != 1) || - (tree_transactions[0].payments.len() == P::MAX_OUTPUTS) - { - let mut next_tree_transactions = vec![]; - for children in tree_transactions.chunks(P::MAX_OUTPUTS) { - // If this is the last chunk, and it doesn't need to 
accumulated, continue - if (children.len() < P::MAX_OUTPUTS) && - ((next_tree_transactions.len() + children.len()) < P::MAX_OUTPUTS) - { - for child in children { - next_tree_transactions.push(child.clone()); - } - continue; - } - - let payments = children - .iter() - .map(|child| { - Payment::new( - P::branch_address(key), - Balance { coin: *coin, amount: Amount(child.value) }, - None, - ) - }) - .collect(); - let value = children.iter().map(|child| child.value).sum(); - next_tree_transactions.push(TreeTransaction { - payments, + while (tree.len() != 1) || (tree[0].children() == P::MAX_OUTPUTS) { + let mut branch_layer = vec![]; + for children in tree.chunks(P::MAX_OUTPUTS) { + branch_layer.push(TreeTransaction::::Branch { children: children.to_vec(), - value, + value: children.iter().map(TreeTransaction::value).sum(), }); } - tree_transactions = next_tree_transactions; + tree = branch_layer; } + assert_eq!(tree.len(), 1); + assert!((tree[0].children() + 1) <= P::MAX_OUTPUTS); + + // Create the transaction for the root of the tree + let mut branch_outputs = { + // Try creating this transaction twice, once with a change output and once with increased + // operating costs to ensure a change output (as necessary to meet the requirements of the + // scanner API) + let mut planned_outer = None; + for i in 0 .. 2 { + let Some(planned) = P::plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + outputs.clone(), + tree[0] + .payments(coin, &branch_address, tree[0].value()) + .expect("payments were dropped despite providing an input of the needed value"), + Some(key_for_change), + ) else { + // This should trip on the first iteration or not at all + assert_eq!(i, 0); + // This doesn't have inputs even worth aggregating so drop the entire tree + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + continue 'coin; + }; - // This is recursive, yet only recurses with logarithmic depth - fn execute_tree_transaction< - S: ScannerFeed, - P: TransactionPlanner>, - >( - txn: &mut impl DbTxn, - fee_rate: P::FeeRate, - eventualities: &mut Vec>, - key: KeyFor, - mut branch_outputs: Vec>, - mut children: Vec>, - ) { - assert_eq!(branch_outputs.len(), children.len()); + // If this doesn't have a change output, increase operating costs and try again + if !planned.auxilliary.0.iter().any(|output| output.kind() == OutputType::Change) { + /* + Since we'll create a change output if it's worth at least dust, amortizing dust from + the payments should solve this. If the new transaction can't afford those operating + costs, then the payments should be amortized out, causing there to be a change or no + transaction at all. 
+ */ + operating_costs += S::dust(coin).0; + continue; + } + + // Since this had a change output, move forward with it + planned_outer = Some(planned); + break; + } + let Some(mut planned) = planned_outer else { + panic!("couldn't create a tree root with a change output") + }; + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + // We accumulate the change output, but not the branches as we'll consume them momentarily + Self::accumulate_outputs( + txn, + planned + .auxilliary + .0 + .iter() + .filter(|output| output.kind() == OutputType::Change) + .cloned() + .collect(), + false, + ); + planned.auxilliary.0.retain(|output| output.kind() == OutputType::Branch); + planned.auxilliary.0 + }; + + // Now execute each layer of the tree + tree = match tree.remove(0) { + TreeTransaction::Leaves { .. } => vec![], + TreeTransaction::Branch { children, .. } => children, + }; + while !tree.is_empty() { // Sort the branch outputs by their value - branch_outputs.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0)); - // Find the child for each branch output - // This is only done within a transaction, not across the layer, so we don't have branches - // created in transactions with less outputs (and therefore less fees) jump places with - // other branches - children.sort_by(|a, b| a.value.cmp(&b.value)); - - for (branch_output, mut child) in branch_outputs.into_iter().zip(children) { + branch_outputs.sort_by_key(|a| a.balance().amount.0); + // Sort the transactions we should create by their value so they share an order with the + // branch outputs + tree.sort_by_key(TreeTransaction::value); + + // If we dropped any Branch outputs, drop the associated children + tree.truncate(branch_outputs.len()); + assert_eq!(branch_outputs.len(), tree.len()); + + let branch_outputs_for_this_layer = branch_outputs; + let this_layer = tree; + branch_outputs = vec![]; + tree = vec![]; + + for (branch_output, tx) in branch_outputs_for_this_layer.into_iter().zip(this_layer) { assert_eq!(branch_output.kind(), OutputType::Branch); - Db::::set_already_accumulated_output(txn, branch_output.id()); - - // We need to compensate for the value of this output being less than the value of the - // payments - { - let fee_to_amortize = child.value - branch_output.balance().amount.0; - let mut amortized = 0; - 'outer: while (!child.payments.is_empty()) && (amortized < fee_to_amortize) { - let adjusted_fee = fee_to_amortize - amortized; - let payments_len = u64::try_from(child.payments.len()).unwrap(); - let per_payment_fee_check = adjusted_fee.div_ceil(payments_len); - - let mut i = 0; - while i < child.payments.len() { - let amount = child.payments[i].balance().amount.0; - if amount <= per_payment_fee_check { - child.payments.swap_remove(i); - child.children.swap_remove(i); - amortized += amount; - continue 'outer; - } - i += 1; - } - - // Since all payments can pay the fee, deduct accordingly - for (i, payment) in child.payments.iter_mut().enumerate() { - let Balance { coin, amount } = payment.balance(); - let mut amount = amount.0; - amount -= adjusted_fee / payments_len; - if i < usize::try_from(adjusted_fee % payments_len).unwrap() { - amount -= 1; - } - - *payment = Payment::new( - payment.address().clone(), - Balance { coin, amount: Amount(amount) }, - None, - ); - } - } - if child.payments.is_empty() { - continue; - } - } - let Some(planned) = P::plan_transaction_with_fee_amortization( + let 
Some(payments) = tx.payments(coin, &branch_address, branch_output.balance().amount.0) + else { + // If this output has become too small to satisfy this branch, drop it + continue; + }; + + let branch_output_id = branch_output.id(); + let Some(mut planned) = P::plan_transaction_with_fee_amortization( // Uses 0 as there's no operating costs to incur/amortize here &mut 0, - fee_rate, + P::fee_rate(block, coin), vec![branch_output], - child.payments, + payments, None, ) else { // This Branch isn't viable, so drop it (and its children) continue; }; + // Since we've made a TX spending this output, don't accumulate it later + Db::::set_already_accumulated_output(txn, &branch_output_id); TransactionsToSign::::send(txn, &key, &planned.signable); eventualities.push(planned.eventuality); - if !child.children.is_empty() { - execute_tree_transaction::( - txn, - fee_rate, - eventualities, - key, - planned.auxilliary.0, - child.children, - ); + + match tx { + TreeTransaction::Leaves { .. } => {} + // If this was a branch, handle its children + TreeTransaction::Branch { mut children, .. } => { + branch_outputs.append(&mut planned.auxilliary.0); + tree.append(&mut children); + } } } } - - assert_eq!(tree_transactions.len(), 1); - assert!((tree_transactions[0].payments.len() + 1) <= P::MAX_OUTPUTS); - - // Create the transaction for the root of the tree - let Some(planned) = P::plan_transaction_with_fee_amortization( - &mut operating_costs, - fee_rates[coin], - outputs, - tree_transactions[0].payments, - Some(key_for_change), - ) else { - Db::::set_operating_costs(txn, *coin, Amount(operating_costs)); - continue; - }; - TransactionsToSign::::send(txn, &key, &planned.signable); - eventualities.push(planned.eventuality); - - // We accumulate the change output, but consume the branches here - accumulate_outputs( - txn, - planned - .auxilliary - .0 - .iter() - .filter(|output| output.kind() == OutputType::Change) - .cloned() - .collect(), - ); - // Filter the outputs to the change outputs - let mut branch_outputs = planned.auxilliary.0; - branch_outputs.retain(|output| output.kind() == OutputType::Branch); - - if !tree_transactions[0].children.is_empty() { - execute_tree_transaction::( - txn, - fee_rates[coin], - &mut eventualities, - key, - branch_outputs, - tree_transactions[0].children, - ); - } } eventualities @@ -355,16 +459,21 @@ impl>> Sched impl>> SchedulerTrait for Scheduler { - fn activate_key(&mut self, txn: &mut impl DbTxn, key: KeyFor) { + fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { for coin in S::NETWORK.coins() { assert!(Db::::outputs(txn, key, *coin).is_none()); Db::::set_outputs(txn, key, *coin, &[]); assert!(Db::::queued_payments(txn, key, *coin).is_none()); - Db::::set_queued_payments(txn, key, *coin, &vec![]); + Db::::set_queued_payments(txn, key, *coin, &[]); } } - fn flush_key(&mut self, txn: &mut impl DbTxn, retiring_key: KeyFor, new_key: KeyFor) { + fn flush_key( + txn: &mut impl DbTxn, + _block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ) { for coin in S::NETWORK.coins() { let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); @@ -372,12 +481,14 @@ impl>> Sched let mut queued = still_queued; queued.append(&mut new_queued); - Db::::set_queued_payments(txn, retiring_key, *coin, &vec![]); + Db::::set_queued_payments(txn, retiring_key, *coin, &[]); Db::::set_queued_payments(txn, new_key, *coin, &queued); + + // TODO: Forward all existing outputs } } - fn retire_key(&mut 
self, txn: &mut impl DbTxn, key: KeyFor) { + fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { for coin in S::NETWORK.coins() { assert!(Db::::outputs(txn, key, *coin).unwrap().is_empty()); Db::::del_outputs(txn, key, *coin); @@ -387,48 +498,18 @@ impl>> Sched } fn update( - &mut self, txn: &mut impl DbTxn, block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], update: SchedulerUpdate, ) -> HashMap, Vec>> { - // Accumulate all the outputs - for (key, _) in active_keys { - // Accumulate them in memory - let mut outputs_by_coin = HashMap::with_capacity(1); - for output in update.outputs().iter().filter(|output| output.key() == *key) { - match output.kind() { - OutputType::External | OutputType::Forwarded => {} - // Only accumulate these if we haven't already - OutputType::Branch | OutputType::Change => { - if Db::::take_if_already_accumulated_output(txn, output.id()) { - continue; - } - } - } - let coin = output.balance().coin; - if let std::collections::hash_map::Entry::Vacant(e) = outputs_by_coin.entry(coin) { - e.insert(Db::::outputs(txn, *key, coin).unwrap()); - } - outputs_by_coin.get_mut(&coin).unwrap().push(output.clone()); - } - - // Flush them to the database - for (coin, outputs) in outputs_by_coin { - Db::::set_outputs(txn, *key, coin, &outputs); - } - } - - let fee_rates = block.fee_rates(); + Self::accumulate_outputs(txn, update.outputs().to_vec(), true); // Fulfill the payments we prior couldn't let mut eventualities = HashMap::new(); for (key, _stage) in active_keys { - eventualities.insert( - key.to_bytes().as_ref().to_vec(), - self.handle_queued_payments(txn, active_keys, fee_rates, *key), - ); + eventualities + .insert(key.to_bytes().as_ref().to_vec(), Self::step(txn, active_keys, block, *key)); } // TODO: If this key has been flushed, forward all outputs @@ -448,7 +529,7 @@ impl>> Sched // This uses 0 for the operating costs as we don't incur any here // If the output can't pay for itself to be forwarded, we simply drop it &mut 0, - fee_rates[&forward.balance().coin], + P::fee_rate(block, forward.balance().coin), vec![forward.clone()], vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)], None, @@ -465,7 +546,7 @@ impl>> Sched // This uses 0 for the operating costs as we don't incur any here // If the output can't pay for itself to be returned, we simply drop it &mut 0, - fee_rates[&out_instruction.balance().coin], + P::fee_rate(block, out_instruction.balance().coin), vec![to_return.output().clone()], vec![out_instruction], None, @@ -480,7 +561,7 @@ impl>> Sched TransactionsToSign::::send(txn, &key, &planned_tx.signable); // Insert the Eventualities into the result - eventualities[key.to_bytes().as_ref()].push(planned_tx.eventuality); + eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality); } eventualities @@ -488,11 +569,10 @@ impl>> Sched } fn fulfill( - &mut self, txn: &mut impl DbTxn, block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], - mut payments: Vec>>, + payments: Vec>>, ) -> HashMap, Vec>> { // Find the key to filfill these payments with let fulfillment_key = match active_keys[0].1 { @@ -514,7 +594,7 @@ impl>> Sched // Handle the queued payments HashMap::from([( fulfillment_key.to_bytes().as_ref().to_vec(), - self.handle_queued_payments(txn, active_keys, block.fee_rates(), fulfillment_key), + Self::step(txn, active_keys, block, fulfillment_key), )]) } } From ba8477def8c5bf172c1f1dc72cbb917dd45d754c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 4 Sep 2024 02:06:21 -0400 Subject: 
[PATCH 070/179] Finish routing output flushing Completes the transaction-chaining scheduler. --- processor/scanner/src/eventuality/mod.rs | 4 +- processor/scanner/src/lib.rs | 3 +- .../utxo/transaction-chaining/src/lib.rs | 79 ++++++++++++++++--- 3 files changed, 73 insertions(+), 13 deletions(-) diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 84670f79c..6db60b71e 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -455,7 +455,9 @@ impl> ContinuallyRan for EventualityTas key.key != keys.last().unwrap().key, "key which was forwarding was the last key (which has no key after it to forward to)" ); - Sch::flush_key(&mut txn, &block, key.key, keys.last().unwrap().key); + let new_eventualities = + Sch::flush_key(&mut txn, &block, key.key, keys.last().unwrap().key); + intake_eventualities::(&mut txn, new_eventualities); } // Now that we've intaked any Eventualities caused, check if we're retiring any keys diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 8ecb731f5..ecefb9a8b 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -259,13 +259,12 @@ pub trait Scheduler: 'static + Send { /// /// If the retiring key has any unfulfilled payments associated with it, those MUST be made /// the responsibility of the new key. - // TODO: This needs to return a HashMap for the eventualities fn flush_key( txn: &mut impl DbTxn, block: &BlockFor, retiring_key: KeyFor, new_key: KeyFor, - ); + ) -> HashMap, Vec>>; /// Retire a key as it'll no longer be used. /// diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index 7359a87cc..321d4b60f 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -454,6 +454,42 @@ impl>> Sched eventualities } + + fn flush_outputs( + txn: &mut impl DbTxn, + eventualities: &mut HashMap, Vec>>, + block: &BlockFor, + from: KeyFor, + to: KeyFor, + coin: Coin, + ) { + let from_bytes = from.to_bytes().as_ref().to_vec(); + // Ensure our inputs are aggregated + eventualities + .entry(from_bytes.clone()) + .or_insert(vec![]) + .append(&mut Self::aggregate_inputs(txn, block, to, from, coin)); + + // Now that our inputs are aggregated, transfer all of them to the new key + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, from, coin).unwrap(); + if outputs.is_empty() { + return; + } + let planned = P::plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + outputs, + vec![], + Some(to), + ); + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + let Some(planned) = planned else { return }; + + TransactionsToSign::::send(txn, &from, &planned.signable); + eventualities.get_mut(&from_bytes).unwrap().push(planned.eventuality); + Self::accumulate_outputs(txn, planned.auxilliary.0, false); + } } impl>> SchedulerTrait @@ -470,22 +506,28 @@ impl>> Sched fn flush_key( txn: &mut impl DbTxn, - _block: &BlockFor, + block: &BlockFor, retiring_key: KeyFor, new_key: KeyFor, - ) { + ) -> HashMap, Vec>> { + let mut eventualities = HashMap::new(); for coin in S::NETWORK.coins() { - let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); - let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); + // Move the payments to the new key + { + let still_queued = 
Db::::queued_payments(txn, retiring_key, *coin).unwrap(); + let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); - let mut queued = still_queued; - queued.append(&mut new_queued); + let mut queued = still_queued; + queued.append(&mut new_queued); - Db::::set_queued_payments(txn, retiring_key, *coin, &[]); - Db::::set_queued_payments(txn, new_key, *coin, &queued); + Db::::set_queued_payments(txn, retiring_key, *coin, &[]); + Db::::set_queued_payments(txn, new_key, *coin, &queued); + } - // TODO: Forward all existing outputs + // Move the outputs to the new key + Self::flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin); } + eventualities } fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { @@ -512,7 +554,24 @@ impl>> Sched .insert(key.to_bytes().as_ref().to_vec(), Self::step(txn, active_keys, block, *key)); } - // TODO: If this key has been flushed, forward all outputs + // If this key has been flushed, forward all outputs + match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => {} + LifetimeStage::Forwarding | LifetimeStage::Finishing => { + for coin in S::NETWORK.coins() { + Self::flush_outputs( + txn, + &mut eventualities, + block, + active_keys[0].0, + active_keys[1].0, + *coin, + ); + } + } + } // Create the transactions for the forwards/burns { From 4c25367624fc08c5182ba92b9a158fca238915d4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 4 Sep 2024 03:54:12 -0400 Subject: [PATCH 071/179] Add non-transaction-chaining scheduler --- .github/workflows/tests.yml | 1 + Cargo.lock | 16 + Cargo.toml | 1 + deny.toml | 1 + processor/primitives/src/output.rs | 12 +- processor/primitives/src/payment.rs | 3 +- processor/scheduler/primitives/Cargo.toml | 3 + .../scheduler/utxo/primitives/Cargo.toml | 2 + .../scheduler/utxo/primitives/src/lib.rs | 69 ++- .../scheduler/utxo/primitives/src/tree.rs | 146 +++++ processor/scheduler/utxo/standard/Cargo.toml | 35 ++ processor/scheduler/utxo/standard/LICENSE | 15 + processor/scheduler/utxo/standard/README.md | 17 + processor/scheduler/utxo/standard/src/db.rs | 113 ++++ processor/scheduler/utxo/standard/src/lib.rs | 508 ++++++++++++++++++ .../utxo/transaction-chaining/README.md | 2 +- .../utxo/transaction-chaining/src/lib.rs | 152 +----- 17 files changed, 951 insertions(+), 145 deletions(-) create mode 100644 processor/scheduler/utxo/primitives/src/tree.rs create mode 100644 processor/scheduler/utxo/standard/Cargo.toml create mode 100644 processor/scheduler/utxo/standard/LICENSE create mode 100644 processor/scheduler/utxo/standard/README.md create mode 100644 processor/scheduler/utxo/standard/src/db.rs create mode 100644 processor/scheduler/utxo/standard/src/lib.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index ca0bd4f59..a6260579f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -45,6 +45,7 @@ jobs: -p serai-processor-scanner \ -p serai-processor-scheduler-primitives \ -p serai-processor-utxo-scheduler-primitives \ + -p serai-processor-utxo-scheduler \ -p serai-processor-transaction-chaining-scheduler \ -p serai-processor \ -p tendermint-machine \ diff --git a/Cargo.lock b/Cargo.lock index dd1cc19e6..b3fa4e36e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8733,11 +8733,27 @@ dependencies = [ "serai-processor-utxo-scheduler-primitives", ] +[[package]] +name = "serai-processor-utxo-scheduler" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "parity-scale-codec", + 
"serai-db", + "serai-primitives", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-utxo-scheduler-primitives", +] + [[package]] name = "serai-processor-utxo-scheduler-primitives" version = "0.1.0" dependencies = [ "async-trait", + "borsh", "serai-primitives", "serai-processor-primitives", "serai-processor-scanner", diff --git a/Cargo.toml b/Cargo.toml index b61cde688..a2d86c823 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,7 @@ members = [ "processor/scanner", "processor/scheduler/primitives", "processor/scheduler/utxo/primitives", + "processor/scheduler/utxo/standard", "processor/scheduler/utxo/transaction-chaining", "processor", diff --git a/deny.toml b/deny.toml index 2ca0ca503..16d3cbeaa 100644 --- a/deny.toml +++ b/deny.toml @@ -52,6 +52,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-scanner" }, { allow = ["AGPL-3.0"], name = "serai-processor-scheduler-primitives" }, { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler-primitives" }, + { allow = ["AGPL-3.0"], name = "serai-processor-standard-scheduler" }, { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" }, { allow = ["AGPL-3.0"], name = "serai-processor" }, diff --git a/processor/primitives/src/output.rs b/processor/primitives/src/output.rs index d59b4fd0b..cbfe59f3f 100644 --- a/processor/primitives/src/output.rs +++ b/processor/primitives/src/output.rs @@ -3,12 +3,22 @@ use std::io; use group::GroupEncoding; +use borsh::{BorshSerialize, BorshDeserialize}; + use serai_primitives::{ExternalAddress, Balance}; use crate::Id; /// An address on the external network. -pub trait Address: Send + Sync + Clone + Into + TryFrom { +pub trait Address: + Send + + Sync + + Clone + + Into + + TryFrom + + BorshSerialize + + BorshDeserialize +{ /// Write this address. fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; /// Read an address. diff --git a/processor/primitives/src/payment.rs b/processor/primitives/src/payment.rs index bf3c918c1..67a5bbadd 100644 --- a/processor/primitives/src/payment.rs +++ b/processor/primitives/src/payment.rs @@ -1,6 +1,7 @@ use std::io; use scale::{Encode, Decode, IoReader}; +use borsh::{BorshSerialize, BorshDeserialize}; use serai_primitives::{Balance, Data}; use serai_coins_primitives::OutInstructionWithBalance; @@ -8,7 +9,7 @@ use serai_coins_primitives::OutInstructionWithBalance; use crate::Address; /// A payment to fulfill. 
-#[derive(Clone)] +#[derive(Clone, BorshSerialize, BorshDeserialize)] pub struct Payment { address: A, balance: Balance, diff --git a/processor/scheduler/primitives/Cargo.toml b/processor/scheduler/primitives/Cargo.toml index 31d738531..cdf12cbbb 100644 --- a/processor/scheduler/primitives/Cargo.toml +++ b/processor/scheduler/primitives/Cargo.toml @@ -13,6 +13,9 @@ publish = false all-features = true rustdoc-args = ["--cfg", "docsrs"] +[package.metadata.cargo-machete] +ignored = ["scale", "borsh"] + [lints] workspace = true diff --git a/processor/scheduler/utxo/primitives/Cargo.toml b/processor/scheduler/utxo/primitives/Cargo.toml index 4f2499f98..85935ae0f 100644 --- a/processor/scheduler/utxo/primitives/Cargo.toml +++ b/processor/scheduler/utxo/primitives/Cargo.toml @@ -19,6 +19,8 @@ workspace = true [dependencies] async-trait = { version = "0.1", default-features = false } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } primitives = { package = "serai-processor-primitives", path = "../../../primitives" } diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs index 81d5ebd70..274eb2a4d 100644 --- a/processor/scheduler/utxo/primitives/src/lib.rs +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -8,6 +8,9 @@ use primitives::{ReceivedOutput, Payment}; use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor}; use scheduler_primitives::*; +mod tree; +pub use tree::*; + /// A planned transaction. pub struct PlannedTransaction { /// The signable transaction. @@ -18,6 +21,23 @@ pub struct PlannedTransaction { pub auxilliary: A, } +/// A planned transaction which was created via amortizing the fee. +pub struct AmortizePlannedTransaction { + /// The amounts the included payments were worth. + /// + /// If the payments passed as an argument are sorted from highest to lowest valued, these `n` + /// amounts will be for the first `n` payments. + pub effected_payments: Vec, + /// Whether or not the planned transaction had a change output. + pub has_change: bool, + /// The signable transaction. + pub signable: ST, + /// The Eventuality to watch for. + pub eventuality: EventualityFor, + /// The auxilliary data for this transaction. + pub auxilliary: A, +} + /// An object able to plan a transaction. #[async_trait::async_trait] pub trait TransactionPlanner: 'static + Send + Sync { @@ -60,7 +80,8 @@ pub trait TransactionPlanner: 'static + Send + Sync { /// This must only require the same fee as would be returned by `calculate_fee`. The caller is /// trusted to maintain `sum(inputs) - sum(payments) >= if change.is_some() { DUST } else { 0 }`. /// - /// `change` will always be an address belonging to the Serai network. + /// `change` will always be an address belonging to the Serai network. If it is `Some`, a change + /// output must be created. 
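+ /// As a hypothetical illustration of the caller's invariant: with inputs summing to
+ /// 1_000, payments summing to 900, and `DUST` of 50, `change` may be `Some`, as the
+ /// 100 remainder covers `DUST`.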
fn plan( fee_rate: Self::FeeRate, inputs: Vec>, @@ -82,7 +103,7 @@ pub trait TransactionPlanner: 'static + Send + Sync { inputs: Vec>, mut payments: Vec>>, mut change: Option>, - ) -> Option> { + ) -> Option> { // If there's no change output, we can't recoup any operating costs we would amortize // We also don't have any losses if the inputs are written off/the change output is reduced let mut operating_costs_if_no_change = 0; @@ -192,6 +213,48 @@ pub trait TransactionPlanner: 'static + Send + Sync { } // Because we amortized, or accrued as operating costs, the fee, make the transaction - Some(Self::plan(fee_rate, inputs, payments, change)) + let effected_payments = payments.iter().map(|payment| payment.balance().amount).collect(); + let has_change = change.is_some(); + let PlannedTransaction { signable, eventuality, auxilliary } = + Self::plan(fee_rate, inputs, payments, change); + Some(AmortizePlannedTransaction { + effected_payments, + has_change, + signable, + eventuality, + auxilliary, + }) + } + + /// Create a tree to fulfill a set of payments. + /// + /// Returns a `TreeTransaction` whose children (and arbitrary children of children) fulfill all + /// these payments. This tree root will be able to be made with a change output. + fn tree(payments: &[Payment>]) -> TreeTransaction> { + // This variable is for the current layer of the tree being built + let mut tree = Vec::with_capacity(payments.len().div_ceil(Self::MAX_OUTPUTS)); + + // Push the branches for the leaves (the payments out) + for payments in payments.chunks(Self::MAX_OUTPUTS) { + let value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + tree.push(TreeTransaction::>::Leaves { payments: payments.to_vec(), value }); + } + + // While we haven't calculated a tree root, or the tree root doesn't support a change output, + // keep working + while (tree.len() != 1) || (tree[0].children() == Self::MAX_OUTPUTS) { + let mut branch_layer = vec![]; + for children in tree.chunks(Self::MAX_OUTPUTS) { + branch_layer.push(TreeTransaction::>::Branch { + children: children.to_vec(), + value: children.iter().map(TreeTransaction::value).sum(), + }); + } + tree = branch_layer; + } + assert_eq!(tree.len(), 1); + let tree_root = tree.remove(0); + assert!((tree_root.children() + 1) <= Self::MAX_OUTPUTS); + tree_root } } diff --git a/processor/scheduler/utxo/primitives/src/tree.rs b/processor/scheduler/utxo/primitives/src/tree.rs new file mode 100644 index 000000000..b52f3ba3b --- /dev/null +++ b/processor/scheduler/utxo/primitives/src/tree.rs @@ -0,0 +1,146 @@ +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_primitives::{Coin, Amount, Balance}; + +use primitives::{Address, Payment}; +use scanner::ScannerFeed; + +/// A transaction within a tree to fulfill payments. +#[derive(Clone, BorshSerialize, BorshDeserialize)] +pub enum TreeTransaction { + /// A transaction for the leaves (payments) of the tree. + Leaves { + /// The payments within this transaction. + payments: Vec>, + /// The sum value of the payments. + value: u64, + }, + /// A transaction for the branches of the tree. + Branch { + /// The child transactions. + children: Vec, + /// The sum value of the child transactions. + value: u64, + }, +} +impl TreeTransaction { + /// How many children this transaction has. + /// + /// A child is defined as any dependent, whether payment or transaction. + pub fn children(&self) -> usize { + match self { + Self::Leaves { payments, .. } => payments.len(), + Self::Branch { children, .. 
} => children.len(), + } + } + + /// The value this transaction wants to spend. + pub fn value(&self) -> u64 { + match self { + Self::Leaves { value, .. } | Self::Branch { value, .. } => *value, + } + } + + /// The payments to make to enable this transaction's children. + /// + /// A child is defined as any dependent, whether payment or transaction. + /// + /// The input value given to this transaction MUST be less than or equal to the desired value. + /// The difference will be amortized over all dependents. + /// + /// Returns None if no payments should be made. Returns Some containing a non-empty Vec if any + /// payments should be made. + pub fn payments( + &self, + coin: Coin, + branch_address: &A, + input_value: u64, + ) -> Option>> { + // Fetch the amounts for the payments we'll make + let mut amounts: Vec<_> = match self { + Self::Leaves { payments, .. } => payments + .iter() + .map(|payment| { + assert_eq!(payment.balance().coin, coin); + Some(payment.balance().amount.0) + }) + .collect(), + Self::Branch { children, .. } => children.iter().map(|child| Some(child.value())).collect(), + }; + + // We need to reduce them so their sum is our input value + assert!(input_value <= self.value()); + let amount_to_amortize = self.value() - input_value; + + // If any payments won't survive the reduction, set them to None + let mut amortized = 0; + 'outer: while amounts.iter().any(Option::is_some) && (amortized < amount_to_amortize) { + let adjusted_fee = amount_to_amortize - amortized; + let amounts_len = + u64::try_from(amounts.iter().filter(|amount| amount.is_some()).count()).unwrap(); + let per_payment_fee_check = adjusted_fee.div_ceil(amounts_len); + + // Check each amount to see if it's not viable + let mut i = 0; + while i < amounts.len() { + if let Some(amount) = amounts[i] { + if amount.saturating_sub(per_payment_fee_check) < S::dust(coin).0 { + amounts[i] = None; + amortized += amount; + // If this amount wasn't viable, re-run with the new fee/amortization amounts + continue 'outer; + } + } + i += 1; + } + + // Now that we have the payments which will survive, reduce them + for (i, amount) in amounts.iter_mut().enumerate() { + if let Some(amount) = amount { + *amount -= adjusted_fee / amounts_len; + if i < usize::try_from(adjusted_fee % amounts_len).unwrap() { + *amount -= 1; + } + } + } + break; + } + + // Now that we have the reduced amounts, create the payments + let payments: Vec<_> = match self { + Self::Leaves { payments, .. } => { + payments + .iter() + .zip(amounts) + .filter_map(|(payment, amount)| { + amount.map(|amount| { + // The existing payment, with the new amount + Payment::new( + payment.address().clone(), + Balance { coin, amount: Amount(amount) }, + payment.data().clone(), + ) + }) + }) + .collect() + } + Self::Branch { .. 
} => { + amounts + .into_iter() + .filter_map(|amount| { + amount.map(|amount| { + // A branch output with the new amount + Payment::new(branch_address.clone(), Balance { coin, amount: Amount(amount) }, None) + }) + }) + .collect() + } + }; + + // Use None for vec![] so we never actually use vec![] + if payments.is_empty() { + None?; + } + Some(payments) + } +} diff --git a/processor/scheduler/utxo/standard/Cargo.toml b/processor/scheduler/utxo/standard/Cargo.toml new file mode 100644 index 000000000..d6c16161d --- /dev/null +++ b/processor/scheduler/utxo/standard/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "serai-processor-utxo-scheduler" +version = "0.1.0" +description = "Scheduler for UTXO networks for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/utxo/standard" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale", "borsh"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } + +serai-db = { path = "../../../../common/db" } + +primitives = { package = "serai-processor-primitives", path = "../../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../../scanner" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../../primitives" } +utxo-scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" } diff --git a/processor/scheduler/utxo/standard/LICENSE b/processor/scheduler/utxo/standard/LICENSE new file mode 100644 index 000000000..e091b1498 --- /dev/null +++ b/processor/scheduler/utxo/standard/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/utxo/standard/README.md b/processor/scheduler/utxo/standard/README.md new file mode 100644 index 000000000..8e5360f06 --- /dev/null +++ b/processor/scheduler/utxo/standard/README.md @@ -0,0 +1,17 @@ +# UTXO Scheduler + +A scheduler of transactions for networks premised on the UTXO model. + +### Design + +The scheduler is designed to achieve fulfillment of all expected payments with +an `O(1)` delay (regardless of prior scheduler state), `O(log n)` time, and +`O(log(n) + n)` computational complexity. + +For the time/computational complexity, we use a tree to fulfill payments. 
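+(For example, with a limit of four outputs per transaction, 48 payments become
+twelve leaf transactions, then three branches, then a single root whose three
+children leave room for a change output.)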
+This quickly gives us the ability to make as many outputs as necessary +(regardless of per-transaction output limits) and only has the latency of +including a chain of `O(log n)` transactions on-chain. The only computational +overhead is in creating the transactions which are branches in the tree. +Since we split off the root of the tree from a master output, the delay to start +fulfillment is the delay for the master output to re-appear on-chain. diff --git a/processor/scheduler/utxo/standard/src/db.rs b/processor/scheduler/utxo/standard/src/db.rs new file mode 100644 index 000000000..007615950 --- /dev/null +++ b/processor/scheduler/utxo/standard/src/db.rs @@ -0,0 +1,113 @@ +use core::marker::PhantomData; + +use group::GroupEncoding; + +use serai_primitives::{Coin, Amount, Balance}; + +use borsh::BorshDeserialize; +use serai_db::{Get, DbTxn, create_db, db_channel}; + +use primitives::{Payment, ReceivedOutput}; +use utxo_scheduler_primitives::TreeTransaction; +use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor}; + +create_db! { + UtxoScheduler { + OperatingCosts: (coin: Coin) -> Amount, + SerializedOutputs: (key: &[u8], coin: Coin) -> Vec, + SerializedQueuedPayments: (key: &[u8], coin: Coin) -> Vec, + } +} + +db_channel! { + UtxoScheduler { + PendingBranch: (key: &[u8], balance: Balance) -> Vec, + } +} + +pub(crate) struct Db(PhantomData); +impl Db { + pub(crate) fn operating_costs(getter: &impl Get, coin: Coin) -> Amount { + OperatingCosts::get(getter, coin).unwrap_or(Amount(0)) + } + pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: Coin, amount: Amount) { + OperatingCosts::set(txn, coin, &amount) + } + + pub(crate) fn outputs( + getter: &impl Get, + key: KeyFor, + coin: Coin, + ) -> Option>> { + let buf = SerializedOutputs::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(OutputFor::::read(&mut buf).unwrap()); + } + Some(res) + } + pub(crate) fn set_outputs( + txn: &mut impl DbTxn, + key: KeyFor, + coin: Coin, + outputs: &[OutputFor], + ) { + let mut buf = Vec::with_capacity(outputs.len() * 128); + for output in outputs { + output.write(&mut buf).unwrap(); + } + SerializedOutputs::set(txn, key.to_bytes().as_ref(), coin, &buf); + } + pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { + SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin); + } + + pub(crate) fn queued_payments( + getter: &impl Get, + key: KeyFor, + coin: Coin, + ) -> Option>>> { + let buf = SerializedQueuedPayments::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(Payment::read(&mut buf).unwrap()); + } + Some(res) + } + pub(crate) fn set_queued_payments( + txn: &mut impl DbTxn, + key: KeyFor, + coin: Coin, + queued: &[Payment>], + ) { + let mut buf = Vec::with_capacity(queued.len() * 128); + for queued in queued { + queued.write(&mut buf).unwrap(); + } + SerializedQueuedPayments::set(txn, key.to_bytes().as_ref(), coin, &buf); + } + pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { + SerializedQueuedPayments::del(txn, key.to_bytes().as_ref(), coin); + } + + pub(crate) fn queue_pending_branch( + txn: &mut impl DbTxn, + key: KeyFor, + balance: Balance, + child: &TreeTransaction>, + ) { + PendingBranch::send(txn, key.to_bytes().as_ref(), balance, &borsh::to_vec(child).unwrap()) + } + pub(crate) fn 
take_pending_branch( + txn: &mut impl DbTxn, + key: KeyFor, + balance: Balance, + ) -> Option>> { + PendingBranch::try_recv(txn, key.to_bytes().as_ref(), balance) + .map(|bytes| TreeTransaction::>::deserialize(&mut bytes.as_slice()).unwrap()) + } +} diff --git a/processor/scheduler/utxo/standard/src/lib.rs b/processor/scheduler/utxo/standard/src/lib.rs new file mode 100644 index 000000000..f69ca54b6 --- /dev/null +++ b/processor/scheduler/utxo/standard/src/lib.rs @@ -0,0 +1,508 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::marker::PhantomData; +use std::collections::HashMap; + +use group::GroupEncoding; + +use serai_primitives::{Coin, Amount, Balance}; + +use serai_db::DbTxn; + +use primitives::{ReceivedOutput, Payment}; +use scanner::{ + LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor, + SchedulerUpdate, Scheduler as SchedulerTrait, +}; +use scheduler_primitives::*; +use utxo_scheduler_primitives::*; + +mod db; +use db::Db; + +/// A scheduler of transactions for networks premised on the UTXO model. +pub struct Scheduler>(PhantomData, PhantomData
); + +impl> Scheduler { + fn aggregate_inputs( + txn: &mut impl DbTxn, + block: &BlockFor, + key_for_change: KeyFor, + key: KeyFor, + coin: Coin, + ) -> Vec> { + let mut eventualities = vec![]; + + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let mut outputs = Db::::outputs(txn, key, coin).unwrap(); + outputs.sort_by_key(|output| output.balance().amount.0); + while outputs.len() > P::MAX_INPUTS { + let to_aggregate = outputs.drain(.. P::MAX_INPUTS).collect::>(); + + let Some(planned) = P::plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + to_aggregate, + vec![], + Some(key_for_change), + ) else { + continue; + }; + + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + } + + Db::::set_outputs(txn, key, coin, &outputs); + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + eventualities + } + + fn fulfillable_payments( + txn: &mut impl DbTxn, + operating_costs: &mut u64, + key: KeyFor, + coin: Coin, + value_of_outputs: u64, + ) -> Vec>> { + // Fetch all payments for this key + let mut payments = Db::::queued_payments(txn, key, coin).unwrap(); + if payments.is_empty() { + return vec![]; + } + + loop { + // inputs must be >= (payments - operating costs) + // Accordingly, (inputs + operating costs) must be >= payments + let value_fulfillable = value_of_outputs + *operating_costs; + + // Drop to just the payments we can currently fulfill + { + let mut can_handle = 0; + let mut value_used = 0; + for payment in &payments { + value_used += payment.balance().amount.0; + if value_fulfillable < value_used { + break; + } + can_handle += 1; + } + + let remaining_payments = payments.drain(can_handle ..).collect::>(); + // Restore the rest to the database + Db::::set_queued_payments(txn, key, coin, &remaining_payments); + } + + // If these payments are worth less than the operating costs, immediately drop them + let payments_value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + if payments_value <= *operating_costs { + *operating_costs -= payments_value; + Db::::set_operating_costs(txn, coin, Amount(*operating_costs)); + + // Reset payments to the queued payments + payments = Db::::queued_payments(txn, key, coin).unwrap(); + // If there's no more payments, stop looking for which payments we should fulfill + if payments.is_empty() { + return vec![]; + } + // Find which of these we should handle + continue; + } + + return payments; + } + } + + fn queue_branches( + txn: &mut impl DbTxn, + key: KeyFor, + coin: Coin, + effected_payments: Vec, + tx: TreeTransaction>, + ) { + match tx { + TreeTransaction::Leaves { .. } => {} + TreeTransaction::Branch { mut children, .. } => { + children.sort_by_key(TreeTransaction::value); + children.reverse(); + + /* + This may only be a subset of payments but it'll be the originally-highest-valued + payments. `zip` will truncate to the first children which will be the highest-valued + children thanks to our sort. 
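+ As a hypothetical illustration: if `effected_payments` is [33, 23] and the children,
+ sorted high to low, are valued [35, 25, 10], `zip` pairs 33 with the 35-valued child
+ and 23 with the 25-valued child; the 10-valued child is never queued and is dropped
+ along with its descendants.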
+ */ + for (amount, child) in effected_payments.into_iter().zip(children) { + Db::::queue_pending_branch(txn, key, Balance { coin, amount }, &child); + } + } + } + } + + fn handle_branch( + txn: &mut impl DbTxn, + block: &BlockFor, + eventualities: &mut Vec>, + output: OutputFor, + tx: TreeTransaction>, + ) -> bool { + let key = output.key(); + let coin = output.balance().coin; + let Some(payments) = tx.payments::(coin, &P::branch_address(key), output.balance().amount.0) + else { + // If this output has become too small to satisfy this branch, drop it + return false; + }; + + let Some(planned) = P::plan_transaction_with_fee_amortization( + // Uses 0 as there's no operating costs to incur/amortize here + &mut 0, + P::fee_rate(block, coin), + vec![output], + payments, + None, + ) else { + // This Branch isn't viable, so drop it (and its children) + return false; + }; + + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + Self::queue_branches(txn, key, coin, planned.effected_payments, tx); + + true + } + + fn step( + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + block: &BlockFor, + key: KeyFor, + ) -> Vec> { + let mut eventualities = vec![]; + + let key_for_change = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active => active_keys[0].0, + LifetimeStage::UsingNewForChange | LifetimeStage::Forwarding | LifetimeStage::Finishing => { + active_keys[1].0 + } + }; + let branch_address = P::branch_address(key); + + 'coin: for coin in S::NETWORK.coins() { + let coin = *coin; + + // Perform any input aggregation we should + eventualities.append(&mut Self::aggregate_inputs(txn, block, key_for_change, key, coin)); + + // Fetch the operating costs/outputs + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, key, coin).unwrap(); + + // Fetch the fulfillable payments + let payments = Self::fulfillable_payments( + txn, + &mut operating_costs, + key, + coin, + outputs.iter().map(|output| output.balance().amount.0).sum(), + ); + if payments.is_empty() { + continue; + } + + // Create a tree to fulfill the payments + let mut tree = vec![P::tree(&payments)]; + + // Create the transaction for the root of the tree + // Try creating this transaction twice, once with a change output and once with increased + // operating costs to ensure a change output (as necessary to meet the requirements of the + // scanner API) + let mut planned_outer = None; + for i in 0 .. 2 { + let Some(planned) = P::plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + outputs.clone(), + tree[0] + .payments::(coin, &branch_address, tree[0].value()) + .expect("payments were dropped despite providing an input of the needed value"), + Some(key_for_change), + ) else { + // This should trip on the first iteration or not at all + assert_eq!(i, 0); + // This doesn't have inputs even worth aggregating so drop the entire tree + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + continue 'coin; + }; + + // If this doesn't have a change output, increase operating costs and try again + if !planned.has_change { + /* + Since we'll create a change output if it's worth at least dust, amortizing dust from + the payments should solve this. 
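+ As a hypothetical illustration: with dust of 100, the retry raises `operating_costs`
+ by 100, so the planner either leaves at least 100 behind (creating a change output)
+ or amortizes the payments further down.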
If the new transaction can't afford those operating + costs, then the payments should be amortized out, causing there to be a change or no + transaction at all. + */ + operating_costs += S::dust(coin).0; + continue; + } + + // Since this had a change output, move forward with it + planned_outer = Some(planned); + break; + } + let Some(planned) = planned_outer else { + panic!("couldn't create a tree root with a change output") + }; + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + // Now save the next layer of the tree to the database + // We'll execute it when it appears + Self::queue_branches(txn, key, coin, planned.effected_payments, tree.remove(0)); + } + + eventualities + } + + fn flush_outputs( + txn: &mut impl DbTxn, + eventualities: &mut HashMap, Vec>>, + block: &BlockFor, + from: KeyFor, + to: KeyFor, + coin: Coin, + ) { + let from_bytes = from.to_bytes().as_ref().to_vec(); + // Ensure our inputs are aggregated + eventualities + .entry(from_bytes.clone()) + .or_insert(vec![]) + .append(&mut Self::aggregate_inputs(txn, block, to, from, coin)); + + // Now that our inputs are aggregated, transfer all of them to the new key + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, from, coin).unwrap(); + if outputs.is_empty() { + return; + } + let planned = P::plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + outputs, + vec![], + Some(to), + ); + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + let Some(planned) = planned else { return }; + + TransactionsToSign::::send(txn, &from, &planned.signable); + eventualities.get_mut(&from_bytes).unwrap().push(planned.eventuality); + } +} + +impl> SchedulerTrait for Scheduler { + fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, *coin).is_none()); + Db::::set_outputs(txn, key, *coin, &[]); + assert!(Db::::queued_payments(txn, key, *coin).is_none()); + Db::::set_queued_payments(txn, key, *coin, &[]); + } + } + + fn flush_key( + txn: &mut impl DbTxn, + block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> HashMap, Vec>> { + let mut eventualities = HashMap::new(); + for coin in S::NETWORK.coins() { + // Move the payments to the new key + { + let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); + let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); + + let mut queued = still_queued; + queued.append(&mut new_queued); + + Db::::set_queued_payments(txn, retiring_key, *coin, &[]); + Db::::set_queued_payments(txn, new_key, *coin, &queued); + } + + // Move the outputs to the new key + Self::flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin); + } + eventualities + } + + fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, *coin).unwrap().is_empty()); + Db::::del_outputs(txn, key, *coin); + assert!(Db::::queued_payments(txn, key, *coin).unwrap().is_empty()); + Db::::del_queued_payments(txn, key, *coin); + } + } + + fn update( + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + update: SchedulerUpdate, + ) -> HashMap, Vec>> { + let mut eventualities = HashMap::new(); + + // Accumulate the new outputs + { + let mut outputs_by_key = HashMap::new(); + for output in update.outputs() { + 
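+ // Each output takes one of two paths: if its (key, balance) pair matches a branch
+ // we previously queued, it funds that branch transaction directly; otherwise (or if
+ // that branch is no longer viable) it's accumulated into the key's output set below.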
// If this aligns for a branch, handle it + if let Some(branch) = Db::::take_pending_branch(txn, output.key(), output.balance()) { + if Self::handle_branch( + txn, + block, + eventualities.entry(output.key().to_bytes().as_ref().to_vec()).or_insert(vec![]), + output.clone(), + branch, + ) { + // If we could use it for a branch, we do and move on + // Else, we let it be accumulated by the standard accumulation code + continue; + } + } + + let coin = output.balance().coin; + outputs_by_key + // Index by key and coin + .entry((output.key().to_bytes().as_ref().to_vec(), coin)) + // If we haven't previously accumulated here, read the outputs from the database + .or_insert_with(|| (output.key(), Db::::outputs(txn, output.key(), coin).unwrap())) + .1 + .push(output.clone()); + } + // Write the outputs back to the database + for ((_key_vec, coin), (key, outputs)) in outputs_by_key { + Db::::set_outputs(txn, key, coin, &outputs); + } + } + + // Fulfill the payments we previously couldn't + for (key, _stage) in active_keys { + eventualities + .entry(key.to_bytes().as_ref().to_vec()) + .or_insert(vec![]) + .append(&mut Self::step(txn, active_keys, block, *key)); + } + + // If this key has been flushed, forward all outputs + match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => {} + LifetimeStage::Forwarding | LifetimeStage::Finishing => { + for coin in S::NETWORK.coins() { + Self::flush_outputs( + txn, + &mut eventualities, + block, + active_keys[0].0, + active_keys[1].0, + *coin, + ); + } + } + } + + // Create the transactions for the forwards/burns + { + let mut planned_txs = vec![]; + for forward in update.forwards() { + let key = forward.key(); + + assert_eq!(active_keys.len(), 2); + assert_eq!(active_keys[0].1, LifetimeStage::Forwarding); + assert_eq!(active_keys[1].1, LifetimeStage::Active); + let forward_to_key = active_keys[1].0; + + let Some(plan) = P::plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be forwarded, we simply drop it + &mut 0, + P::fee_rate(block, forward.balance().coin), + vec![forward.clone()], + vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)], + None, + ) else { + continue; + }; + planned_txs.push((key, plan)); + } + for to_return in update.returns() { + let key = to_return.output().key(); + let out_instruction = + Payment::new(to_return.address().clone(), to_return.output().balance(), None); + let Some(plan) = P::plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be returned, we simply drop it + &mut 0, + P::fee_rate(block, out_instruction.balance().coin), + vec![to_return.output().clone()], + vec![out_instruction], + None, + ) else { + continue; + }; + planned_txs.push((key, plan)); + } + + for (key, planned_tx) in planned_txs { + // Send the transactions off for signing + TransactionsToSign::::send(txn, &key, &planned_tx.signable); + + // Insert the Eventualities into the result + eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality); + } + + eventualities + } + } + + fn fulfill( + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> HashMap, Vec>> { + // Find the key to fulfill these payments with + let fulfillment_key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting =>
{ + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + // Queue the payments for this key + for coin in S::NETWORK.coins() { + let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, *coin).unwrap(); + queued_payments + .extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned()); + Db::::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments); + } + + // Handle the queued payments + HashMap::from([( + fulfillment_key.to_bytes().as_ref().to_vec(), + Self::step(txn, active_keys, block, fulfillment_key), + )]) + } +} diff --git a/processor/scheduler/utxo/transaction-chaining/README.md b/processor/scheduler/utxo/transaction-chaining/README.md index 0788ff53c..a129b6693 100644 --- a/processor/scheduler/utxo/transaction-chaining/README.md +++ b/processor/scheduler/utxo/transaction-chaining/README.md @@ -9,7 +9,7 @@ to build and sign a transaction spending it. The scheduler is designed to achieve fulfillment of all expected payments with an `O(1)` delay (regardless of prior scheduler state), `O(log n)` time, and -`O(n)` computational complexity. +`O(log(n) + n)` computational complexity. Due to the ability to chain transactions, we can immediately plan/sign dependent transactions. For the time/computational complexity, we use a tree to fulfill diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index 321d4b60f..9a4ed2ebc 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -7,7 +7,7 @@ use std::collections::HashMap; use group::GroupEncoding; -use serai_primitives::{Coin, Amount, Balance}; +use serai_primitives::{Coin, Amount}; use serai_db::DbTxn; @@ -22,114 +22,6 @@ use utxo_scheduler_primitives::*; mod db; use db::Db; -#[derive(Clone)] -enum TreeTransaction { - Leaves { payments: Vec>>, value: u64 }, - Branch { children: Vec, value: u64 }, -} -impl TreeTransaction { - fn children(&self) -> usize { - match self { - Self::Leaves { payments, .. } => payments.len(), - Self::Branch { children, .. } => children.len(), - } - } - fn value(&self) -> u64 { - match self { - Self::Leaves { value, .. } | Self::Branch { value, .. } => *value, - } - } - fn payments( - &self, - coin: Coin, - branch_address: &AddressFor, - input_value: u64, - ) -> Option>>> { - // Fetch the amounts for the payments we'll make - let mut amounts: Vec<_> = match self { - Self::Leaves { payments, .. } => { - payments.iter().map(|payment| Some(payment.balance().amount.0)).collect() - } - Self::Branch { children, .. 
} => children.iter().map(|child| Some(child.value())).collect(), - }; - - // We need to reduce them so their sum is our input value - assert!(input_value <= self.value()); - let amount_to_amortize = self.value() - input_value; - - // If any payments won't survive the reduction, set them to None - let mut amortized = 0; - 'outer: while amounts.iter().any(Option::is_some) && (amortized < amount_to_amortize) { - let adjusted_fee = amount_to_amortize - amortized; - let amounts_len = - u64::try_from(amounts.iter().filter(|amount| amount.is_some()).count()).unwrap(); - let per_payment_fee_check = adjusted_fee.div_ceil(amounts_len); - - // Check each amount to see if it's not viable - let mut i = 0; - while i < amounts.len() { - if let Some(amount) = amounts[i] { - if amount.saturating_sub(per_payment_fee_check) < S::dust(coin).0 { - amounts[i] = None; - amortized += amount; - // If this amount wasn't viable, re-run with the new fee/amortization amounts - continue 'outer; - } - } - i += 1; - } - - // Now that we have the payments which will survive, reduce them - for (i, amount) in amounts.iter_mut().enumerate() { - if let Some(amount) = amount { - *amount -= adjusted_fee / amounts_len; - if i < usize::try_from(adjusted_fee % amounts_len).unwrap() { - *amount -= 1; - } - } - } - break; - } - - // Now that we have the reduced amounts, create the payments - let payments: Vec<_> = match self { - Self::Leaves { payments, .. } => { - payments - .iter() - .zip(amounts) - .filter_map(|(payment, amount)| { - amount.map(|amount| { - // The existing payment, with the new amount - Payment::new( - payment.address().clone(), - Balance { coin, amount: Amount(amount) }, - payment.data().clone(), - ) - }) - }) - .collect() - } - Self::Branch { .. } => { - amounts - .into_iter() - .filter_map(|amount| { - amount.map(|amount| { - // A branch output with the new amount - Payment::new(branch_address.clone(), Balance { coin, amount: Amount(amount) }, None) - }) - }) - .collect() - } - }; - - // Use None for vec![] so we never actually use vec![] - if payments.is_empty() { - None?; - } - Some(payments) - } -} - /// The outputs which will be effected by a PlannedTransaction and received by Serai. 
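/// With transaction chaining, these outputs may be used to plan and sign dependent
/// transactions before the transaction creating them has confirmed on-chain.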
pub struct EffectedReceivedOutputs(Vec>); @@ -306,30 +198,8 @@ impl>> Sched assert!(Db::::queued_payments(txn, key, coin).unwrap().is_empty()); } - // Create a tree to fulfillthe payments - // This variable is for the current layer of the tree being built - let mut tree = Vec::with_capacity(payments.len().div_ceil(P::MAX_OUTPUTS)); - - // Push the branches for the leaves (the payments out) - for payments in payments.chunks(P::MAX_OUTPUTS) { - let value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); - tree.push(TreeTransaction::::Leaves { payments: payments.to_vec(), value }); - } - - // While we haven't calculated a tree root, or the tree root doesn't support a change output, - // keep working - while (tree.len() != 1) || (tree[0].children() == P::MAX_OUTPUTS) { - let mut branch_layer = vec![]; - for children in tree.chunks(P::MAX_OUTPUTS) { - branch_layer.push(TreeTransaction::::Branch { - children: children.to_vec(), - value: children.iter().map(TreeTransaction::value).sum(), - }); - } - tree = branch_layer; - } - assert_eq!(tree.len(), 1); - assert!((tree[0].children() + 1) <= P::MAX_OUTPUTS); + // Create a tree to fulfill the payments + let mut tree = vec![P::tree(&payments)]; // Create the transaction for the root of the tree let mut branch_outputs = { @@ -343,7 +213,7 @@ impl>> Sched P::fee_rate(block, coin), outputs.clone(), tree[0] - .payments(coin, &branch_address, tree[0].value()) + .payments::(coin, &branch_address, tree[0].value()) .expect("payments were dropped despite providing an input of the needed value"), Some(key_for_change), ) else { @@ -355,7 +225,7 @@ impl>> Sched }; // If this doesn't have a change output, increase operating costs and try again - if !planned.auxilliary.0.iter().any(|output| output.kind() == OutputType::Change) { + if !planned.has_change { /* Since we'll create a change output if it's worth at least dust, amortizing dust from the payments should solve this. If the new transaction can't afford those operating @@ -399,11 +269,13 @@ impl>> Sched TreeTransaction::Branch { children, .. 
} => children, }; while !tree.is_empty() { - // Sort the branch outputs by their value + // Sort the branch outputs by their value (high to low) branch_outputs.sort_by_key(|a| a.balance().amount.0); + branch_outputs.reverse(); // Sort the transactions we should create by their value so they share an order with the // branch outputs tree.sort_by_key(TreeTransaction::value); + tree.reverse(); // If we dropped any Branch outputs, drop the associated children tree.truncate(branch_outputs.len()); @@ -417,7 +289,8 @@ impl>> Sched for (branch_output, tx) in branch_outputs_for_this_layer.into_iter().zip(this_layer) { assert_eq!(branch_output.kind(), OutputType::Branch); - let Some(payments) = tx.payments(coin, &branch_address, branch_output.balance().amount.0) + let Some(payments) = + tx.payments::(coin, &branch_address, branch_output.balance().amount.0) else { // If this output has become too small to satisfy this branch, drop it continue; @@ -550,8 +423,9 @@ impl>> Sched // Fulfill the payments we prior couldn't let mut eventualities = HashMap::new(); for (key, _stage) in active_keys { - eventualities - .insert(key.to_bytes().as_ref().to_vec(), Self::step(txn, active_keys, block, *key)); + assert!(eventualities + .insert(key.to_bytes().as_ref().to_vec(), Self::step(txn, active_keys, block, *key)) + .is_none()); } // If this key has been flushed, forward all outputs From 07e7068c06ddcc3714ac94d375074c9d0e35a236 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 4 Sep 2024 17:03:20 -0400 Subject: [PATCH 072/179] Remove vast swaths of legacy code in the processor --- processor/messages/src/lib.rs | 14 - processor/src/multisigs/db.rs | 260 ----- processor/src/multisigs/mod.rs | 1064 +-------------------- processor/src/multisigs/scheduler/mod.rs | 96 -- processor/src/multisigs/scheduler/utxo.rs | 631 ------------ processor/src/plan.rs | 212 ---- 6 files changed, 1 insertion(+), 2276 deletions(-) delete mode 100644 processor/src/multisigs/db.rs delete mode 100644 processor/src/multisigs/scheduler/mod.rs delete mode 100644 processor/src/multisigs/scheduler/utxo.rs delete mode 100644 processor/src/plan.rs diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index 096fddb9e..27d75d2e9 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -102,10 +102,6 @@ pub mod sign { Shares { id: SignId, shares: HashMap> }, // Re-attempt a signing protocol. Reattempt { id: SignId }, - /* TODO - // Completed a signing protocol already. - Completed { session: Session, id: [u8; 32], tx: Vec }, - */ } impl CoordinatorMessage { @@ -118,7 +114,6 @@ pub mod sign { CoordinatorMessage::Preprocesses { id, .. } | CoordinatorMessage::Shares { id, .. } | CoordinatorMessage::Reattempt { id, .. } => id.session, - // TODO CoordinatorMessage::Completed { session, .. } => *session, } } } @@ -131,8 +126,6 @@ pub mod sign { Preprocesses { id: SignId, preprocesses: Vec> }, // Signed shares for the specified signing protocol. Shares { id: SignId, shares: Vec> }, - // Completed a signing protocol already. - // TODO Completed { session: Session, id: [u8; 32], tx: Vec }, } } @@ -330,11 +323,6 @@ impl CoordinatorMessage { sign::CoordinatorMessage::Preprocesses { id, .. } => (0, id), sign::CoordinatorMessage::Shares { id, .. } => (1, id), sign::CoordinatorMessage::Reattempt { id, .. 
} => (2, id), - // The coordinator should report all reported completions to the processor - // Accordingly, the intent is a combination of plan ID and actual TX - // While transaction alone may suffice, that doesn't cover cross-chain TX ID conflicts, - // which are possible - // TODO sign::CoordinatorMessage::Completed { id, tx, .. } => (3, (id, tx).encode()), }; let mut res = vec![COORDINATOR_UID, TYPE_SIGN_UID, sub]; @@ -406,8 +394,6 @@ impl ProcessorMessage { // Unique since SignId sign::ProcessorMessage::Preprocesses { id, .. } => (1, id.encode()), sign::ProcessorMessage::Shares { id, .. } => (2, id.encode()), - // Unique since a processor will only sign a TX once - // TODO sign::ProcessorMessage::Completed { id, .. } => (3, id.to_vec()), }; let mut res = vec![PROCESSOR_UID, TYPE_SIGN_UID, sub]; diff --git a/processor/src/multisigs/db.rs b/processor/src/multisigs/db.rs deleted file mode 100644 index 3d1d13bdf..000000000 --- a/processor/src/multisigs/db.rs +++ /dev/null @@ -1,260 +0,0 @@ -use std::io; - -use ciphersuite::Ciphersuite; -pub use serai_db::*; - -use scale::{Encode, Decode}; -use serai_client::{primitives::Balance, in_instructions::primitives::InInstructionWithBalance}; - -use crate::{ - Get, Plan, - networks::{Output, Transaction, Network}, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum PlanFromScanning { - Refund(N::Output, N::Address), - Forward(N::Output), -} - -impl PlanFromScanning { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - match kind[0] { - 0 => { - let output = N::Output::read(reader)?; - - let mut address_vec_len = [0; 4]; - reader.read_exact(&mut address_vec_len)?; - let mut address_vec = - vec![0; usize::try_from(u32::from_le_bytes(address_vec_len)).unwrap()]; - reader.read_exact(&mut address_vec)?; - let address = - N::Address::try_from(address_vec).map_err(|_| "invalid address saved to disk").unwrap(); - - Ok(PlanFromScanning::Refund(output, address)) - } - 1 => { - let output = N::Output::read(reader)?; - Ok(PlanFromScanning::Forward(output)) - } - _ => panic!("reading unrecognized PlanFromScanning"), - } - } - fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - PlanFromScanning::Refund(output, address) => { - writer.write_all(&[0])?; - output.write(writer)?; - - let address_vec: Vec = - address.clone().try_into().map_err(|_| "invalid address being refunded to").unwrap(); - writer.write_all(&u32::try_from(address_vec.len()).unwrap().to_le_bytes())?; - writer.write_all(&address_vec) - } - PlanFromScanning::Forward(output) => { - writer.write_all(&[1])?; - output.write(writer) - } - } - } -} - -create_db!( - MultisigsDb { - NextBatchDb: () -> u32, - PlanDb: (id: &[u8]) -> Vec, - PlansFromScanningDb: (block_number: u64) -> Vec, - OperatingCostsDb: () -> u64, - ResolvedDb: (tx: &[u8]) -> [u8; 32], - SigningDb: (key: &[u8]) -> Vec, - ForwardedOutputDb: (balance: Balance) -> Vec, - DelayedOutputDb: () -> Vec - } -); - -impl PlanDb { - pub fn save_active_plan( - txn: &mut impl DbTxn, - key: &[u8], - block_number: usize, - plan: &Plan, - operating_costs_at_time: u64, - ) { - let id = plan.id(); - - { - let mut signing = SigningDb::get(txn, key).unwrap_or_default(); - - // If we've already noted we're signing this, return - assert_eq!(signing.len() % 32, 0); - for i in 0 .. (signing.len() / 32) { - if signing[(i * 32) .. 
((i + 1) * 32)] == id { - return; - } - } - - signing.extend(&id); - SigningDb::set(txn, key, &signing); - } - - { - let mut buf = block_number.to_le_bytes().to_vec(); - plan.write(&mut buf).unwrap(); - buf.extend(&operating_costs_at_time.to_le_bytes()); - Self::set(txn, &id, &buf); - } - } - - pub fn active_plans(getter: &impl Get, key: &[u8]) -> Vec<(u64, Plan, u64)> { - let signing = SigningDb::get(getter, key).unwrap_or_default(); - let mut res = vec![]; - - assert_eq!(signing.len() % 32, 0); - for i in 0 .. (signing.len() / 32) { - let id = &signing[(i * 32) .. ((i + 1) * 32)]; - let buf = Self::get(getter, id).unwrap(); - - let block_number = u64::from_le_bytes(buf[.. 8].try_into().unwrap()); - let plan = Plan::::read::<&[u8]>(&mut &buf[8 ..]).unwrap(); - assert_eq!(id, &plan.id()); - let operating_costs = u64::from_le_bytes(buf[(buf.len() - 8) ..].try_into().unwrap()); - res.push((block_number, plan, operating_costs)); - } - res - } - - pub fn plan_by_key_with_self_change( - getter: &impl Get, - key: ::G, - id: [u8; 32], - ) -> bool { - let plan = Plan::::read::<&[u8]>(&mut &Self::get(getter, &id).unwrap()[8 ..]).unwrap(); - assert_eq!(plan.id(), id); - if let Some(change) = N::change_address(plan.key) { - (key == plan.key) && (Some(change) == plan.change) - } else { - false - } - } -} - -impl OperatingCostsDb { - pub fn take_operating_costs(txn: &mut impl DbTxn) -> u64 { - let existing = Self::get(txn).unwrap_or_default(); - txn.del(Self::key()); - existing - } - pub fn set_operating_costs(txn: &mut impl DbTxn, amount: u64) { - if amount != 0 { - Self::set(txn, &amount); - } - } -} - -impl ResolvedDb { - pub fn resolve_plan( - txn: &mut impl DbTxn, - key: &[u8], - plan: [u8; 32], - resolution: &>::Id, - ) { - let mut signing = SigningDb::get(txn, key).unwrap_or_default(); - assert_eq!(signing.len() % 32, 0); - - let mut found = false; - for i in 0 .. (signing.len() / 32) { - let start = i * 32; - let end = i + 32; - if signing[start .. end] == plan { - found = true; - signing = [&signing[.. 
start], &signing[end ..]].concat(); - break; - } - } - - if !found { - log::warn!("told to finish signing {} yet wasn't actively signing it", hex::encode(plan)); - } - SigningDb::set(txn, key, &signing); - Self::set(txn, resolution.as_ref(), &plan); - } -} - -impl PlansFromScanningDb { - pub fn set_plans_from_scanning( - txn: &mut impl DbTxn, - block_number: usize, - plans: Vec>, - ) { - let mut buf = vec![]; - for plan in plans { - plan.write(&mut buf).unwrap(); - } - Self::set(txn, block_number.try_into().unwrap(), &buf); - } - - pub fn take_plans_from_scanning( - txn: &mut impl DbTxn, - block_number: usize, - ) -> Option>> { - let block_number = u64::try_from(block_number).unwrap(); - let res = Self::get(txn, block_number).map(|plans| { - let mut plans_ref = plans.as_slice(); - let mut res = vec![]; - while !plans_ref.is_empty() { - res.push(PlanFromScanning::::read(&mut plans_ref).unwrap()); - } - res - }); - if res.is_some() { - txn.del(Self::key(block_number)); - } - res - } -} - -impl ForwardedOutputDb { - pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) { - let mut existing = Self::get(txn, instruction.balance).unwrap_or_default(); - existing.extend(instruction.encode()); - Self::set(txn, instruction.balance, &existing); - } - - pub fn take_forwarded_output( - txn: &mut impl DbTxn, - balance: Balance, - ) -> Option { - let outputs = Self::get(txn, balance)?; - let mut outputs_ref = outputs.as_slice(); - let res = InInstructionWithBalance::decode(&mut outputs_ref).unwrap(); - assert!(outputs_ref.len() < outputs.len()); - if outputs_ref.is_empty() { - txn.del(Self::key(balance)); - } else { - Self::set(txn, balance, &outputs); - } - Some(res) - } -} - -impl DelayedOutputDb { - pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) { - let mut existing = Self::get(txn).unwrap_or_default(); - existing.extend(instruction.encode()); - Self::set(txn, &existing); - } - - pub fn take_delayed_outputs(txn: &mut impl DbTxn) -> Vec { - let Some(outputs) = Self::get(txn) else { return vec![] }; - txn.del(Self::key()); - - let mut outputs_ref = outputs.as_slice(); - let mut res = vec![]; - while !outputs_ref.is_empty() { - res.push(InInstructionWithBalance::decode(&mut outputs_ref).unwrap()); - } - res - } -} diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs index 92ea0271a..c20a922ca 100644 --- a/processor/src/multisigs/mod.rs +++ b/processor/src/multisigs/mod.rs @@ -1,1070 +1,8 @@ -use core::time::Duration; -use std::collections::HashSet; - -use ciphersuite::{group::GroupEncoding, Ciphersuite}; - -use scale::{Encode, Decode}; -use messages::SubstrateContext; - -use serai_client::{ - primitives::{MAX_DATA_LEN, ExternalAddress, BlockHash, Data}, - in_instructions::primitives::{ - InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE, - }, - coins::primitives::{OutInstruction, OutInstructionWithBalance}, -}; - -use log::{info, error}; - -use tokio::time::sleep; - -/* TODO -#[cfg(not(test))] -mod scanner; -#[cfg(test)] -pub mod scanner; -*/ - -use scanner::{ScannerEvent, ScannerHandle, Scanner}; - -mod db; -use db::*; - -pub(crate) mod scheduler; -use scheduler::Scheduler; - -use crate::{ - Get, Db, Payment, Plan, - networks::{OutputType, Output, SignableTransaction, Eventuality, Block, PreparedSend, Network}, -}; - -// InInstructionWithBalance from an external output -fn instruction_from_output( - output: &N::Output, -) -> (Option, Option) { - 
assert_eq!(output.kind(), OutputType::External); - - let presumed_origin = output.presumed_origin().map(|address| { - ExternalAddress::new( - address - .try_into() - .map_err(|_| ()) - .expect("presumed origin couldn't be converted to a Vec"), - ) - .expect("presumed origin exceeded address limits") - }); - - let mut data = output.data(); - let max_data_len = usize::try_from(MAX_DATA_LEN).unwrap(); - if data.len() > max_data_len { - error!( - "data in output {} exceeded MAX_DATA_LEN ({MAX_DATA_LEN}): {}. skipping", - hex::encode(output.id()), - data.len(), - ); - return (presumed_origin, None); - } - - let shorthand = match Shorthand::decode(&mut data) { - Ok(shorthand) => shorthand, - Err(e) => { - info!("data in output {} wasn't valid shorthand: {e:?}", hex::encode(output.id())); - return (presumed_origin, None); - } - }; - let instruction = match RefundableInInstruction::try_from(shorthand) { - Ok(instruction) => instruction, - Err(e) => { - info!( - "shorthand in output {} wasn't convertible to a RefundableInInstruction: {e:?}", - hex::encode(output.id()) - ); - return (presumed_origin, None); - } - }; - - let mut balance = output.balance(); - // Deduct twice the cost to aggregate to prevent economic attacks by malicious miners against - // other users - balance.amount.0 -= 2 * N::COST_TO_AGGREGATE; - - ( - instruction.origin.or(presumed_origin), - Some(InInstructionWithBalance { instruction: instruction.instruction, balance }), - ) -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -enum RotationStep { - // Use the existing multisig for all actions (steps 1-3) - UseExisting, - // Use the new multisig as change (step 4) - NewAsChange, - // The existing multisig is expected to solely forward transactions at this point (step 5) - ForwardFromExisting, - // The existing multisig is expected to finish its own transactions and do nothing more - // (step 6) - ClosingExisting, -} - -// This explicitly shouldn't take the database as we prepare Plans we won't execute for fee -// estimates -async fn prepare_send( - network: &N, - block_number: usize, - plan: Plan, - operating_costs: u64, -) -> PreparedSend { - loop { - match network.prepare_send(block_number, plan.clone(), operating_costs).await { - Ok(prepared) => { - return prepared; - } - Err(e) => { - error!("couldn't prepare a send for plan {}: {e}", hex::encode(plan.id())); - // The processor is either trying to create an invalid TX (fatal) or the node went - // offline - // The former requires a patch, the latter is a connection issue - // If the latter, this is an appropriate sleep. 
If the former, we should panic, yet - // this won't flood the console ad infinitum - sleep(Duration::from_secs(60)).await; - } - } - } -} - -pub struct MultisigViewer { - activation_block: usize, - key: ::G, - scheduler: N::Scheduler, -} - #[allow(clippy::type_complexity)] #[derive(Clone, Debug)] pub enum MultisigEvent { // Batches to publish Batches(Option<(::G, ::G)>, Vec), // Eventuality completion found on-chain - Completed(Vec, [u8; 32], ::Completion), -} - -pub struct MultisigManager { - scanner: ScannerHandle, - existing: Option>, - new: Option>, -} - -impl MultisigManager { - pub async fn new( - raw_db: &D, - network: &N, - ) -> ( - Self, - Vec<::G>, - Vec<(Plan, N::SignableTransaction, N::Eventuality)>, - ) { - // The scanner has no long-standing orders to re-issue - let (mut scanner, current_keys) = Scanner::new(network.clone(), raw_db.clone()); - - let mut schedulers = vec![]; - - assert!(current_keys.len() <= 2); - let mut actively_signing = vec![]; - for (_, key) in &current_keys { - schedulers.push(N::Scheduler::from_db(raw_db, *key, N::NETWORK).unwrap()); - - // Load any TXs being actively signed - let key = key.to_bytes(); - for (block_number, plan, operating_costs) in PlanDb::active_plans::(raw_db, key.as_ref()) { - let block_number = block_number.try_into().unwrap(); - - let id = plan.id(); - info!("reloading plan {}: {:?}", hex::encode(id), plan); - - let key_bytes = plan.key.to_bytes(); - - let Some((tx, eventuality)) = - prepare_send(network, block_number, plan.clone(), operating_costs).await.tx - else { - panic!("previously created transaction is no longer being created") - }; - - scanner - .register_eventuality(key_bytes.as_ref(), block_number, id, eventuality.clone()) - .await; - actively_signing.push((plan, tx, eventuality)); - } - } - - ( - MultisigManager { - scanner, - existing: current_keys.first().copied().map(|(activation_block, key)| MultisigViewer { - activation_block, - key, - scheduler: schedulers.remove(0), - }), - new: current_keys.get(1).copied().map(|(activation_block, key)| MultisigViewer { - activation_block, - key, - scheduler: schedulers.remove(0), - }), - }, - current_keys.into_iter().map(|(_, key)| key).collect(), - actively_signing, - ) - } - - /// Returns the block number for a block hash, if it's known and all keys have scanned the block. - // This is guaranteed to atomically increment so long as no new keys are added to the scanner - // which activate at a block before the currently highest scanned block. This is prevented by - // the processor waiting for `Batch` inclusion before scanning too far ahead, and activation only - // happening after the "too far ahead" window. 
- pub async fn block_number( - &self, - getter: &G, - hash: &>::Id, - ) -> Option { - let latest = ScannerHandle::::block_number(getter, hash)?; - - // While the scanner has cemented this block, that doesn't mean it's been scanned for all - // keys - // ram_scanned will return the lowest scanned block number out of all keys - if latest > self.scanner.ram_scanned().await { - return None; - } - Some(latest) - } - - pub async fn add_key( - &mut self, - txn: &mut D::Transaction<'_>, - activation_block: usize, - external_key: ::G, - ) { - self.scanner.register_key(txn, activation_block, external_key).await; - let viewer = Some(MultisigViewer { - activation_block, - key: external_key, - scheduler: N::Scheduler::new::(txn, external_key, N::NETWORK), - }); - - if self.existing.is_none() { - self.existing = viewer; - return; - } - self.new = viewer; - } - - fn current_rotation_step(&self, block_number: usize) -> RotationStep { - let Some(new) = self.new.as_ref() else { return RotationStep::UseExisting }; - - // Period numbering here has no meaning other than these are the time values useful here, and - // the order they're calculated in. They have no reference/shared marker with anything else - - // ESTIMATED_BLOCK_TIME_IN_SECONDS is fine to use here. While inaccurate, it shouldn't be - // drastically off, and even if it is, it's a hiccup to latency handling only possible when - // rotating. The error rate wouldn't be acceptable if it was allowed to accumulate over time, - // yet rotation occurs on Serai's clock, disconnecting any errors here from any prior. - - // N::CONFIRMATIONS + 10 minutes - let period_1_start = new.activation_block + - N::CONFIRMATIONS + - (10usize * 60).div_ceil(N::ESTIMATED_BLOCK_TIME_IN_SECONDS); - - // N::CONFIRMATIONS - let period_2_start = period_1_start + N::CONFIRMATIONS; - - // 6 hours after period 2 - // Also ensure 6 hours is greater than the amount of CONFIRMATIONS, for sanity purposes - let period_3_start = - period_2_start + ((6 * 60 * 60) / N::ESTIMATED_BLOCK_TIME_IN_SECONDS).max(N::CONFIRMATIONS); - - if block_number < period_1_start { - RotationStep::UseExisting - } else if block_number < period_2_start { - RotationStep::NewAsChange - } else if block_number < period_3_start { - RotationStep::ForwardFromExisting - } else { - RotationStep::ClosingExisting - } - } - - // Convert new Burns to Payments. - // - // Also moves payments from the old Scheduler to the new multisig if the step calls for it. 
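// A minimal sketch, not part of the deleted code: the window arithmetic of
// `current_rotation_step` above as a standalone helper. `confirmations` and
// `block_time_secs` are hypothetical stand-ins for N::CONFIRMATIONS and
// N::ESTIMATED_BLOCK_TIME_IN_SECONDS.
fn rotation_windows(
  activation_block: usize,
  confirmations: usize,
  block_time_secs: usize,
) -> (usize, usize, usize) {
  // NewAsChange starts CONFIRMATIONS plus ~10 minutes of blocks after activation
  let period_1_start =
    activation_block + confirmations + (10usize * 60).div_ceil(block_time_secs);
  // ForwardFromExisting starts a further CONFIRMATIONS blocks in
  let period_2_start = period_1_start + confirmations;
  // ClosingExisting starts ~6 hours later, and no sooner than CONFIRMATIONS blocks later
  let period_3_start =
    period_2_start + ((6 * 60 * 60) / block_time_secs).max(confirmations);
  (period_1_start, period_2_start, period_3_start)
}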
- fn burns_to_payments( - &mut self, - txn: &mut D::Transaction<'_>, - step: RotationStep, - burns: Vec, - ) -> (Vec>, Vec>) { - let mut payments = vec![]; - for out in burns { - let OutInstructionWithBalance { instruction: OutInstruction { address, data }, balance } = - out; - assert_eq!(balance.coin.network(), N::NETWORK); - - if let Ok(address) = N::Address::try_from(address.consume()) { - payments.push(Payment { address, data: data.map(Data::consume), balance }); - } - } - - let payments = payments; - match step { - RotationStep::UseExisting | RotationStep::NewAsChange => (payments, vec![]), - RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => { - // Consume any payments the prior scheduler was unable to complete - // This should only actually matter once - let mut new_payments = self.existing.as_mut().unwrap().scheduler.consume_payments::(txn); - // Add the new payments - new_payments.extend(payments); - (vec![], new_payments) - } - } - } - - fn split_outputs_by_key(&self, outputs: Vec) -> (Vec, Vec) { - let mut existing_outputs = Vec::with_capacity(outputs.len()); - let mut new_outputs = vec![]; - - let existing_key = self.existing.as_ref().unwrap().key; - let new_key = self.new.as_ref().map(|new| new.key); - for output in outputs { - if output.key() == existing_key { - existing_outputs.push(output); - } else { - assert_eq!(Some(output.key()), new_key); - new_outputs.push(output); - } - } - - (existing_outputs, new_outputs) - } - - fn refund_plan( - scheduler: &mut N::Scheduler, - txn: &mut D::Transaction<'_>, - output: N::Output, - refund_to: N::Address, - ) -> Plan { - log::info!("creating refund plan for {}", hex::encode(output.id())); - assert_eq!(output.kind(), OutputType::External); - scheduler.refund_plan::(txn, output, refund_to) - } - - // Returns the plan for forwarding if one is needed. - // Returns None if one is not needed to forward this output. - fn forward_plan(&mut self, txn: &mut D::Transaction<'_>, output: &N::Output) -> Option> { - log::info!("creating forwarding plan for {}", hex::encode(output.id())); - let res = self.existing.as_mut().unwrap().scheduler.forward_plan::( - txn, - output.clone(), - self.new.as_ref().expect("forwarding plan yet no new multisig").key, - ); - if res.is_none() { - log::info!("no forwarding plan was necessary for {}", hex::encode(output.id())); - } - res - } - - // Filter newly received outputs due to the step being RotationStep::ClosingExisting. - // - // Returns the Plans for the `Branch`s which should be created off outputs which passed the - // filter. - fn filter_outputs_due_to_closing( - &mut self, - txn: &mut D::Transaction<'_>, - existing_outputs: &mut Vec, - ) -> Vec> { - /* - The document says to only handle outputs we created. We don't know what outputs we - created. We do have an ordered view of equivalent outputs however, and can assume the - first (and likely only) ones are the ones we created. - - Accordingly, only handling outputs we created should be definable as only handling - outputs from the resolution of Eventualities. - - This isn't feasible. It requires knowing what Eventualities were completed in this block, - when we handle this block, which we don't know without fully serialized scanning + Batch - publication. - - Take the following scenario: - 1) A network uses 10 confirmations. Block x is scanned, meaning x+9a exists. - 2) 67% of nodes process x, create, sign, and publish a TX, creating an Eventuality. - 3) A reorganization to a shorter chain occurs, including the published TX in x+1b. 
- 4) The 33% of nodes which are latent will be allowed to scan x+1b as soon as x+10b - exists. They won't wait for Serai to include the Batch for x until they try to scan - x+10b. - 5) These latent nodes will handle x+1b, post-create an Eventuality, post-learn x+1b - contained resolutions, changing how x+1b should've been interpreted. - - We either have to: - A) Fully serialize scanning (removing the ability to utilize throughput to allow higher - latency, at least while the step is `ClosingExisting`). - B) Create Eventualities immediately, which we can't do as then both the external - network's clock AND Serai's clock can trigger Eventualities, removing ordering. - We'd need to shift entirely to the external network's clock, only handling Burns - outside the parallelization window (which would be extremely latent). - C) Use a different mechanism to determine if we created an output. - D) Re-define which outputs are still to be handled after the 6 hour period expires, such - that the multisig's lifetime cannot be further extended yet it does fulfill its - responsibility. - - External outputs to the existing multisig will be: - - Scanned before the rotation and unused (as used External outputs become Change) - - Forwarded immediately upon scanning - - Not scanned before the cut off time (and accordingly dropped) - - For the first case, since they're scanned before the rotation and unused, they'll be - forwarded with all other available outputs (since they'll be available when scanned). - - Change outputs will be: - - Scanned before the rotation and forwarded with all other available outputs - - Forwarded immediately upon scanning - - Not scanned before the cut off time, requiring an extension exclusive to these outputs - - The important thing to note about honest Change outputs to the existing multisig is that - they'll only be created within `CONFIRMATIONS+1` blocks of the activation block. Also - important to note is that there's another explicit window of `CONFIRMATIONS` before the - 6 hour window. - - Eventualities are not guaranteed to be known before we scan the block containing their - resolution. They are guaranteed to be known within `CONFIRMATIONS-1` blocks however, due - to the limitation on how far we'll scan ahead. - - This means we will know of all Eventualities related to Change outputs we need to forward - before the 6 hour period begins (as forwarding outputs will not create any Change outputs - to the existing multisig). - - This means a definition of complete can be defined as: - 1) Handled all Branch outputs - 2) Forwarded all External outputs received before the end of 6 hour window - 3) Forwarded the results of all Eventualities with Change, which will have been created - before the 6 hour window - - How can we track and ensure this without needing to check if an output is from the - resolution of an Eventuality? - - 1) We only create Branch outputs before the 6 hour window starts. These are guaranteed - to appear within `CONFIRMATIONS` blocks. They will exist with arbitrary depth however, - meaning that upon completion they will spawn several more Eventualities. The further - created Eventualities re-risk being present after the 6 hour period ends. - - We can: - 1) Build a queue for Branch outputs, delaying their handling until relevant - Eventualities are guaranteed to be present. 
- - This solution would theoretically work for all outputs and allow collapsing this - problem to simply: - - > Accordingly, only handling outputs we created should be definable as only - handling outputs from the resolution of Eventualities. - - 2) Create all Eventualities under a Branch at time of Branch creation. - This idea fails as Plans are tightly bound to outputs. - - 3) Don't track Branch outputs by Eventualities, yet by the amount of Branch outputs - remaining. Any Branch output received, of a useful amount, is assumed to be our - own and handled. All other Branch outputs, even if they're the completion of some - Eventuality, are dropped. - - This avoids needing any additional queue, avoiding additional pipelining/latency. - - 2) External outputs are self-evident. We simply stop handling them at the cut-off point, - and only start checking after `CONFIRMATIONS` blocks if all Eventualities are - complete. - - 3) Since all Change Eventualities will be known prior to the 6 hour window's beginning, - we can safely check if a received Change output is the resolution of an Eventuality. - We only need to forward it if so. Forwarding it simply requires only checking if - Eventualities are complete after `CONFIRMATIONS` blocks, same as for straggling - External outputs. - */ - - let mut plans = vec![]; - existing_outputs.retain(|output| { - match output.kind() { - OutputType::External | OutputType::Forwarded => false, - OutputType::Branch => { - let scheduler = &mut self.existing.as_mut().unwrap().scheduler; - // There *would* be a race condition here due to the fact we only mark a `Branch` output - // as needed when we process the block (and handle scheduling), yet actual `Branch` - // outputs may appear as soon as the next block (and we scan the next block before we - // process the prior block) - // - // Unlike Eventuality checking, which happens on scanning and is therefore asynchronous, - // all scheduling (and this check against the scheduler) happens on processing, which is - // synchronous - // - // While we could move Eventuality checking into the block processing, removing its - // asynchronicity, we could only check data the Scanner deems important. The Scanner won't - // deem important Eventuality resolutions which don't create an output to Serai unless - // it knows of the Eventuality. Accordingly, at best we could have a split role (the - // Scanner noting completion of Eventualities which don't have relevant outputs, the - // processing noting completion of ones which do) - // - // This is unnecessary, due to the current flow around Eventuality resolutions and the - // current bounds naturally found being sufficiently amenable, yet notable for the future - if scheduler.can_use_branch(output.balance()) { - // We could simply call can_use_branch, yet it'd have an edge case where if we receive - // two outputs for 100, and we could use one such output, we'd handle both. - // - // Individually schedule each output once confirming they're usable in order to avoid - // this. - let mut plan = scheduler.schedule::( - txn, - vec![output.clone()], - vec![], - self.new.as_ref().unwrap().key, - false, - ); - assert_eq!(plan.len(), 1); - let plan = plan.remove(0); - plans.push(plan); - } - false - } - OutputType::Change => { - // If the TX containing this output resolved an Eventuality... - if let Some(plan) = ResolvedDb::get(txn, output.tx_id().as_ref()) { - // And the Eventuality had change... 
- // We need this check as Eventualities have a race condition and can't be relied - on, as extensively detailed above. Eventualities explicitly with change do have - a safe timing window however - if PlanDb::plan_by_key_with_self_change::( - txn, - // Pass the key so the DB checks the Plan's key is this multisig's, preventing a - // potential issue where the new multisig creates a Plan with change *and a - // payment to the existing multisig's change address* - self.existing.as_ref().unwrap().key, - plan, - ) { - // Then this is an honest change output we need to forward - // (or it's a payment to the change address in the same transaction as an honest - // change output, which is fine to let slip in) - return true; - } - } - false - } - } - }); - plans - } - - // Returns the Plans caused from a block being acknowledged. - // - // Will rotate keys if the block acknowledged is the retirement block. - async fn plans_from_block( - &mut self, - txn: &mut D::Transaction<'_>, - block_number: usize, - block_id: >::Id, - step: &mut RotationStep, - burns: Vec, - ) -> (bool, Vec>, HashSet<[u8; 32]>) { - let (mut existing_payments, mut new_payments) = self.burns_to_payments(txn, *step, burns); - - let mut plans = vec![]; - let mut plans_from_scanning = HashSet::new(); - - // We now have to acknowledge the acknowledged block, if it's new - // It won't be if this block's `InInstruction`s were split into multiple `Batch`s - let (acquired_lock, (mut existing_outputs, new_outputs)) = { - let (acquired_lock, mut outputs) = if ScannerHandle::::db_scanned(txn) - .expect("published a Batch despite never scanning a block") < - block_number - { - // Load plans created when we scanned the block - let scanning_plans = - PlansFromScanningDb::take_plans_from_scanning::(txn, block_number).unwrap(); - // Expand into actual plans - plans = scanning_plans - .into_iter() - .map(|plan| match plan { - PlanFromScanning::Refund(output, refund_to) => { - let existing = self.existing.as_mut().unwrap(); - if output.key() == existing.key { - Self::refund_plan(&mut existing.scheduler, txn, output, refund_to) - } else { - let new = self - .new - .as_mut() - .expect("new multisig didn't exist yet output wasn't for existing multisig"); - assert_eq!(output.key(), new.key, "output wasn't for existing nor new multisig"); - Self::refund_plan(&mut new.scheduler, txn, output, refund_to) - } - } - PlanFromScanning::Forward(output) => self - .forward_plan(txn, &output) - .expect("supposed to forward an output yet no forwarding plan"), - }) - .collect(); - - for plan in &plans { - plans_from_scanning.insert(plan.id()); - } - - let (is_retirement_block, outputs) = self.scanner.ack_block(txn, block_id.clone()).await; - if is_retirement_block { - let existing = self.existing.take().unwrap(); - assert!(existing.scheduler.empty()); - self.existing = self.new.take(); - *step = RotationStep::UseExisting; - assert!(existing_payments.is_empty()); - existing_payments = new_payments; - new_payments = vec![]; - } - (true, outputs) - } else { - (false, vec![]) - }; - - // Remove all outputs already present in plans - let mut output_set = HashSet::new(); - for plan in &plans { - for input in &plan.inputs { - output_set.insert(input.id().as_ref().to_vec()); - } - } - outputs.retain(|output| !output_set.remove(output.id().as_ref())); - assert_eq!(output_set.len(), 0); - - (acquired_lock, self.split_outputs_by_key(outputs)) - }; - - // If we're closing the existing multisig, filter its outputs down - if *step == RotationStep::ClosingExisting { - 
plans.extend(self.filter_outputs_due_to_closing(txn, &mut existing_outputs)); - } - - // Now that we've done all our filtering, schedule the existing multisig's outputs - plans.extend({ - let existing = self.existing.as_mut().unwrap(); - let existing_key = existing.key; - self.existing.as_mut().unwrap().scheduler.schedule::( - txn, - existing_outputs, - existing_payments, - match *step { - RotationStep::UseExisting => existing_key, - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => self.new.as_ref().unwrap().key, - }, - match *step { - RotationStep::UseExisting | RotationStep::NewAsChange => false, - RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => true, - }, - ) - }); - - for plan in &plans { - // This first equality should 'never meaningfully' be false - // All created plans so far are by the existing multisig EXCEPT: - // A) If we created a refund plan from the new multisig (yet that wouldn't have change) - // B) The existing Scheduler returned a Plan for the new key (yet that happens with the SC - // scheduler, yet that doesn't have change) - // Despite being 'unnecessary' now, it's better to explicitly ensure and be robust - if plan.key == self.existing.as_ref().unwrap().key { - if let Some(change) = N::change_address(plan.key) { - if plan.change == Some(change) { - // Assert these (self-change) are only created during the expected step - match *step { - RotationStep::UseExisting => {} - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => panic!("change was set to self despite rotating"), - } - } - } - } - } - - // Schedule the new multisig's outputs too - if let Some(new) = self.new.as_mut() { - plans.extend(new.scheduler.schedule::(txn, new_outputs, new_payments, new.key, false)); - } - - (acquired_lock, plans, plans_from_scanning) - } - - /// Handle a SubstrateBlock event, building the relevant Plans. 
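// A minimal sketch, not part of the deleted code: the step-dependent choices made when
// scheduling the existing multisig's outputs above, i.e. which key receives change and
// whether a spend is forced to drain the existing multisig.
fn change_key_and_force_spend<K>(step: RotationStep, existing: K, new: K) -> (K, bool) {
  match step {
    // Steps 1-3 keep change on the existing key and don't force a spend
    RotationStep::UseExisting => (existing, false),
    // Step 4 directs change to the new key
    RotationStep::NewAsChange => (new, false),
    // Steps 5-6 direct change to the new key and force the existing multisig to spend
    RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => (new, true),
  }
}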
- pub async fn substrate_block( - &mut self, - txn: &mut D::Transaction<'_>, - network: &N, - context: SubstrateContext, - burns: Vec, - ) -> (bool, Vec<(::G, [u8; 32], N::SignableTransaction, N::Eventuality)>) - { - let mut block_id = >::Id::default(); - block_id.as_mut().copy_from_slice(context.network_latest_finalized_block.as_ref()); - let block_number = ScannerHandle::::block_number(txn, &block_id) - .expect("SubstrateBlock with context we haven't synced"); - - // Determine what step of rotation we're currently in - let mut step = self.current_rotation_step(block_number); - - // Get the Plans from this block - let (acquired_lock, plans, plans_from_scanning) = - self.plans_from_block(txn, block_number, block_id, &mut step, burns).await; - - let res = { - let mut res = Vec::with_capacity(plans.len()); - - for plan in plans { - let id = plan.id(); - info!("preparing plan {}: {:?}", hex::encode(id), plan); - - let key = plan.key; - let key_bytes = key.to_bytes(); - - let (tx, post_fee_branches) = { - let running_operating_costs = OperatingCostsDb::take_operating_costs(txn); - - PlanDb::save_active_plan::( - txn, - key_bytes.as_ref(), - block_number, - &plan, - running_operating_costs, - ); - - // If this Plan is from the scanner handler below, don't take the opportunity to amortize - // operating costs - // It operates with limited context, and on a different clock, making it unable to react - // to operating costs - // Despite this, in order to properly save forwarded outputs' instructions, it needs to - // know the actual value forwarded outputs will be created with - // Including operating costs prevents that - let from_scanning = plans_from_scanning.contains(&plan.id()); - let to_use_operating_costs = if from_scanning { 0 } else { running_operating_costs }; - - let PreparedSend { tx, post_fee_branches, mut operating_costs } = - prepare_send(network, block_number, plan, to_use_operating_costs).await; - - // Restore running_operating_costs to operating_costs - if from_scanning { - // If we're forwarding (or refunding) this output, operating_costs should still be 0 - // Either this TX wasn't created, causing no operating costs, or it was yet it'd be - // amortized - assert_eq!(operating_costs, 0); - - operating_costs += running_operating_costs; - } - - OperatingCostsDb::set_operating_costs(txn, operating_costs); - - (tx, post_fee_branches) - }; - - for branch in post_fee_branches { - let existing = self.existing.as_mut().unwrap(); - let to_use = if key == existing.key { - existing - } else { - let new = self - .new - .as_mut() - .expect("plan wasn't for existing multisig yet there wasn't a new multisig"); - assert_eq!(key, new.key); - new - }; - - to_use.scheduler.created_output::(txn, branch.expected, branch.actual); - } - - if let Some((tx, eventuality)) = tx { - // The main function we return to will send an event to the coordinator which must be - // fired before these registered Eventualities have their Completions fired - // Safety is derived from a mutable lock on the Scanner being preserved, preventing - // scanning (and detection of Eventuality resolutions) before it's released - // It's only released by the main function after it does what it will - self - .scanner - .register_eventuality(key_bytes.as_ref(), block_number, id, eventuality.clone()) - .await; - - res.push((key, id, tx, eventuality)); - } - - // TODO: If the TX is None, restore its inputs to the scheduler for efficiency's sake - // If this TODO is removed, also reduce the operating costs - } - res - }; - 
(acquired_lock, res) - } - - pub async fn release_scanner_lock(&mut self) { - self.scanner.release_lock().await; - } - - pub async fn scanner_event_to_multisig_event( - &self, - txn: &mut D::Transaction<'_>, - network: &N, - msg: ScannerEvent, - ) -> MultisigEvent { - let (block_number, event) = match msg { - ScannerEvent::Block { is_retirement_block, block, mut outputs } => { - // Since the Scanner is asynchronous, the following is a concern for race conditions - // We safely know the step of a block since keys are declared, and the Scanner is safe - // with respect to the declaration of keys - // Accordingly, the following calls regarding new keys and step should be safe - let block_number = ScannerHandle::::block_number(txn, &block) - .expect("didn't have the block number for a block we just scanned"); - let step = self.current_rotation_step(block_number); - - // Instructions created from this block - let mut instructions = vec![]; - - // If any of these outputs were forwarded, create their instruction now - for output in &outputs { - if output.kind() != OutputType::Forwarded { - continue; - } - - if let Some(instruction) = ForwardedOutputDb::take_forwarded_output(txn, output.balance()) - { - instructions.push(instruction); - } - } - - // If the remaining outputs aren't externally received funds, don't handle them as - // instructions - outputs.retain(|output| output.kind() == OutputType::External); - - // These plans are of limited context. They're only allowed the outputs newly received - // within this block and are intended to handle forwarding transactions/refunds - let mut plans = vec![]; - - // If the old multisig is explicitly only supposed to forward, create all such plans now - if step == RotationStep::ForwardFromExisting { - let mut i = 0; - while i < outputs.len() { - let output = &outputs[i]; - let plans = &mut plans; - let txn = &mut *txn; - - #[allow(clippy::redundant_closure_call)] - let should_retain = (|| async move { - // If this output doesn't belong to the existing multisig, it shouldn't be forwarded - if output.key() != self.existing.as_ref().unwrap().key { - return true; - } - - let plans_at_start = plans.len(); - let (refund_to, instruction) = instruction_from_output::(output); - if let Some(mut instruction) = instruction { - let Some(shimmed_plan) = N::Scheduler::shim_forward_plan( - output.clone(), - self.new.as_ref().expect("forwarding from existing yet no new multisig").key, - ) else { - // If this network doesn't need forwarding, report the output now - return true; - }; - plans.push(PlanFromScanning::::Forward(output.clone())); - - // Set the instruction for this output to be returned - // We need to set it under the amount it's forwarded with, so prepare its forwarding - // TX to determine the fees involved - let PreparedSend { tx, post_fee_branches: _, operating_costs } = - prepare_send(network, block_number, shimmed_plan, 0).await; - // operating_costs should not increase in a forwarding TX - assert_eq!(operating_costs, 0); - - // If this actually forwarded any coins, save the output as forwarded - // If this didn't create a TX, we don't bother saving the output as forwarded - // The fact we already created and pushed a plan still using this output will cause - // it to not be retained here, and later the plan will be dropped as this did here, - // letting it die out - if let Some(tx) = &tx { - instruction.balance.amount.0 -= tx.0.fee(); - - /* - Sending a Plan, with arbitrary data proxying the InInstruction, would require - adding a flow for networks 
which drop their data to still embed arbitrary data. - It'd also have edge cases causing failures (we'd need to manually provide the - origin if it was implied, which may exceed the encoding limit). - - Instead, we save the InInstruction as we scan this output. Then, when the - output is successfully forwarded, we simply read it from the local database. - This also saves the costs of embedding arbitrary data. - - Since we can't rely on the Eventuality system to detect if it's a forwarded - transaction, due to the asynchronicity of the Eventuality system, we instead - interpret a Forwarded output which has an amount associated with an - InInstruction which was forwarded as having been forwarded. - */ - ForwardedOutputDb::save_forwarded_output(txn, &instruction); - } - } else if let Some(refund_to) = refund_to { - if let Ok(refund_to) = refund_to.consume().try_into() { - // Build a dedicated Plan refunding this - plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); - } - } - - // Only keep if we didn't make a Plan consuming it - plans_at_start == plans.len() - })() - .await; - if should_retain { - i += 1; - continue; - } - outputs.remove(i); - } - } - - for output in outputs { - // If this is an External transaction to the existing multisig, and we're either solely - // forwarding or closing the existing multisig, drop it - // In the forwarding case, we'll report it once it hits the new multisig - if (match step { - RotationStep::UseExisting | RotationStep::NewAsChange => false, - RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => true, - }) && (output.key() == self.existing.as_ref().unwrap().key) - { - continue; - } - - let (refund_to, instruction) = instruction_from_output::(&output); - let Some(instruction) = instruction else { - if let Some(refund_to) = refund_to { - if let Ok(refund_to) = refund_to.consume().try_into() { - plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); - } - } - continue; - }; - - // Delay External outputs received to new multisig earlier than expected - if Some(output.key()) == self.new.as_ref().map(|new| new.key) { - match step { - RotationStep::UseExisting => { - DelayedOutputDb::save_delayed_output(txn, &instruction); - continue; - } - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => {} - } - } - - instructions.push(instruction); - } - - // Save the plans created while scanning - // TODO: Should we combine all of these plans to reduce the fees incurred from their - // execution? They're refunds and forwards. Neither should need isolated Plan/Eventualities. 
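// A minimal in-memory sketch, not part of the deleted code, of the ForwardedOutputDb
// bookkeeping described above: instructions are queued under the exact balance they'll be
// forwarded with, then popped (FIFO) when a Forwarded output of that balance is scanned.
// The real code persists this in the database; `SimpleBalance` is a simplified stand-in
// for the Balance type.
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct SimpleBalance {
  coin: u8,
  amount: u64,
}

#[derive(Default)]
struct ForwardedOutputs(HashMap<SimpleBalance, Vec<Vec<u8>>>);

impl ForwardedOutputs {
  // Called when scanning an output which will be forwarded
  fn save(&mut self, balance: SimpleBalance, instruction: Vec<u8>) {
    self.0.entry(balance).or_default().push(instruction);
  }
  // Called when the forwarded output later appears under the new multisig
  fn take(&mut self, balance: SimpleBalance) -> Option<Vec<u8>> {
    let queue = self.0.get_mut(&balance)?;
    let res = queue.remove(0);
    // Drop the entry once its queue is drained
    if queue.is_empty() {
      self.0.remove(&balance);
    }
    Some(res)
  }
}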
- PlansFromScanningDb::set_plans_from_scanning(txn, block_number, plans); - - // If any outputs were delayed, append them into this block - match step { - RotationStep::UseExisting => {} - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => { - instructions.extend(DelayedOutputDb::take_delayed_outputs(txn)); - } - } - - let mut block_hash = [0; 32]; - block_hash.copy_from_slice(block.as_ref()); - let mut batch_id = NextBatchDb::get(txn).unwrap_or_default(); - - // start with empty batch - let mut batches = vec![Batch { - network: N::NETWORK, - id: batch_id, - block: BlockHash(block_hash), - instructions: vec![], - }]; - - for instruction in instructions { - let batch = batches.last_mut().unwrap(); - batch.instructions.push(instruction); - - // check if batch is over-size - if batch.encode().len() > MAX_BATCH_SIZE { - // pop the last instruction so it's back in size - let instruction = batch.instructions.pop().unwrap(); - - // bump the id for the new batch - batch_id += 1; - - // make a new batch with this instruction included - batches.push(Batch { - network: N::NETWORK, - id: batch_id, - block: BlockHash(block_hash), - instructions: vec![instruction], - }); - } - } - - // Save the next batch ID - NextBatchDb::set(txn, &(batch_id + 1)); - - ( - block_number, - MultisigEvent::Batches( - if is_retirement_block { - Some((self.existing.as_ref().unwrap().key, self.new.as_ref().unwrap().key)) - } else { - None - }, - batches, - ), - ) - } - - // This must be emitted before ScannerEvent::Block for all completions of known Eventualities - // within the block. Unknown Eventualities may have their Completed events emitted after - // ScannerEvent::Block however. - ScannerEvent::Completed(key, block_number, id, tx_id, completion) => { - ResolvedDb::resolve_plan::(txn, &key, id, &tx_id); - (block_number, MultisigEvent::Completed(key, id, completion)) - } - }; - - // If we either received a Block event (which will be the trigger when we have no - // Plans/Eventualities leading into ClosingExisting), or we received the last Completed for - // this multisig, set its retirement block - let existing = self.existing.as_ref().unwrap(); - - // This multisig is closing - let closing = self.current_rotation_step(block_number) == RotationStep::ClosingExisting; - // There's nothing left in its Scheduler. This call is safe as: - // 1) When ClosingExisting, all outputs should've been already forwarded, preventing - // new UTXOs from accumulating. - // 2) No new payments should be issued. - // 3) While there may be plans, they'll be dropped to create Eventualities. - // If this Eventuality is resolved, the Plan has already been dropped. - // 4) If this Eventuality will trigger a Plan, it'll still be in the plans HashMap. - let scheduler_is_empty = closing && existing.scheduler.empty(); - // Nothing is still being signed - let no_active_plans = scheduler_is_empty && - PlanDb::active_plans::(txn, existing.key.to_bytes().as_ref()).is_empty(); - - self - .scanner - .multisig_completed - // The above explicitly included their predecessor to ensure short-circuiting, yet their - // names aren't defined as an aggregate check. 
Still including all three here ensures all are - // used in the final value - .send(closing && scheduler_is_empty && no_active_plans) - .unwrap(); - - event - } - - pub async fn next_scanner_event(&mut self) -> ScannerEvent { - self.scanner.events.recv().await.unwrap() - } + Completed(Vec, [u8; 32], (reader: &mut R) -> io::Result; - fn write(&self, writer: &mut W) -> io::Result<()>; -} - -impl SchedulerAddendum for () { - fn read(_: &mut R) -> io::Result { - Ok(()) - } - fn write(&self, _: &mut W) -> io::Result<()> { - Ok(()) - } -} - -pub trait Scheduler: Sized + Clone + PartialEq + Debug { - type Addendum: SchedulerAddendum; - - /// Check if this Scheduler is empty. - fn empty(&self) -> bool; - - /// Create a new Scheduler. - fn new( - txn: &mut D::Transaction<'_>, - key: ::G, - network: NetworkId, - ) -> Self; - - /// Load a Scheduler from the DB. - fn from_db( - db: &D, - key: ::G, - network: NetworkId, - ) -> io::Result; - - /// Check if a branch is usable. - fn can_use_branch(&self, balance: Balance) -> bool; - - /// Schedule a series of outputs/payments. - fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - payments: Vec>, - // TODO: Tighten this to multisig_for_any_change - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec>; - - /// Consume all payments still pending within this Scheduler, without scheduling them. - fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec>; - - /// Note a branch output as having been created, with the amount it was actually created with, - /// or not having been created due to being too small. - fn created_output( - &mut self, - txn: &mut D::Transaction<'_>, - expected: u64, - actual: Option, - ); - - /// Refund a specific output. - fn refund_plan( - &mut self, - txn: &mut D::Transaction<'_>, - output: N::Output, - refund_to: N::Address, - ) -> Plan; - - /// Shim the forwarding Plan as necessary to obtain a fee estimate. - /// - /// If this Scheduler is for a Network which requires forwarding, this must return Some with a - /// plan with identical fee behavior. If forwarding isn't necessary, returns None. - fn shim_forward_plan(output: N::Output, to: ::G) -> Option>; - - /// Forward a specific output to the new multisig. - /// - /// Returns None if no forwarding is necessary. Must return Some if forwarding is necessary. - fn forward_plan( - &mut self, - txn: &mut D::Transaction<'_>, - output: N::Output, - to: ::G, - ) -> Option>; -} diff --git a/processor/src/multisigs/scheduler/utxo.rs b/processor/src/multisigs/scheduler/utxo.rs deleted file mode 100644 index 1865cab91..000000000 --- a/processor/src/multisigs/scheduler/utxo.rs +++ /dev/null @@ -1,631 +0,0 @@ -use std::{ - io::{self, Read}, - collections::{VecDeque, HashMap}, -}; - -use ciphersuite::{group::GroupEncoding, Ciphersuite}; - -use serai_client::primitives::{NetworkId, Coin, Amount, Balance}; - -use crate::{ - DbTxn, Db, Payment, Plan, - networks::{OutputType, Output, Network, UtxoNetwork}, - multisigs::scheduler::Scheduler as SchedulerTrait, -}; - -/// Deterministic output/payment manager. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Scheduler { - key: ::G, - coin: Coin, - - // Serai, when it has more outputs expected than it can handle in a single transaction, will - // schedule the outputs to be handled later. 
Immediately, it just creates additional outputs - // which will eventually handle those outputs - // - // These maps map output amounts, which we'll receive in the future, to the payments they should - // be used on - // - // When those output amounts appear, their payments should be scheduled - // The Vec is for all payments that should be done per output instance - // The VecDeque allows multiple sets of payments with the same sum amount to properly co-exist - // - // queued_plans are for outputs which we will create, yet when created, will have their amount - // reduced by the fee it cost to be created. The Scheduler will then be told what amount the - // output actually has, and it'll be moved into plans - queued_plans: HashMap>>>, - plans: HashMap>>>, - - // UTXOs available - utxos: Vec, - - // Payments awaiting scheduling due to the output availability problem - payments: VecDeque>, -} - -fn scheduler_key(key: &G) -> Vec { - D::key(b"SCHEDULER", b"scheduler", key.to_bytes()) -} - -impl> Scheduler { - pub fn empty(&self) -> bool { - self.queued_plans.is_empty() && - self.plans.is_empty() && - self.utxos.is_empty() && - self.payments.is_empty() - } - - fn read( - key: ::G, - coin: Coin, - reader: &mut R, - ) -> io::Result { - let mut read_plans = || -> io::Result<_> { - let mut all_plans = HashMap::new(); - let mut all_plans_len = [0; 4]; - reader.read_exact(&mut all_plans_len)?; - for _ in 0 .. u32::from_le_bytes(all_plans_len) { - let mut amount = [0; 8]; - reader.read_exact(&mut amount)?; - let amount = u64::from_le_bytes(amount); - - let mut plans = VecDeque::new(); - let mut plans_len = [0; 4]; - reader.read_exact(&mut plans_len)?; - for _ in 0 .. u32::from_le_bytes(plans_len) { - let mut payments = vec![]; - let mut payments_len = [0; 4]; - reader.read_exact(&mut payments_len)?; - - for _ in 0 .. u32::from_le_bytes(payments_len) { - payments.push(Payment::read(reader)?); - } - plans.push_back(payments); - } - all_plans.insert(amount, plans); - } - Ok(all_plans) - }; - let queued_plans = read_plans()?; - let plans = read_plans()?; - - let mut utxos = vec![]; - let mut utxos_len = [0; 4]; - reader.read_exact(&mut utxos_len)?; - for _ in 0 .. u32::from_le_bytes(utxos_len) { - utxos.push(N::Output::read(reader)?); - } - - let mut payments = VecDeque::new(); - let mut payments_len = [0; 4]; - reader.read_exact(&mut payments_len)?; - for _ in 0 .. 
u32::from_le_bytes(payments_len) { - payments.push_back(Payment::read(reader)?); - } - - Ok(Scheduler { key, coin, queued_plans, plans, utxos, payments }) - } - - // TODO2: Get rid of this - // We reserialize the entire scheduler on any mutation to save it to the DB which is horrible - // We should have an incremental solution - fn serialize(&self) -> Vec { - let mut res = Vec::with_capacity(4096); - - let mut write_plans = |plans: &HashMap>>>| { - res.extend(u32::try_from(plans.len()).unwrap().to_le_bytes()); - for (amount, list_of_plans) in plans { - res.extend(amount.to_le_bytes()); - res.extend(u32::try_from(list_of_plans.len()).unwrap().to_le_bytes()); - for plan in list_of_plans { - res.extend(u32::try_from(plan.len()).unwrap().to_le_bytes()); - for payment in plan { - payment.write(&mut res).unwrap(); - } - } - } - }; - write_plans(&self.queued_plans); - write_plans(&self.plans); - - res.extend(u32::try_from(self.utxos.len()).unwrap().to_le_bytes()); - for utxo in &self.utxos { - utxo.write(&mut res).unwrap(); - } - - res.extend(u32::try_from(self.payments.len()).unwrap().to_le_bytes()); - for payment in &self.payments { - payment.write(&mut res).unwrap(); - } - - debug_assert_eq!(&Self::read(self.key, self.coin, &mut res.as_slice()).unwrap(), self); - res - } - - pub fn new( - txn: &mut D::Transaction<'_>, - key: ::G, - network: NetworkId, - ) -> Self { - assert!(N::branch_address(key).is_some()); - assert!(N::change_address(key).is_some()); - assert!(N::forward_address(key).is_some()); - - let coin = { - let coins = network.coins(); - assert_eq!(coins.len(), 1); - coins[0] - }; - - let res = Scheduler { - key, - coin, - queued_plans: HashMap::new(), - plans: HashMap::new(), - utxos: vec![], - payments: VecDeque::new(), - }; - // Save it to disk so from_db won't panic if we don't mutate it before rebooting - txn.put(scheduler_key::(&res.key), res.serialize()); - res - } - - pub fn from_db( - db: &D, - key: ::G, - network: NetworkId, - ) -> io::Result { - let coin = { - let coins = network.coins(); - assert_eq!(coins.len(), 1); - coins[0] - }; - - let scheduler = db.get(scheduler_key::(&key)).unwrap_or_else(|| { - panic!("loading scheduler from DB without scheduler for {}", hex::encode(key.to_bytes())) - }); - let mut reader_slice = scheduler.as_slice(); - let reader = &mut reader_slice; - - Self::read(key, coin, reader) - } - - pub fn can_use_branch(&self, balance: Balance) -> bool { - assert_eq!(balance.coin, self.coin); - self.plans.contains_key(&balance.amount.0) - } - - fn execute( - &mut self, - inputs: Vec, - mut payments: Vec>, - key_for_any_change: ::G, - ) -> Plan { - let mut change = false; - let mut max = N::MAX_OUTPUTS; - - let payment_amounts = |payments: &Vec>| { - payments.iter().map(|payment| payment.balance.amount.0).sum::() - }; - - // Requires a change output - if inputs.iter().map(|output| output.balance().amount.0).sum::() != - payment_amounts(&payments) - { - change = true; - max -= 1; - } - - let mut add_plan = |payments| { - let amount = payment_amounts(&payments); - self.queued_plans.entry(amount).or_insert(VecDeque::new()).push_back(payments); - amount - }; - - let branch_address = N::branch_address(self.key).unwrap(); - - // If we have more payments than we can handle in a single TX, create plans for them - // TODO2: This isn't perfect. 
For 258 outputs, and a MAX_OUTPUTS of 16, this will create: - // 15 branches of 16 leaves - // 1 branch of: - // - 1 branch of 16 leaves - // - 2 leaves - // If this was perfect, the heaviest branch would have 1 branch of 3 leaves and 15 leaves - while payments.len() > max { - // The resulting TX will have the remaining payments and a new branch payment - let to_remove = (payments.len() + 1) - N::MAX_OUTPUTS; - // Don't remove more than possible - let to_remove = to_remove.min(N::MAX_OUTPUTS); - - // Create the plan - let removed = payments.drain((payments.len() - to_remove) ..).collect::>(); - assert_eq!(removed.len(), to_remove); - let amount = add_plan(removed); - - // Create the payment for the plan - // Push it to the front so it's not moved into a branch until all lower-depth items are - payments.insert( - 0, - Payment { - address: branch_address.clone(), - data: None, - balance: Balance { coin: self.coin, amount: Amount(amount) }, - }, - ); - } - - Plan { - key: self.key, - inputs, - payments, - change: Some(N::change_address(key_for_any_change).unwrap()).filter(|_| change), - scheduler_addendum: (), - } - } - - fn add_outputs( - &mut self, - mut utxos: Vec, - key_for_any_change: ::G, - ) -> Vec> { - log::info!("adding {} outputs", utxos.len()); - - let mut txs = vec![]; - - for utxo in utxos.drain(..) { - if utxo.kind() == OutputType::Branch { - let amount = utxo.balance().amount.0; - if let Some(plans) = self.plans.get_mut(&amount) { - // Execute the first set of payments possible with an output of this amount - let payments = plans.pop_front().unwrap(); - // They won't be equal if we dropped payments due to being dust - assert!(amount >= payments.iter().map(|payment| payment.balance.amount.0).sum::()); - - // If we've grabbed the last plan for this output amount, remove it from the map - if plans.is_empty() { - self.plans.remove(&amount); - } - - // Create a TX for these payments - txs.push(self.execute(vec![utxo], payments, key_for_any_change)); - continue; - } - } - - self.utxos.push(utxo); - } - - log::info!("{} planned TXs have had their required inputs confirmed", txs.len()); - txs - } - - // Schedule a series of outputs/payments. - pub fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - mut payments: Vec>, - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec> { - for utxo in &utxos { - assert_eq!(utxo.balance().coin, self.coin); - } - for payment in &payments { - assert_eq!(payment.balance.coin, self.coin); - } - - // Drop payments to our own branch address - /* - created_output will be called any time we send to a branch address. If it's called, and it - wasn't expecting to be called, that's almost certainly an error. The only way to guarantee - this however is to only have us send to a branch address when creating a branch, hence the - dropping of pointless payments. - - This is not comprehensive as a payment may still be made to another active multisig's branch - address, depending on timing. This is safe as the issue only occurs when a multisig sends to - its *own* branch address, since created_output is called on the signer's Scheduler. 
- */ - { - let branch_address = N::branch_address(self.key).unwrap(); - payments = - payments.drain(..).filter(|payment| payment.address != branch_address).collect::>(); - } - - let mut plans = self.add_outputs(utxos, key_for_any_change); - - log::info!("scheduling {} new payments", payments.len()); - - // Add all new payments to the list of pending payments - self.payments.extend(payments); - let payments_at_start = self.payments.len(); - log::info!("{} payments are now scheduled", payments_at_start); - - // If we don't have UTXOs available, don't try to continue - if self.utxos.is_empty() { - log::info!("no utxos currently available"); - return plans; - } - - // Sort UTXOs so the highest valued ones are first - self.utxos.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse()); - - // We always want to aggregate our UTXOs into a single UTXO in the name of simplicity - // We may have more UTXOs than will fit into a TX though - // We use the most valuable UTXOs to handle our current payments, and we return aggregation TXs - // for the rest of the inputs - // Since we do multiple aggregation TXs at once, this will execute in logarithmic time - let utxos = self.utxos.drain(..).collect::>(); - let mut utxo_chunks = - utxos.chunks(N::MAX_INPUTS).map(<[::Output]>::to_vec).collect::>(); - - // Use the first chunk for any scheduled payments, since it has the most value - let utxos = utxo_chunks.remove(0); - - // If the last chunk exists and only has one output, don't try aggregating it - // Set it to be restored to UTXO set - let mut to_restore = None; - if let Some(mut chunk) = utxo_chunks.pop() { - if chunk.len() == 1 { - to_restore = Some(chunk.pop().unwrap()); - } else { - utxo_chunks.push(chunk); - } - } - - for chunk in utxo_chunks.drain(..) { - log::debug!("aggregating a chunk of {} inputs", chunk.len()); - plans.push(Plan { - key: self.key, - inputs: chunk, - payments: vec![], - change: Some(N::change_address(key_for_any_change).unwrap()), - scheduler_addendum: (), - }) - } - - // We want to use all possible UTXOs for all possible payments - let mut balance = utxos.iter().map(|output| output.balance().amount.0).sum::(); - - // If we can't fulfill the next payment, we have encountered an instance of the UTXO - // availability problem - // This shows up in networks like Monero, where because we spent outputs, our change has yet to - // re-appear. 
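// A minimal sketch, not part of the deleted code, of the aggregation strategy above,
// assuming at most `max_inputs` inputs per transaction: the highest-valued chunk funds
// scheduled payments, every other chunk becomes an aggregation transaction, and a trailing
// single-output chunk is restored to the UTXO set rather than pointlessly "aggregated" alone.
fn plan_aggregation(
  mut utxo_values: Vec<u64>,
  max_inputs: usize,
) -> (Vec<u64>, Vec<Vec<u64>>, Option<u64>) {
  assert!(!utxo_values.is_empty());
  // Sort so the highest-valued UTXOs come first
  utxo_values.sort_unstable_by(|a, b| b.cmp(a));
  let mut chunks =
    utxo_values.chunks(max_inputs).map(<[u64]>::to_vec).collect::<Vec<_>>();
  // The first chunk, holding the most value, is reserved for the scheduled payments
  let for_payments = chunks.remove(0);
  // Don't aggregate a final chunk of one; restore it instead
  let mut to_restore = None;
  if let Some(mut chunk) = chunks.pop() {
    if chunk.len() == 1 {
      to_restore = chunk.pop();
    } else {
      chunks.push(chunk);
    }
  }
  (for_payments, chunks, to_restore)
}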
Since it has yet to re-appear, we only operate with a balance which is a subset - // of our total balance - // Despite this, we may be ordered to fulfill a payment which is our total balance - // The solution is to wait for the temporarily unavailable change outputs to re-appear, - // granting us access to our full balance - let mut executing = vec![]; - while !self.payments.is_empty() { - let amount = self.payments[0].balance.amount.0; - if balance.checked_sub(amount).is_some() { - balance -= amount; - executing.push(self.payments.pop_front().unwrap()); - } else { - // Doesn't check if other payments would fit into the current batch as doing so may never - // let enough inputs become simultaneously available to enable handling of payments[0] - break; - } - } - - // Now that we have the list of payments we can successfully handle right now, create the TX - // for them - if !executing.is_empty() { - plans.push(self.execute(utxos, executing, key_for_any_change)); - } else { - // If we don't have any payments to execute, save these UTXOs for later - self.utxos.extend(utxos); - } - - // If we're instructed to force a spend, do so - // This is used when an old multisig is retiring and we want to always transfer outputs to the - // new one, regardless if we currently have payments - if force_spend && (!self.utxos.is_empty()) { - assert!(self.utxos.len() <= N::MAX_INPUTS); - plans.push(Plan { - key: self.key, - inputs: self.utxos.drain(..).collect::>(), - payments: vec![], - change: Some(N::change_address(key_for_any_change).unwrap()), - scheduler_addendum: (), - }); - } - - // If there's a UTXO to restore, restore it - // This is done now as if there is a to_restore output, and it was inserted into self.utxos - // earlier, self.utxos.len() may become `N::MAX_INPUTS + 1` - // The prior block requires the len to be `<= N::MAX_INPUTS` - if let Some(to_restore) = to_restore { - self.utxos.push(to_restore); - } - - txn.put(scheduler_key::(&self.key), self.serialize()); - - log::info!( - "created {} plans containing {} payments to sign, with {} payments pending scheduling", - plans.len(), - payments_at_start - self.payments.len(), - self.payments.len(), - ); - plans - } - - pub fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec> { - let res: Vec<_> = self.payments.drain(..).collect(); - if !res.is_empty() { - txn.put(scheduler_key::(&self.key), self.serialize()); - } - res - } - - // Note a branch output as having been created, with the amount it was actually created with, - // or not having been created due to being too small - pub fn created_output( - &mut self, - txn: &mut D::Transaction<'_>, - expected: u64, - actual: Option, - ) { - log::debug!("output expected to have {} had {:?} after fees", expected, actual); - - // Get the payments this output is expected to handle - let queued = self.queued_plans.get_mut(&expected).unwrap(); - let mut payments = queued.pop_front().unwrap(); - assert_eq!(expected, payments.iter().map(|payment| payment.balance.amount.0).sum::()); - // If this was the last set of payments at this amount, remove it - if queued.is_empty() { - self.queued_plans.remove(&expected); - } - - // If we didn't actually create this output, return, dropping the child payments - let Some(actual) = actual else { return }; - - // Amortize the fee amongst all payments underneath this branch - { - let mut to_amortize = expected - actual; - // If the payments are worth less than this fee we need to amortize, return, dropping them - if payments.iter().map(|payment| 
payment.balance.amount.0).sum::() < to_amortize { - return; - } - while to_amortize != 0 { - let payments_len = u64::try_from(payments.len()).unwrap(); - let per_payment = to_amortize / payments_len; - let mut overage = to_amortize % payments_len; - - for payment in &mut payments { - let to_subtract = per_payment + overage; - // Only subtract the overage once - overage = 0; - - let subtractable = payment.balance.amount.0.min(to_subtract); - to_amortize -= subtractable; - payment.balance.amount.0 -= subtractable; - } - } - } - - // Drop payments now below the dust threshold - let payments = payments - .into_iter() - .filter(|payment| payment.balance.amount.0 >= N::DUST) - .collect::>(); - // Sanity check this was done properly - assert!(actual >= payments.iter().map(|payment| payment.balance.amount.0).sum::()); - - // If there's no payments left, return - if payments.is_empty() { - return; - } - - self.plans.entry(actual).or_insert(VecDeque::new()).push_back(payments); - - // TODO2: This shows how ridiculous the serialize function is - txn.put(scheduler_key::(&self.key), self.serialize()); - } -} - -impl> SchedulerTrait for Scheduler { - type Addendum = (); - - /// Check if this Scheduler is empty. - fn empty(&self) -> bool { - Scheduler::empty(self) - } - - /// Create a new Scheduler. - fn new( - txn: &mut D::Transaction<'_>, - key: ::G, - network: NetworkId, - ) -> Self { - Scheduler::new::(txn, key, network) - } - - /// Load a Scheduler from the DB. - fn from_db( - db: &D, - key: ::G, - network: NetworkId, - ) -> io::Result { - Scheduler::from_db::(db, key, network) - } - - /// Check if a branch is usable. - fn can_use_branch(&self, balance: Balance) -> bool { - Scheduler::can_use_branch(self, balance) - } - - /// Schedule a series of outputs/payments. - fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - payments: Vec>, - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec> { - Scheduler::schedule::(self, txn, utxos, payments, key_for_any_change, force_spend) - } - - /// Consume all payments still pending within this Scheduler, without scheduling them. - fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec> { - Scheduler::consume_payments::(self, txn) - } - - /// Note a branch output as having been created, with the amount it was actually created with, - /// or not having been created due to being too small. - // TODO: Move this to Balance. 
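// A minimal sketch, not part of the deleted code, of a fee-amortization routine in the
// spirit of `created_output` above. It deliberately differs in one respect: each pass only
// spreads the fee over payments which still have value, which guarantees termination
// whenever the payments are collectively worth the fee.
fn amortize_fee(amounts: &mut [u64], mut fee: u64) {
  assert!(amounts.iter().sum::<u64>() >= fee, "fee exceeds the value amortized over");
  while fee != 0 {
    // Only spread the fee over payments which still have value
    let payable = u64::try_from(amounts.iter().filter(|a| **a != 0).count()).unwrap();
    let per_payment = fee / payable;
    // The first payment with value also takes the remainder
    let mut overage = fee % payable;
    for amount in amounts.iter_mut().filter(|a| **a != 0) {
      let to_subtract = (per_payment + overage).min(*amount);
      // Only take the overage once
      overage = 0;
      *amount -= to_subtract;
      fee -= to_subtract;
    }
  }
}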
-
-  /// Note a branch output as having been created, with the amount it was actually created with,
-  /// or not having been created due to being too small.
-  // TODO: Move this to Balance.
-  fn created_output<D: Db>(
-    &mut self,
-    txn: &mut D::Transaction<'_>,
-    expected: u64,
-    actual: Option<u64>,
-  ) {
-    Scheduler::created_output::<D>(self, txn, expected, actual)
-  }
-
-  fn refund_plan<D: Db>(
-    &mut self,
-    _: &mut D::Transaction<'_>,
-    output: N::Output,
-    refund_to: N::Address,
-  ) -> Plan<N> {
-    let output_id = output.id().as_ref().to_vec();
-    let res = Plan {
-      key: output.key(),
-      // Uses a payment as this will still be successfully sent due to fee amortization,
-      // and because change is currently always a Serai key
-      payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }],
-      inputs: vec![output],
-      change: None,
-      scheduler_addendum: (),
-    };
-    log::info!("refund plan for {} has ID {}", hex::encode(output_id), hex::encode(res.id()));
-    res
-  }
-
-  fn shim_forward_plan(output: N::Output, to: <N::Curve as Ciphersuite>::G) -> Option<Plan<N>> {
-    Some(Plan {
-      key: output.key(),
-      payments: vec![Payment {
-        address: N::forward_address(to).unwrap(),
-        data: None,
-        balance: output.balance(),
-      }],
-      inputs: vec![output],
-      change: None,
-      scheduler_addendum: (),
-    })
-  }
-
-  fn forward_plan<D: Db>(
-    &mut self,
-    _: &mut D::Transaction<'_>,
-    output: N::Output,
-    to: <N::Curve as Ciphersuite>::G,
-  ) -> Option<Plan<N>> {
-    assert_eq!(self.key, output.key());
-    // Call shim as shim returns the actual
-    Self::shim_forward_plan(output, to)
-  }
-}
diff --git a/processor/src/plan.rs b/processor/src/plan.rs
deleted file mode 100644
index 58a8a5e11..000000000
--- a/processor/src/plan.rs
+++ /dev/null
@@ -1,212 +0,0 @@
-use std::io;
-
-use scale::{Encode, Decode};
-
-use transcript::{Transcript, RecommendedTranscript};
-use ciphersuite::group::GroupEncoding;
-use frost::curve::Ciphersuite;
-
-use serai_client::primitives::Balance;
-
-use crate::{
-  networks::{Output, Network},
-  multisigs::scheduler::{SchedulerAddendum, Scheduler},
-};
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct Payment<N: Network> {
-  pub address: N::Address,
-  pub data: Option<Vec<u8>>,
-  pub balance: Balance,
-}
-
-impl<N: Network> Payment<N> {
-  pub fn transcript<T: Transcript>(&self, transcript: &mut T) {
-    transcript.domain_separate(b"payment");
-    transcript.append_message(b"address", self.address.to_string().as_bytes());
-    if let Some(data) = self.data.as_ref() {
-      transcript.append_message(b"data", data);
-    }
-    transcript.append_message(b"coin", self.balance.coin.encode());
-    transcript.append_message(b"amount", self.balance.amount.0.to_le_bytes());
-  }
-
-  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-    // TODO: Don't allow creating Payments with an Address which can't be serialized
-    let address: Vec<u8> = self
-      .address
-      .clone()
-      .try_into()
-      .map_err(|_| io::Error::other("address couldn't be serialized"))?;
-    writer.write_all(&u32::try_from(address.len()).unwrap().to_le_bytes())?;
-    writer.write_all(&address)?;
-
-    writer.write_all(&[u8::from(self.data.is_some())])?;
-    if let Some(data) = &self.data {
-      writer.write_all(&u32::try_from(data.len()).unwrap().to_le_bytes())?;
-      writer.write_all(data)?;
-    }
-
-    writer.write_all(&self.balance.encode())
-  }
-
-  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
-    let mut buf = [0; 4];
-    reader.read_exact(&mut buf)?;
-    let mut address = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()];
-    reader.read_exact(&mut address)?;
-    let address = N::Address::try_from(address).map_err(|_| io::Error::other("invalid address"))?;
-
-    let mut buf = [0; 1];
-    reader.read_exact(&mut buf)?;
-    let data = if buf[0] == 1 {
-      let mut buf = [0; 4];
-      reader.read_exact(&mut buf)?;
-      let mut data = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()];
-      reader.read_exact(&mut data)?;
-      Some(data)
-    } else {
-      None
-    };
-
-    let balance = Balance::decode(&mut scale::IoReader(reader))
-      .map_err(|_| io::Error::other("invalid balance"))?;
-
-    Ok(Payment { address, data, balance })
-  }
-}
-
-#[derive(Clone, PartialEq)]
-pub struct Plan<N: Network> {
-  pub key: <N::Curve as Ciphersuite>::G,
-  pub inputs: Vec<N::Output>,
-  /// The payments this Plan is intended to create.
-  ///
-  /// This should only contain payments leaving Serai. While it is acceptable for users to enter
-  /// Serai's address(es) as the payment address, as that'll be handled by anything which expects
-  /// certain properties, Serai as a system MUST NOT use payments for internal transfers. Doing
-  /// so will cause a reduction in their value by the TX fee/operating costs, creating an
-  /// incomplete transfer.
-  pub payments: Vec<Payment<N>>,
-  /// The change this Plan should use.
-  ///
-  /// This MUST contain a Serai address. Operating costs may be deducted from the payments in this
-  /// Plan on the premise that the change address is Serai's, and accordingly, Serai will recoup
-  /// the operating costs.
-  //
-  // TODO: Consider moving to <N::Curve as Ciphersuite>::G?
-  pub change: Option<N::Address>,
-  /// The scheduler's additional data.
-  pub scheduler_addendum: <N::Scheduler as Scheduler<N>>::Addendum,
-}
-impl<N: Network> core::fmt::Debug for Plan<N> {
-  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
-    fmt
-      .debug_struct("Plan")
-      .field("key", &hex::encode(self.key.to_bytes()))
-      .field("inputs", &self.inputs)
-      .field("payments", &self.payments)
-      .field("change", &self.change.as_ref().map(ToString::to_string))
-      .field("scheduler_addendum", &self.scheduler_addendum)
-      .finish()
-  }
-}
-
-impl<N: Network> Plan<N> {
-  pub fn transcript(&self) -> RecommendedTranscript {
-    let mut transcript = RecommendedTranscript::new(b"Serai Processor Plan ID");
-    transcript.domain_separate(b"meta");
-    transcript.append_message(b"network", N::ID);
-    transcript.append_message(b"key", self.key.to_bytes());
-
-    transcript.domain_separate(b"inputs");
-    for input in &self.inputs {
-      transcript.append_message(b"input", input.id());
-    }
-
-    transcript.domain_separate(b"payments");
-    for payment in &self.payments {
-      payment.transcript(&mut transcript);
-    }
-
-    if let Some(change) = &self.change {
-      transcript.append_message(b"change", change.to_string());
-    }
-
-    let mut addendum_bytes = vec![];
-    self.scheduler_addendum.write(&mut addendum_bytes).unwrap();
-    transcript.append_message(b"scheduler_addendum", addendum_bytes);
-
-    transcript
-  }
-
-  pub fn id(&self) -> [u8; 32] {
-    let challenge = self.transcript().challenge(b"id");
-    let mut res = [0; 32];
-    res.copy_from_slice(&challenge[.. 32]);
-    res
-  }
-
-  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-    writer.write_all(self.key.to_bytes().as_ref())?;
-
-    writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?;
-    for input in &self.inputs {
-      input.write(writer)?;
-    }
-
-    writer.write_all(&u32::try_from(self.payments.len()).unwrap().to_le_bytes())?;
-    for payment in &self.payments {
-      payment.write(writer)?;
-    }
-
-    // TODO: Have Plan construction fail if change cannot be serialized
-    let change = if let Some(change) = &self.change {
-      change.clone().try_into().map_err(|_| {
-        io::Error::other(format!(
-          "an address we said to use as change couldn't be converted to a Vec<u8>: {}",
-          change.to_string(),
-        ))
-      })?
-    } else {
-      vec![]
-    };
-    assert!(serai_client::primitives::MAX_ADDRESS_LEN <= u8::MAX.into());
-    writer.write_all(&[u8::try_from(change.len()).unwrap()])?;
-    writer.write_all(&change)?;
-    self.scheduler_addendum.write(writer)
-  }
-
-  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
-    let key = N::Curve::read_G(reader)?;
-
-    let mut inputs = vec![];
-    let mut buf = [0; 4];
-    reader.read_exact(&mut buf)?;
-    for _ in 0 .. u32::from_le_bytes(buf) {
-      inputs.push(N::Output::read(reader)?);
-    }
-
-    let mut payments = vec![];
-    reader.read_exact(&mut buf)?;
-    for _ in 0 .. u32::from_le_bytes(buf) {
-      payments.push(Payment::<N>::read(reader)?);
-    }
-
-    let mut len = [0; 1];
-    reader.read_exact(&mut len)?;
-    let mut change = vec![0; usize::from(len[0])];
-    reader.read_exact(&mut change)?;
-    let change =
-      if change.is_empty() {
-        None
-      } else {
-        Some(N::Address::try_from(change).map_err(|_| {
-          io::Error::other("couldn't deserialize an Address serialized into a Plan")
-        })?)
-      };
-
-    let scheduler_addendum = <N::Scheduler as Scheduler<N>>::Addendum::read(reader)?;
-    Ok(Plan { key, inputs, payments, change, scheduler_addendum })
-  }
-}

From 451170e6424480612827376ce2b7a4399d1df10f Mon Sep 17 00:00:00 2001
From: Luke Parker <lukeparker5132@gmail.com>
Date: Wed, 4 Sep 2024 17:29:48 -0400
Subject: [PATCH 073/179] Move additional_key.rs to serai-processor-view-keys

I don't love this. I wanted to simply add this function to
`processor/key-gen`, but then anyone who wants a view key needs to pull in
Bulletproofs, which is a mess of code. They'd also be subject to an
AGPL-licensed library.

This is so small it should be a primitive elsewhere, yet there is no
primitives library eligible. Maybe serai-client, since that has the code to
make transactions to Serai (and will have this as a dependency)? Except then
the processor has to import serai-client, when this rewrite removed it as a
dependency.
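For reference, the derivation this crate exposes is a single domain-separated
hash-to-field, shown in full in the diff below. A rough sketch of how a third
party might call it, instantiated here with `Ed25519` (Monero's ciphersuite)
purely for illustration, assuming the workspace's `ciphersuite` crate with that
feature enabled:

use ciphersuite::{Ciphersuite, Ed25519};

// Derive Serai's k-th view key scalar, matching what serai-processor-view-keys'
// `view_key::<Ed25519>(k)` computes
fn serai_view_key(k: u64) -> <Ed25519 as Ciphersuite>::F {
  Ed25519::hash_to_F(b"Serai DEX View Key", &k.to_le_bytes())
}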
--- .github/workflows/tests.yml | 1 + Cargo.lock | 7 +++++++ Cargo.toml | 1 + processor/src/additional_key.rs | 14 -------------- processor/src/lib.rs | 15 --------------- processor/src/main.rs | 6 ------ processor/src/multisigs/mod.rs | 2 +- processor/view-keys/Cargo.toml | 19 +++++++++++++++++++ processor/view-keys/LICENSE | 21 +++++++++++++++++++++ processor/view-keys/README.md | 6 ++++++ processor/view-keys/src/lib.rs | 13 +++++++++++++ 11 files changed, 69 insertions(+), 36 deletions(-) delete mode 100644 processor/src/additional_key.rs delete mode 100644 processor/src/lib.rs create mode 100644 processor/view-keys/Cargo.toml create mode 100644 processor/view-keys/LICENSE create mode 100644 processor/view-keys/README.md create mode 100644 processor/view-keys/src/lib.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a6260579f..1c37eb554 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -40,6 +40,7 @@ jobs: -p serai-message-queue \ -p serai-processor-messages \ -p serai-processor-key-gen \ + -p serai-processor-view-keys \ -p serai-processor-frost-attempt-manager \ -p serai-processor-primitives \ -p serai-processor-scanner \ diff --git a/Cargo.lock b/Cargo.lock index b3fa4e36e..8662be6fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8760,6 +8760,13 @@ dependencies = [ "serai-processor-scheduler-primitives", ] +[[package]] +name = "serai-processor-view-keys" +version = "0.1.0" +dependencies = [ + "ciphersuite", +] + [[package]] name = "serai-reproducible-runtime-tests" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index a2d86c823..eb98c263d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,6 +71,7 @@ members = [ "processor/messages", "processor/key-gen", + "processor/view-keys", "processor/frost-attempt-manager", "processor/primitives", diff --git a/processor/src/additional_key.rs b/processor/src/additional_key.rs deleted file mode 100644 index f875950d7..000000000 --- a/processor/src/additional_key.rs +++ /dev/null @@ -1,14 +0,0 @@ -use ciphersuite::Ciphersuite; - -use crate::networks::Network; - -// Generate a static additional key for a given chain in a globally consistent manner -// Doesn't consider the current group key to increase the simplicity of verifying Serai's status -// Takes an index, k, to support protocols which use multiple secondary keys -// Presumably a view key -pub fn additional_key(k: u64) -> ::F { - ::hash_to_F( - b"Serai DEX Additional Key", - &[N::ID.as_bytes(), &k.to_le_bytes()].concat(), - ) -} diff --git a/processor/src/lib.rs b/processor/src/lib.rs deleted file mode 100644 index bbff33f6a..000000000 --- a/processor/src/lib.rs +++ /dev/null @@ -1,15 +0,0 @@ -#![allow(dead_code)] - -mod plan; -pub use plan::*; - -mod db; -pub(crate) use db::*; - -use serai_processor_key_gen as key_gen; - -pub mod networks; -pub(crate) mod multisigs; - -mod additional_key; -pub use additional_key::additional_key; diff --git a/processor/src/main.rs b/processor/src/main.rs index 49406aaf3..104067290 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -27,9 +27,6 @@ use serai_env as env; use message_queue::{Service, client::MessageQueue}; -mod plan; -pub use plan::*; - mod networks; use networks::{Block, Network}; #[cfg(feature = "bitcoin")] @@ -39,9 +36,6 @@ use networks::Ethereum; #[cfg(feature = "monero")] use networks::Monero; -mod additional_key; -pub use additional_key::additional_key; - mod db; pub use db::*; diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs index 
c20a922ca..1c4adabf8 100644 --- a/processor/src/multisigs/mod.rs +++ b/processor/src/multisigs/mod.rs @@ -4,5 +4,5 @@ pub enum MultisigEvent { // Batches to publish Batches(Option<(::G, ::G)>, Vec), // Eventuality completion found on-chain - Completed(Vec, [u8; 32], , [u8; 32], N::Eventuality), } diff --git a/processor/view-keys/Cargo.toml b/processor/view-keys/Cargo.toml new file mode 100644 index 000000000..6fdd91346 --- /dev/null +++ b/processor/view-keys/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "serai-processor-view-keys" +version = "0.1.0" +description = "View keys for the Serai processor" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/view-keys" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +ciphersuite = { version = "0.4", path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } diff --git a/processor/view-keys/LICENSE b/processor/view-keys/LICENSE new file mode 100644 index 000000000..91d893c11 --- /dev/null +++ b/processor/view-keys/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022-2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/processor/view-keys/README.md b/processor/view-keys/README.md new file mode 100644 index 000000000..4354eed6b --- /dev/null +++ b/processor/view-keys/README.md @@ -0,0 +1,6 @@ +# Serai Processor View Keys + +View keys for the Serai processor. + +This is a MIT-licensed library made available for anyone to generate Serai's +view keys, as necessary for auditing reasons and for sending coins to Serai. diff --git a/processor/view-keys/src/lib.rs b/processor/view-keys/src/lib.rs new file mode 100644 index 000000000..c0d4c68e0 --- /dev/null +++ b/processor/view-keys/src/lib.rs @@ -0,0 +1,13 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use ciphersuite::Ciphersuite; + +/// Generate a view key for usage within Serai. +/// +/// `k` is the index of the key to generate (enabling generating multiple view keys within a +/// single context). 
+pub fn view_key(k: u64) -> C::F { + C::hash_to_F(b"Serai DEX View Key", &k.to_le_bytes()) +} From 91d04b6dc6f7e5bb88ba759f53bd5fa7a9a3da4d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 4 Sep 2024 22:39:41 -0400 Subject: [PATCH 074/179] Split processor into bitcoin-processor, ethereum-processor, monero-processor --- .github/workflows/tests.yml | 4 +- Cargo.toml | 5 +- deny.toml | 6 +- processor/Cargo.toml | 96 --- processor/README.md | 6 +- processor/bitcoin/Cargo.toml | 46 ++ processor/{ => bitcoin}/LICENSE | 0 processor/bitcoin/README.md | 1 + .../bitcoin.rs => bitcoin/src/lib.rs} | 4 + processor/ethereum/Cargo.toml | 45 ++ processor/ethereum/LICENSE | 15 + processor/ethereum/README.md | 1 + .../ethereum.rs => ethereum/src/lib.rs} | 4 + processor/monero/Cargo.toml | 46 ++ processor/monero/LICENSE | 15 + processor/monero/README.md | 1 + .../networks/monero.rs => monero/src/lib.rs} | 4 + processor/scanner/src/lib.rs | 4 + .../scheduler/utxo/primitives/src/lib.rs | 1 + processor/src/networks/mod.rs | 658 ------------------ tests/full-stack/Cargo.toml | 2 +- tests/processor/Cargo.toml | 2 +- 22 files changed, 204 insertions(+), 762 deletions(-) delete mode 100644 processor/Cargo.toml create mode 100644 processor/bitcoin/Cargo.toml rename processor/{ => bitcoin}/LICENSE (100%) create mode 100644 processor/bitcoin/README.md rename processor/{src/networks/bitcoin.rs => bitcoin/src/lib.rs} (99%) create mode 100644 processor/ethereum/Cargo.toml create mode 100644 processor/ethereum/LICENSE create mode 100644 processor/ethereum/README.md rename processor/{src/networks/ethereum.rs => ethereum/src/lib.rs} (99%) create mode 100644 processor/monero/Cargo.toml create mode 100644 processor/monero/LICENSE create mode 100644 processor/monero/README.md rename processor/{src/networks/monero.rs => monero/src/lib.rs} (99%) delete mode 100644 processor/src/networks/mod.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1c37eb554..a572dcf95 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -48,7 +48,9 @@ jobs: -p serai-processor-utxo-scheduler-primitives \ -p serai-processor-utxo-scheduler \ -p serai-processor-transaction-chaining-scheduler \ - -p serai-processor \ + -p serai-bitcoin-processor \ + -p serai-ethereum-processor \ + -p serai-monero-processor \ -p tendermint-machine \ -p tributary-chain \ -p serai-coordinator \ diff --git a/Cargo.toml b/Cargo.toml index eb98c263d..3ec76f593 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,6 +70,7 @@ members = [ "message-queue", "processor/messages", + "processor/key-gen", "processor/view-keys", "processor/frost-attempt-manager", @@ -80,7 +81,9 @@ members = [ "processor/scheduler/utxo/primitives", "processor/scheduler/utxo/standard", "processor/scheduler/utxo/transaction-chaining", - "processor", + "processor/bitcoin", + "processor/ethereum", + "processor/monero", "coordinator/tributary/tendermint", "coordinator/tributary", diff --git a/deny.toml b/deny.toml index 16d3cbeaa..8fbb8fc9a 100644 --- a/deny.toml +++ b/deny.toml @@ -46,6 +46,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-message-queue" }, { allow = ["AGPL-3.0"], name = "serai-processor-messages" }, + { allow = ["AGPL-3.0"], name = "serai-processor-key-gen" }, { allow = ["AGPL-3.0"], name = "serai-processor-frost-attempt-manager" }, @@ -54,7 +55,10 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler-primitives" }, { allow = ["AGPL-3.0"], name = "serai-processor-standard-scheduler" }, { allow = 
["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" }, - { allow = ["AGPL-3.0"], name = "serai-processor" }, + + { allow = ["AGPL-3.0"], name = "serai-bitcoin-processor" }, + { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" }, + { allow = ["AGPL-3.0"], name = "serai-monero-processor" }, { allow = ["AGPL-3.0"], name = "tributary-chain" }, { allow = ["AGPL-3.0"], name = "serai-coordinator" }, diff --git a/processor/Cargo.toml b/processor/Cargo.toml deleted file mode 100644 index 2d386f2d0..000000000 --- a/processor/Cargo.toml +++ /dev/null @@ -1,96 +0,0 @@ -[package] -name = "serai-processor" -version = "0.1.0" -description = "Multichain processor premised on canonicity to reach distributed consensus automatically" -license = "AGPL-3.0-only" -repository = "https://github.com/serai-dex/serai/tree/develop/processor" -authors = ["Luke Parker "] -keywords = [] -edition = "2021" -publish = false - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[lints] -workspace = true - -[dependencies] -# Macros -async-trait = { version = "0.1", default-features = false } -zeroize = { version = "1", default-features = false, features = ["std"] } -thiserror = { version = "1", default-features = false } - -# Libs -rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } -rand_chacha = { version = "0.3", default-features = false, features = ["std"] } - -# Encoders -const-hex = { version = "1", default-features = false } -hex = { version = "0.4", default-features = false, features = ["std"] } -scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } -borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -serde_json = { version = "1", default-features = false, features = ["std"] } - -# Cryptography -ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } - -transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std"] } -ec-divisors = { package = "ec-divisors", path = "../crypto/evrf/divisors", default-features = false } -dkg = { package = "dkg", path = "../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } -frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] } -frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false } - -# Bitcoin/Ethereum -k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true } - -# Bitcoin -secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } -bitcoin-serai = { path = "../networks/bitcoin", default-features = false, features = ["std"], optional = true } - -# Ethereum -ethereum-serai = { path = "../networks/ethereum", default-features = false, optional = true } - -# Monero -dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true } -monero-simple-request-rpc = { path = "../networks/monero/rpc/simple-request", default-features = false, optional = true } -monero-wallet = { path = "../networks/monero/wallet", default-features = false, features = ["std", "multisig", "compile-time-generators"], optional = true } - -# Application -log = { version = "0.4", default-features = false, features = ["std"] } -env_logger = { version 
= "0.10", default-features = false, features = ["humantime"], optional = true } -tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } - -zalloc = { path = "../common/zalloc" } -serai-db = { path = "../common/db" } -serai-env = { path = "../common/env", optional = true } -# TODO: Replace with direct usage of primitives -serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] } - -messages = { package = "serai-processor-messages", path = "./messages" } - -message-queue = { package = "serai-message-queue", path = "../message-queue", optional = true } - -[dev-dependencies] -frost = { package = "modular-frost", path = "../crypto/frost", features = ["tests"] } - -sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } - -ethereum-serai = { path = "../networks/ethereum", default-features = false, features = ["tests"] } - -dockertest = "0.5" -serai-docker-tests = { path = "../tests/docker" } - -[features] -secp256k1 = ["k256", "dkg/evrf-secp256k1", "frost/secp256k1"] -bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"] - -ethereum = ["secp256k1", "ethereum-serai/tests"] - -ed25519 = ["dalek-ff-group", "dkg/evrf-ed25519", "frost/ed25519"] -monero = ["ed25519", "monero-simple-request-rpc", "monero-wallet", "serai-client/monero"] - -binaries = ["env_logger", "serai-env", "message-queue"] -parity-db = ["serai-db/parity-db"] -rocksdb = ["serai-db/rocksdb"] diff --git a/processor/README.md b/processor/README.md index 37d11e0d4..e942f5574 100644 --- a/processor/README.md +++ b/processor/README.md @@ -1,5 +1,5 @@ # Processor -The Serai processor scans a specified external network, communicating with the -coordinator. For details on its exact messaging flow, and overall policies, -please view `docs/processor`. +The Serai processors, built from the libraries here, scan an external network +and report the indexed data to the coordinator. For details on its exact +messaging flow, and overall policies, please view `docs/processor`. 
diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml new file mode 100644 index 000000000..a57495427 --- /dev/null +++ b/processor/bitcoin/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "serai-bitcoin-processor" +version = "0.1.0" +description = "Serai Bitcoin Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/bitcoin" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +async-trait = { version = "0.1", default-features = false } + +const-hex = { version = "1", default-features = false } +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } +serde_json = { version = "1", default-features = false, features = ["std"] } + +k256 = { version = "^0.13.1", default-features = false, features = ["std"] } +secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"] } +bitcoin-serai = { path = "../../networks/bitcoin", default-features = false, features = ["std"] } + +log = { version = "0.4", default-features = false, features = ["std"] } +env_logger = { version = "0.10", default-features = false, features = ["humantime"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +zalloc = { path = "../../common/zalloc" } +serai-db = { path = "../../common/db" } +serai-env = { path = "../../common/env" } + +messages = { package = "serai-processor-messages", path = "../messages" } + +message-queue = { package = "serai-message-queue", path = "../../message-queue" } + +[features] +parity-db = ["serai-db/parity-db"] +rocksdb = ["serai-db/rocksdb"] diff --git a/processor/LICENSE b/processor/bitcoin/LICENSE similarity index 100% rename from processor/LICENSE rename to processor/bitcoin/LICENSE diff --git a/processor/bitcoin/README.md b/processor/bitcoin/README.md new file mode 100644 index 000000000..79d1cedde --- /dev/null +++ b/processor/bitcoin/README.md @@ -0,0 +1 @@ +# Serai Bitcoin Processor diff --git a/processor/src/networks/bitcoin.rs b/processor/bitcoin/src/lib.rs similarity index 99% rename from processor/src/networks/bitcoin.rs rename to processor/bitcoin/src/lib.rs index 43cad1c78..bccdc2861 100644 --- a/processor/src/networks/bitcoin.rs +++ b/processor/bitcoin/src/lib.rs @@ -1,3 +1,7 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + use std::{sync::OnceLock, time::Duration, io, collections::HashMap}; use async_trait::async_trait; diff --git a/processor/ethereum/Cargo.toml b/processor/ethereum/Cargo.toml new file mode 100644 index 000000000..eff47af96 --- /dev/null +++ b/processor/ethereum/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "serai-ethereum-processor" +version = "0.1.0" +description = "Serai Ethereum Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +async-trait = { version = "0.1", 
default-features = false } + +const-hex = { version = "1", default-features = false } +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } +serde_json = { version = "1", default-features = false, features = ["std"] } + +k256 = { version = "^0.13.1", default-features = false, features = ["std"] } +ethereum-serai = { path = "../../networks/ethereum", default-features = false, optional = true } + +log = { version = "0.4", default-features = false, features = ["std"] } +env_logger = { version = "0.10", default-features = false, features = ["humantime"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +zalloc = { path = "../../common/zalloc" } +serai-db = { path = "../../common/db" } +serai-env = { path = "../../common/env" } + +messages = { package = "serai-processor-messages", path = "../messages" } + +message-queue = { package = "serai-message-queue", path = "../../message-queue" } + +[features] +parity-db = ["serai-db/parity-db"] +rocksdb = ["serai-db/rocksdb"] diff --git a/processor/ethereum/LICENSE b/processor/ethereum/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/ethereum/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
diff --git a/processor/ethereum/README.md b/processor/ethereum/README.md new file mode 100644 index 000000000..5301c64b9 --- /dev/null +++ b/processor/ethereum/README.md @@ -0,0 +1 @@ +# Serai Ethereum Processor diff --git a/processor/src/networks/ethereum.rs b/processor/ethereum/src/lib.rs similarity index 99% rename from processor/src/networks/ethereum.rs rename to processor/ethereum/src/lib.rs index 3545f34ac..99d042038 100644 --- a/processor/src/networks/ethereum.rs +++ b/processor/ethereum/src/lib.rs @@ -1,3 +1,7 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + use core::{fmt, time::Duration}; use std::{ sync::Arc, diff --git a/processor/monero/Cargo.toml b/processor/monero/Cargo.toml new file mode 100644 index 000000000..e71472e49 --- /dev/null +++ b/processor/monero/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "serai-monero-processor" +version = "0.1.0" +description = "Serai Monero Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/monero" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +async-trait = { version = "0.1", default-features = false } + +const-hex = { version = "1", default-features = false } +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } +serde_json = { version = "1", default-features = false, features = ["std"] } + +dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true } +monero-simple-request-rpc = { path = "../../networks/monero/rpc/simple-request", default-features = false, optional = true } +monero-wallet = { path = "../../networks/monero/wallet", default-features = false, features = ["std", "multisig", "compile-time-generators"], optional = true } + +log = { version = "0.4", default-features = false, features = ["std"] } +env_logger = { version = "0.10", default-features = false, features = ["humantime"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +zalloc = { path = "../../common/zalloc" } +serai-db = { path = "../../common/db" } +serai-env = { path = "../../common/env" } + +messages = { package = "serai-processor-messages", path = "../messages" } + +message-queue = { package = "serai-message-queue", path = "../../message-queue" } + +[features] +parity-db = ["serai-db/parity-db"] +rocksdb = ["serai-db/rocksdb"] diff --git a/processor/monero/LICENSE b/processor/monero/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/monero/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/monero/README.md b/processor/monero/README.md new file mode 100644 index 000000000..564c83a02 --- /dev/null +++ b/processor/monero/README.md @@ -0,0 +1 @@ +# Serai Monero Processor diff --git a/processor/src/networks/monero.rs b/processor/monero/src/lib.rs similarity index 99% rename from processor/src/networks/monero.rs rename to processor/monero/src/lib.rs index 6ffa29df2..8786bef33 100644 --- a/processor/src/networks/monero.rs +++ b/processor/monero/src/lib.rs @@ -1,3 +1,7 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + use std::{time::Duration, collections::HashMap, io}; use async_trait::async_trait; diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index ecefb9a8b..17feefbea 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -1,3 +1,7 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + use core::{marker::PhantomData, fmt::Debug}; use std::{io, collections::HashMap}; diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs index 274eb2a4d..2f51e9e09 100644 --- a/processor/scheduler/utxo/primitives/src/lib.rs +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -97,6 +97,7 @@ pub trait TransactionPlanner: 'static + Send + Sync { /// more information. /// /// Returns `None` if the fee exceeded the inputs, or `Some` otherwise. + // TODO: Enum for Change of None, Some, Mandatory fn plan_transaction_with_fee_amortization( operating_costs: &mut u64, fee_rate: Self::FeeRate, diff --git a/processor/src/networks/mod.rs b/processor/src/networks/mod.rs deleted file mode 100644 index 81838ae12..000000000 --- a/processor/src/networks/mod.rs +++ /dev/null @@ -1,658 +0,0 @@ -use core::{fmt::Debug, time::Duration}; -use std::{io, collections::HashMap}; - -use async_trait::async_trait; -use thiserror::Error; - -use frost::{ - dkg::evrf::EvrfCurve, - curve::{Ciphersuite, Curve}, - ThresholdKeys, - sign::PreprocessMachine, -}; - -use serai_client::primitives::{NetworkId, Balance}; - -use log::error; - -use tokio::time::sleep; - -#[cfg(feature = "bitcoin")] -pub mod bitcoin; -#[cfg(feature = "bitcoin")] -pub use self::bitcoin::Bitcoin; - -#[cfg(feature = "ethereum")] -pub mod ethereum; -#[cfg(feature = "ethereum")] -pub use ethereum::Ethereum; - -#[cfg(feature = "monero")] -pub mod monero; -#[cfg(feature = "monero")] -pub use monero::Monero; - -use crate::{Payment, Plan, multisigs::scheduler::Scheduler}; - -#[derive(Clone, Copy, Error, Debug)] -pub enum NetworkError { - #[error("failed to connect to network daemon")] - ConnectionError, -} - -pub trait Id: - Send + Sync + Clone + Default + PartialEq + AsRef<[u8]> + AsMut<[u8]> + Debug -{ -} -impl + AsMut<[u8]> + Debug> Id for I {} - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum OutputType { - // Needs to be processed/sent up to Substrate - External, - - // Given a known output set, and a known series of outbound transactions, we should be able to - // form a completely deterministic schedule S. The issue is when S has TXs which spend prior TXs - // in S (which is needed for our logarithmic scheduling). In order to have the descendant TX, say - // S[1], build off S[0], we need to observe when S[0] is included on-chain. - // - // We cannot. 
- // - // Monero (and other privacy coins) do not expose their UTXO graphs. Even if we know how to - // create S[0], and the actual payment info behind it, we cannot observe it on the blockchain - // unless we participated in creating it. Locking the entire schedule, when we cannot sign for - // the entire schedule at once, to a single signing set isn't feasible. - // - // While any member of the active signing set can provide data enabling other signers to - // participate, it's several KB of data which we then have to code communication for. - // The other option is to simply not observe S[0]. Instead, observe a TX with an identical output - // to the one in S[0] we intended to use for S[1]. It's either from S[0], or Eve, a malicious - // actor, has sent us a forged TX which is... equally as usable? so who cares? - // - // The only issue is if we have multiple outputs on-chain with identical amounts and purposes. - // Accordingly, when the scheduler makes a plan for when a specific output is available, it - // shouldn't write that plan. It should *push* that plan to a queue of plans to perform when - // instances of that output occur. - Branch, - - // Should be added to the available UTXO pool with no further action - Change, - - // Forwarded output from the prior multisig - Forwarded, -} - -impl OutputType { - fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&[match self { - OutputType::External => 0, - OutputType::Branch => 1, - OutputType::Change => 2, - OutputType::Forwarded => 3, - }]) - } - - fn read(reader: &mut R) -> io::Result { - let mut byte = [0; 1]; - reader.read_exact(&mut byte)?; - Ok(match byte[0] { - 0 => OutputType::External, - 1 => OutputType::Branch, - 2 => OutputType::Change, - 3 => OutputType::Forwarded, - _ => Err(io::Error::other("invalid OutputType"))?, - }) - } -} - -pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Debug { - type Id: 'static + Id; - - fn kind(&self) -> OutputType; - - fn id(&self) -> Self::Id; - fn tx_id(&self) -> >::Id; // TODO: Review use of - fn key(&self) -> ::G; - - fn presumed_origin(&self) -> Option; - - fn balance(&self) -> Balance; - fn data(&self) -> &[u8]; - - fn write(&self, writer: &mut W) -> io::Result<()>; - fn read(reader: &mut R) -> io::Result; -} - -#[async_trait] -pub trait Transaction: Send + Sync + Sized + Clone + PartialEq + Debug { - type Id: 'static + Id; - fn id(&self) -> Self::Id; - // TODO: Move to Balance - #[cfg(test)] - async fn fee(&self, network: &N) -> u64; -} - -pub trait SignableTransaction: Send + Sync + Clone + Debug { - // TODO: Move to Balance - fn fee(&self) -> u64; -} - -pub trait Eventuality: Send + Sync + Clone + PartialEq + Debug { - type Claim: Send + Sync + Clone + PartialEq + Default + AsRef<[u8]> + AsMut<[u8]> + Debug; - type Completion: Send + Sync + Clone + PartialEq + Debug; - - fn lookup(&self) -> Vec; - - fn read(reader: &mut R) -> io::Result; - fn serialize(&self) -> Vec; - - fn claim(completion: &Self::Completion) -> Self::Claim; - - // TODO: Make a dedicated Completion trait - fn serialize_completion(completion: &Self::Completion) -> Vec; - fn read_completion(reader: &mut R) -> io::Result; -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct EventualitiesTracker { - // Lookup property (input, nonce, TX extra...) 
-> (plan ID, eventuality) - map: HashMap, ([u8; 32], E)>, - // Block number we've scanned these eventualities too - block_number: usize, -} - -impl EventualitiesTracker { - pub fn new() -> Self { - EventualitiesTracker { map: HashMap::new(), block_number: usize::MAX } - } - - pub fn register(&mut self, block_number: usize, id: [u8; 32], eventuality: E) { - log::info!("registering eventuality for {}", hex::encode(id)); - - let lookup = eventuality.lookup(); - if self.map.contains_key(&lookup) { - panic!("registering an eventuality multiple times or lookup collision"); - } - self.map.insert(lookup, (id, eventuality)); - // If our self tracker already went past this block number, set it back - self.block_number = self.block_number.min(block_number); - } - - pub fn drop(&mut self, id: [u8; 32]) { - // O(n) due to the lack of a reverse lookup - let mut found_key = None; - for (key, value) in &self.map { - if value.0 == id { - found_key = Some(key.clone()); - break; - } - } - - if let Some(key) = found_key { - self.map.remove(&key); - } - } -} - -impl Default for EventualitiesTracker { - fn default() -> Self { - Self::new() - } -} - -#[async_trait] -pub trait Block: Send + Sync + Sized + Clone + Debug { - // This is currently bounded to being 32 bytes. - type Id: 'static + Id; - fn id(&self) -> Self::Id; - fn parent(&self) -> Self::Id; - /// The monotonic network time at this block. - /// - /// This call is presumed to be expensive and should only be called sparingly. - async fn time(&self, rpc: &N) -> u64; -} - -// The post-fee value of an expected branch. -pub struct PostFeeBranch { - pub expected: u64, - pub actual: Option, -} - -// Return the PostFeeBranches needed when dropping a transaction -fn drop_branches( - key: ::G, - payments: &[Payment], -) -> Vec { - let mut branch_outputs = vec![]; - for payment in payments { - if Some(&payment.address) == N::branch_address(key).as_ref() { - branch_outputs.push(PostFeeBranch { expected: payment.balance.amount.0, actual: None }); - } - } - branch_outputs -} - -pub struct PreparedSend { - /// None for the transaction if the SignableTransaction was dropped due to lack of value. - pub tx: Option<(N::SignableTransaction, N::Eventuality)>, - pub post_fee_branches: Vec, - /// The updated operating costs after preparing this transaction. - pub operating_costs: u64, -} - -#[async_trait] -#[rustfmt::skip] -pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug { - /// The elliptic curve used for this network. - type Curve: Curve - + EvrfCurve::F>>>; - - /// The type representing the transaction for this network. - type Transaction: Transaction; // TODO: Review use of - /// The type representing the block for this network. - type Block: Block; - - /// The type containing all information on a scanned output. - // This is almost certainly distinct from the network's native output type. - type Output: Output; - /// The type containing all information on a planned transaction, waiting to be signed. - type SignableTransaction: SignableTransaction; - /// The type containing all information to check if a plan was completed. - /// - /// This must be binding to both the outputs expected and the plan ID. - type Eventuality: Eventuality; - /// The FROST machine to sign a transaction. - type TransactionMachine: PreprocessMachine< - Signature = ::Completion, - >; - - /// The scheduler for this network. - type Scheduler: Scheduler; - - /// The type representing an address. 
- // This should NOT be a String, yet a tailored type representing an efficient binary encoding, - // as detailed in the integration documentation. - type Address: Send - + Sync - + Clone - + PartialEq - + Eq - + Debug - + ToString - + TryInto> - + TryFrom>; - - /// Network ID for this network. - const NETWORK: NetworkId; - /// String ID for this network. - const ID: &'static str; - /// The estimated amount of time a block will take. - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize; - /// The amount of confirmations required to consider a block 'final'. - const CONFIRMATIONS: usize; - /// The maximum amount of outputs which will fit in a TX. - /// This should be equal to MAX_INPUTS unless one is specifically limited. - /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. - const MAX_OUTPUTS: usize; - - /// Minimum output value which will be handled. - /// - /// For any received output, there's the cost to spend the output. This value MUST exceed the - /// cost to spend said output, and should by a notable margin (not just 2x, yet an order of - /// magnitude). - // TODO: Dust needs to be diversified per Coin - const DUST: u64; - - /// The cost to perform input aggregation with a 2-input 1-output TX. - const COST_TO_AGGREGATE: u64; - - /// Tweak keys for this network. - fn tweak_keys(key: &mut ThresholdKeys); - - /// Address for the given group key to receive external coins to. - #[cfg(test)] - async fn external_address(&self, key: ::G) -> Self::Address; - /// Address for the given group key to use for scheduled branches. - fn branch_address(key: ::G) -> Option; - /// Address for the given group key to use for change. - fn change_address(key: ::G) -> Option; - /// Address for forwarded outputs from prior multisigs. - /// - /// forward_address must only return None if explicit forwarding isn't necessary. - fn forward_address(key: ::G) -> Option; - - /// Get the latest block's number. - async fn get_latest_block_number(&self) -> Result; - /// Get a block by its number. - async fn get_block(&self, number: usize) -> Result; - - /// Get the latest block's number, retrying until success. - async fn get_latest_block_number_with_retries(&self) -> usize { - loop { - match self.get_latest_block_number().await { - Ok(number) => { - return number; - } - Err(e) => { - error!( - "couldn't get the latest block number in the with retry get_latest_block_number: {e:?}", - ); - sleep(Duration::from_secs(10)).await; - } - } - } - } - - /// Get a block, retrying until success. - async fn get_block_with_retries(&self, block_number: usize) -> Self::Block { - loop { - match self.get_block(block_number).await { - Ok(block) => { - return block; - } - Err(e) => { - error!("couldn't get block {block_number} in the with retry get_block: {:?}", e); - sleep(Duration::from_secs(10)).await; - } - } - } - } - - /// Get the outputs within a block for a specific key. - async fn get_outputs( - &self, - block: &Self::Block, - key: ::G, - ) -> Vec; - - /// Get the registered eventualities completed within this block, and any prior blocks which - /// registered eventualities may have been completed in. - /// - /// This may panic if not fed a block greater than the tracker's block number. - /// - /// Plan ID -> (block number, TX ID, completion) - // TODO: get_eventuality_completions_internal + provided get_eventuality_completions for common - // code - // TODO: Consider having this return the Transaction + the Completion? - // Or Transaction with extract_completion? 
- async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Self::Block, - ) -> HashMap< - [u8; 32], - ( - usize, - >::Id, - ::Completion, - ), - >; - - /// Returns the needed fee to fulfill this Plan at this fee rate. - /// - /// Returns None if this Plan isn't fulfillable (such as when the fee exceeds the input value). - async fn needed_fee( - &self, - block_number: usize, - inputs: &[Self::Output], - payments: &[Payment], - change: &Option, - ) -> Result, NetworkError>; - - /// Create a SignableTransaction for the given Plan. - /// - /// The expected flow is: - /// 1) Call needed_fee - /// 2) If the Plan is fulfillable, amortize the fee - /// 3) Call signable_transaction *which MUST NOT return None if the above was done properly* - /// - /// This takes a destructured Plan as some of these arguments are malleated from the original - /// Plan. - // TODO: Explicit AmortizedPlan? - #[allow(clippy::too_many_arguments)] - async fn signable_transaction( - &self, - block_number: usize, - plan_id: &[u8; 32], - key: ::G, - inputs: &[Self::Output], - payments: &[Payment], - change: &Option, - scheduler_addendum: &>::Addendum, - ) -> Result, NetworkError>; - - /// Prepare a SignableTransaction for a transaction. - /// - /// This must not persist anything as we will prepare Plans we never intend to execute. - async fn prepare_send( - &self, - block_number: usize, - plan: Plan, - operating_costs: u64, - ) -> Result, NetworkError> { - // Sanity check this has at least one output planned - assert!((!plan.payments.is_empty()) || plan.change.is_some()); - - let plan_id = plan.id(); - let Plan { key, inputs, mut payments, change, scheduler_addendum } = plan; - let theoretical_change_amount = if change.is_some() { - inputs.iter().map(|input| input.balance().amount.0).sum::() - - payments.iter().map(|payment| payment.balance.amount.0).sum::() - } else { - 0 - }; - - let Some(tx_fee) = self.needed_fee(block_number, &inputs, &payments, &change).await? else { - // This Plan is not fulfillable - // TODO: Have Plan explicitly distinguish payments and branches in two separate Vecs? 
- return Ok(PreparedSend { - tx: None, - // Have all of its branches dropped - post_fee_branches: drop_branches(key, &payments), - // This plan expects a change output valued at sum(inputs) - sum(outputs) - // Since we can no longer create this change output, it becomes an operating cost - // TODO: Look at input restoration to reduce this operating cost - operating_costs: operating_costs + - if change.is_some() { theoretical_change_amount } else { 0 }, - }); - }; - - // Amortize the fee over the plan's payments - let (post_fee_branches, mut operating_costs) = (|| { - // If we're creating a change output, letting us recoup coins, amortize the operating costs - // as well - let total_fee = tx_fee + if change.is_some() { operating_costs } else { 0 }; - - let original_outputs = payments.iter().map(|payment| payment.balance.amount.0).sum::(); - // If this isn't enough for the total fee, drop and move on - if original_outputs < total_fee { - let mut remaining_operating_costs = operating_costs; - if change.is_some() { - // Operating costs increase by the TX fee - remaining_operating_costs += tx_fee; - // Yet decrease by the payments we managed to drop - remaining_operating_costs = remaining_operating_costs.saturating_sub(original_outputs); - } - return (drop_branches(key, &payments), remaining_operating_costs); - } - - let initial_payment_amounts = - payments.iter().map(|payment| payment.balance.amount.0).collect::>(); - - // Amortize the transaction fee across outputs - let mut remaining_fee = total_fee; - // Run as many times as needed until we can successfully subtract this fee - while remaining_fee != 0 { - // This shouldn't be a / by 0 as these payments have enough value to cover the fee - let this_iter_fee = remaining_fee / u64::try_from(payments.len()).unwrap(); - let mut overage = remaining_fee % u64::try_from(payments.len()).unwrap(); - for payment in &mut payments { - let this_payment_fee = this_iter_fee + overage; - // Only subtract the overage once - overage = 0; - - let subtractable = payment.balance.amount.0.min(this_payment_fee); - remaining_fee -= subtractable; - payment.balance.amount.0 -= subtractable; - } - } - - // If any payment is now below the dust threshold, set its value to 0 so it'll be dropped - for payment in &mut payments { - if payment.balance.amount.0 < Self::DUST { - payment.balance.amount.0 = 0; - } - } - - // Note the branch outputs' new values - let mut branch_outputs = vec![]; - for (initial_amount, payment) in initial_payment_amounts.into_iter().zip(&payments) { - if Some(&payment.address) == Self::branch_address(key).as_ref() { - branch_outputs.push(PostFeeBranch { - expected: initial_amount, - actual: if payment.balance.amount.0 == 0 { - None - } else { - Some(payment.balance.amount.0) - }, - }); - } - } - - // Drop payments now worth 0 - payments = payments - .drain(..) 
- .filter(|payment| { - if payment.balance.amount.0 != 0 { - true - } else { - log::debug!("dropping dust payment from plan {}", hex::encode(plan_id)); - false - } - }) - .collect(); - - // Sanity check the fee was successfully amortized - let new_outputs = payments.iter().map(|payment| payment.balance.amount.0).sum::(); - assert!((new_outputs + total_fee) <= original_outputs); - - ( - branch_outputs, - if change.is_none() { - // If the change is None, this had no effect on the operating costs - operating_costs - } else { - // Since the change is some, and we successfully amortized, the operating costs were - // recouped - 0 - }, - ) - })(); - - let Some(tx) = self - .signable_transaction( - block_number, - &plan_id, - key, - &inputs, - &payments, - &change, - &scheduler_addendum, - ) - .await? - else { - panic!( - "{}. {}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}, {}: {:?}", - "signable_transaction returned None for a TX we prior successfully calculated the fee for", - "id", - hex::encode(plan_id), - "inputs", - inputs, - "post-amortization payments", - payments, - "change", - change, - "successfully amoritized fee", - tx_fee, - "scheduler's addendum", - scheduler_addendum, - ) - }; - - if change.is_some() { - let on_chain_expected_change = - inputs.iter().map(|input| input.balance().amount.0).sum::() - - payments.iter().map(|payment| payment.balance.amount.0).sum::() - - tx_fee; - // If the change value is less than the dust threshold, it becomes an operating cost - // This may be slightly inaccurate as dropping payments may reduce the fee, raising the - // change above dust - // That's fine since it'd have to be in a very precarious state AND then it's over-eager in - // tabulating costs - if on_chain_expected_change < Self::DUST { - operating_costs += theoretical_change_amount; - } - } - - Ok(PreparedSend { tx: Some(tx), post_fee_branches, operating_costs }) - } - - /// Attempt to sign a SignableTransaction. - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: Self::SignableTransaction, - ) -> Result; - - /// Publish a completion. - async fn publish_completion( - &self, - completion: &::Completion, - ) -> Result<(), NetworkError>; - - /// Confirm a plan was completed by the specified transaction, per our bounds. - /// - /// Returns Err if there was an error with the confirmation methodology. - /// Returns Ok(None) if this is not a valid completion. - /// Returns Ok(Some(_)) with the completion if it's valid. - async fn confirm_completion( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> Result::Completion>, NetworkError>; - - /// Get a block's number by its ID. - #[cfg(test)] - async fn get_block_number(&self, id: &>::Id) -> usize; - - /// Check an Eventuality is fulfilled by a claim. - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> bool; - - /// Get a transaction by the Eventuality it completes. - #[cfg(test)] - async fn get_transaction_by_eventuality( - &self, - block: usize, - eventuality: &Self::Eventuality, - ) -> Self::Transaction; - - #[cfg(test)] - async fn mine_block(&self); - - /// Sends to the specified address. - /// Additionally mines enough blocks so that the TX is past the confirmation depth. - #[cfg(test)] - async fn test_send(&self, key: Self::Address) -> Self::Block; -} - -pub trait UtxoNetwork: Network { - /// The maximum amount of inputs which will fit in a TX. - /// This should be equal to MAX_OUTPUTS unless one is specifically limited. 
- /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. - const MAX_INPUTS: usize; -} diff --git a/tests/full-stack/Cargo.toml b/tests/full-stack/Cargo.toml index 12af01bdf..a9dbdc63a 100644 --- a/tests/full-stack/Cargo.toml +++ b/tests/full-stack/Cargo.toml @@ -34,7 +34,7 @@ scale = { package = "parity-scale-codec", version = "3" } serde = "1" serde_json = "1" -processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "monero"] } +# processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "monero"] } serai-client = { path = "../../substrate/client", features = ["serai"] } diff --git a/tests/processor/Cargo.toml b/tests/processor/Cargo.toml index f06e47419..13299b932 100644 --- a/tests/processor/Cargo.toml +++ b/tests/processor/Cargo.toml @@ -46,7 +46,7 @@ serde_json = { version = "1", default-features = false } tokio = { version = "1", features = ["time"] } -processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "ethereum", "monero"] } +# processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "ethereum", "monero"] } dockertest = "0.5" serai-docker-tests = { path = "../docker" } From d2fb3276a4c410ee3e7a305ea3ccb8a69f828d7b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 4 Sep 2024 22:50:02 -0400 Subject: [PATCH 075/179] Add empty serai-processor-signers library This will replace the signers still in the monolithic Processor binary. --- .github/workflows/tests.yml | 1 + Cargo.toml | 2 ++ deny.toml | 1 + processor/frost-attempt-manager/Cargo.toml | 2 +- processor/frost-attempt-manager/README.md | 2 +- processor/key-gen/README.md | 2 +- processor/scanner/Cargo.toml | 4 ++-- processor/signers/Cargo.toml | 22 ++++++++++++++++++++++ processor/signers/LICENSE | 15 +++++++++++++++ processor/signers/README.md | 6 ++++++ processor/signers/src/cosigner.rs | 0 processor/signers/src/lib.rs | 0 processor/signers/src/substrate.rs | 0 processor/signers/src/transaction.rs | 0 14 files changed, 52 insertions(+), 5 deletions(-) create mode 100644 processor/signers/Cargo.toml create mode 100644 processor/signers/LICENSE create mode 100644 processor/signers/README.md create mode 100644 processor/signers/src/cosigner.rs create mode 100644 processor/signers/src/lib.rs create mode 100644 processor/signers/src/substrate.rs create mode 100644 processor/signers/src/transaction.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a572dcf95..edd219f97 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -48,6 +48,7 @@ jobs: -p serai-processor-utxo-scheduler-primitives \ -p serai-processor-utxo-scheduler \ -p serai-processor-transaction-chaining-scheduler \ + -p serai-processor-signers \ -p serai-bitcoin-processor \ -p serai-ethereum-processor \ -p serai-monero-processor \ diff --git a/Cargo.toml b/Cargo.toml index 3ec76f593..25e6c25d8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,6 +81,8 @@ members = [ "processor/scheduler/utxo/primitives", "processor/scheduler/utxo/standard", "processor/scheduler/utxo/transaction-chaining", + "processor/signers", + "processor/bitcoin", "processor/ethereum", "processor/monero", diff --git a/deny.toml b/deny.toml index 8fbb8fc9a..ef195411e 100644 --- a/deny.toml +++ b/deny.toml @@ -55,6 +55,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler-primitives" }, { allow = ["AGPL-3.0"], name = "serai-processor-standard-scheduler" }, { allow = 
["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" }, + { allow = ["AGPL-3.0"], name = "serai-processor-signers" }, { allow = ["AGPL-3.0"], name = "serai-bitcoin-processor" }, { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" }, diff --git a/processor/frost-attempt-manager/Cargo.toml b/processor/frost-attempt-manager/Cargo.toml index a01acf0fd..67bd8bb63 100644 --- a/processor/frost-attempt-manager/Cargo.toml +++ b/processor/frost-attempt-manager/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/processor/frost-at authors = ["Luke Parker "] keywords = ["frost", "multisig", "threshold"] edition = "2021" -rust-version = "1.79" +publish = false [package.metadata.docs.rs] all-features = true diff --git a/processor/frost-attempt-manager/README.md b/processor/frost-attempt-manager/README.md index c7b0be259..08a61398a 100644 --- a/processor/frost-attempt-manager/README.md +++ b/processor/frost-attempt-manager/README.md @@ -3,4 +3,4 @@ A library for helper structures to manage various attempts of a FROST signing protocol. -This library is interacted with via the `serai-processor-messages::sign` API. +This library is interacted with via the `serai_processor_messages::sign` API. diff --git a/processor/key-gen/README.md b/processor/key-gen/README.md index c28357ba0..566d10354 100644 --- a/processor/key-gen/README.md +++ b/processor/key-gen/README.md @@ -5,4 +5,4 @@ protocol. Two invocations of the eVRF-based DKG are performed, one for Ristretto (to have a key to oraclize values onto the Serai blockchain with) and one for the external network's curve. -This library is interacted with via the `serai-processor-messages::key_gen` API. +This library is interacted with via the `serai_processor_messages::key_gen` API. diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml index c2dc31fe3..a3e6a9bac 100644 --- a/processor/scanner/Cargo.toml +++ b/processor/scanner/Cargo.toml @@ -5,9 +5,9 @@ description = "Scanner of abstract blockchains for Serai" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/processor/scanner" authors = ["Luke Parker "] -keywords = ["frost", "multisig", "threshold"] +keywords = [] edition = "2021" -rust-version = "1.79" +publish = false [package.metadata.docs.rs] all-features = true diff --git a/processor/signers/Cargo.toml b/processor/signers/Cargo.toml new file mode 100644 index 000000000..70248960c --- /dev/null +++ b/processor/signers/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "serai-processor-signers" +version = "0.1.0" +description = "Signers for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/signers" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["borsh", "scale"] + +[lints] +workspace = true + +[dependencies] diff --git a/processor/signers/LICENSE b/processor/signers/LICENSE new file mode 100644 index 000000000..e091b1498 --- /dev/null +++ b/processor/signers/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/signers/README.md b/processor/signers/README.md new file mode 100644 index 000000000..b6eddd56a --- /dev/null +++ b/processor/signers/README.md @@ -0,0 +1,6 @@ +# Processor Signers + +Implementations of the three signers used by a processor (the transaction signer, +the Substrate signer, and the cosigner). + +This library is interacted with via the `serai_processor_messages::sign` API. diff --git a/processor/signers/src/cosigner.rs b/processor/signers/src/cosigner.rs new file mode 100644 index 000000000..e69de29bb diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs new file mode 100644 index 000000000..e69de29bb diff --git a/processor/signers/src/substrate.rs b/processor/signers/src/substrate.rs new file mode 100644 index 000000000..e69de29bb diff --git a/processor/signers/src/transaction.rs b/processor/signers/src/transaction.rs new file mode 100644 index 000000000..e69de29bb From b61e5b0ac7e13072a37507e80ae8d60e94926b19 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 5 Sep 2024 14:42:06 -0400 Subject: [PATCH 076/179] Minor work on the transaction signing task --- processor/frost-attempt-manager/src/lib.rs | 4 +- processor/primitives/src/eventuality.rs | 5 ++ processor/scanner/Cargo.toml | 1 + processor/scanner/src/lib.rs | 3 + processor/scheduler/primitives/src/lib.rs | 17 ++++- processor/scheduler/utxo/standard/src/lib.rs | 2 + .../utxo/transaction-chaining/src/lib.rs | 2 + processor/signers/Cargo.toml | 10 +++ processor/signers/src/cosigner.rs | 0 processor/signers/src/lib.rs | 56 +++++++++++++++ processor/signers/src/substrate.rs | 0 processor/signers/src/transaction.rs | 0 14 files changed, 169 insertions(+), 2 deletions(-) delete mode 100644 processor/signers/src/cosigner.rs delete mode 100644 processor/signers/src/substrate.rs delete mode 100644 processor/signers/src/transaction.rs create mode 100644 processor/signers/src/transaction/db.rs create mode 100644 processor/signers/src/transaction/mod.rs diff --git a/processor/frost-attempt-manager/src/lib.rs b/processor/frost-attempt-manager/src/lib.rs index cd8452fa5..c4d1708d6 100644 --- a/processor/frost-attempt-manager/src/lib.rs +++ b/processor/frost-attempt-manager/src/lib.rs @@ -32,6 +32,8 @@ pub struct AttemptManager { impl AttemptManager { /// Create a new attempt manager. + /// + /// This will not restore any signing sessions from the database. Those must be re-registered. pub fn new(db: D, session: Session, start_i: Participant) -> Self { AttemptManager { db, session, start_i, active: HashMap::new() } } @@ -52,7 +54,7 @@ impl AttemptManager { /// This frees all memory used for it and means no further messages will be handled for it. /// This does not stop the protocol from being re-registered and further worked on (with /// undefined behavior) then. The higher-level context must never call `register` again with this - /// ID. + /// ID accordingly.
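For orientation before `retire`'s body below, here is a minimal sketch of the full lifecycle a caller drives; `send_to_coordinator` and `next_coordinator_message` are stand-ins invented for this sketch, while `new`, `register`, `handle`, and `retire` are this crate's own API:

// A hedged sketch of the register -> handle -> retire lifecycle.
let mut manager = AttemptManager::new(db, session, start_i);
// Register a signing protocol under its unique 32-byte ID, sending the initial messages.
send_to_coordinator(manager.register(id, machines));
loop {
  match manager.handle(next_coordinator_message()) {
    Response::Messages(msgs) => send_to_coordinator(msgs),
    Response::Signature(_signature) => {
      // Free the protocol's state; `register` must never be called with this ID again.
      manager.retire(id);
      break;
    }
  }
}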
pub fn retire(&mut self, id: [u8; 32]) { if self.active.remove(&id).is_none() { log::info!("retiring protocol {}, which we didn't register/already retired", hex::encode(id)); diff --git a/processor/primitives/src/eventuality.rs b/processor/primitives/src/eventuality.rs index 6a52194d8..803378244 100644 --- a/processor/primitives/src/eventuality.rs +++ b/processor/primitives/src/eventuality.rs @@ -7,6 +7,11 @@ pub trait Eventuality: Sized + Send + Sync { /// The type used to identify a received output. type OutputId: Id; + /// The ID of the transaction this Eventuality is for. + /// + /// This is an internal ID arbitrarily definable so long as it's unique. + fn id(&self) -> [u8; 32]; + /// A unique byte sequence which can be used to identify potentially resolving transactions. /// /// Both a transaction and an Eventuality are expected to be able to yield lookup sequences. diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml index a3e6a9bac..2a3e7e0af 100644 --- a/processor/scanner/Cargo.toml +++ b/processor/scanner/Cargo.toml @@ -39,3 +39,4 @@ serai-in-instructions-primitives = { path = "../../substrate/in-instructions/pri serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] } primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 17feefbea..5573e4849 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -247,6 +247,9 @@ impl SchedulerUpdate { /// The object responsible for accumulating outputs and planning new transactions. pub trait Scheduler: 'static + Send { + /// The type for a signable transaction. + type SignableTransaction: scheduler_primitives::SignableTransaction; + /// Activate a key. /// /// This SHOULD setup any necessary database structures. This SHOULD NOT cause the new key to diff --git a/processor/scheduler/primitives/src/lib.rs b/processor/scheduler/primitives/src/lib.rs index 97a00c03e..b3bf525c0 100644 --- a/processor/scheduler/primitives/src/lib.rs +++ b/processor/scheduler/primitives/src/lib.rs @@ -10,11 +10,26 @@ use group::GroupEncoding; use serai_db::DbTxn; /// A signable transaction. -pub trait SignableTransaction: 'static + Sized + Send + Sync { +pub trait SignableTransaction: 'static + Sized + Send + Sync + Clone { + /// The ciphersuite used to sign this transaction. + type Ciphersuite: Cuphersuite; + /// The preprocess machine for the signing protocol for this transaction. + type PreprocessMachine: PreprocessMachine; + /// Read a `SignableTransaction`. fn read(reader: &mut impl io::Read) -> io::Result; /// Write a `SignableTransaction`. fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; + + /// The ID for this transaction. + /// + /// This is an internal ID arbitrarily definable so long as it's unique. + /// + /// This same ID MUST be returned by the Eventuality for this transaction. + fn id(&self) -> [u8; 32]; + + /// Sign this transaction. 
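To make this contract concrete once `sign` (immediately below) completes the trait, a hypothetical implementor could look as follows; `MySignable`, its fields, its serialization helpers, and `MyMachine` are all invented for this sketch:

// A hedged sketch of implementing SignableTransaction; not from this patch.
impl SignableTransaction for MySignable {
  type Ciphersuite = Secp256k1;
  type PreprocessMachine = MyMachine;

  fn read(reader: &mut impl io::Read) -> io::Result<Self> { MySignable::deserialize(reader) }
  fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { self.serialize(writer) }

  // Any unique value works as the ID; a hash binding the intent (inputs/outputs) is natural,
  // so long as the Eventuality for this transaction reports the same value.
  fn id(&self) -> [u8; 32] { self.binding_hash }

  fn sign(self, keys: ThresholdKeys<Self::Ciphersuite>) -> Self::PreprocessMachine {
    MyMachine::new(self, keys)
  }
}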
+ fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine; } mod db { diff --git a/processor/scheduler/utxo/standard/src/lib.rs b/processor/scheduler/utxo/standard/src/lib.rs index f69ca54b6..10e40f150 100644 --- a/processor/scheduler/utxo/standard/src/lib.rs +++ b/processor/scheduler/utxo/standard/src/lib.rs @@ -309,6 +309,8 @@ impl> Scheduler { } impl> SchedulerTrait for Scheduler { + type SignableTransaction = P::SignableTransaction; + fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { for coin in S::NETWORK.coins() { assert!(Db::::outputs(txn, key, *coin).is_none()); diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index 9a4ed2ebc..d11e4ac26 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -368,6 +368,8 @@ impl>> Sched impl>> SchedulerTrait for Scheduler { + type SignableTransaction = P::SignableTransaction; + fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { for coin in S::NETWORK.coins() { assert!(Db::::outputs(txn, key, *coin).is_none()); diff --git a/processor/signers/Cargo.toml b/processor/signers/Cargo.toml index 70248960c..007c814c4 100644 --- a/processor/signers/Cargo.toml +++ b/processor/signers/Cargo.toml @@ -20,3 +20,13 @@ ignored = ["borsh", "scale"] workspace = true [dependencies] +group = { version = "0.13", default-features = false } + +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +scheduler = { package = "serai-scheduler-primitives", path = "../scheduler/primitives" } + +frost-attempt-manager = { package = "serai-processor-frost-attempt-manager", path = "../frost-attempt-manager" } diff --git a/processor/signers/src/cosigner.rs b/processor/signers/src/cosigner.rs deleted file mode 100644 index e69de29bb..000000000 diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index e69de29bb..c221ca4c8 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -0,0 +1,56 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +mod transaction; + +/* +// The signers used by a Processor, key-scoped. +struct KeySigners { + transaction: AttemptManager, + substrate: AttemptManager>, + cosigner: AttemptManager>, +} + +/// The signers used by a protocol. +pub struct Signers(HashMap, KeySigners>); + +impl Signers { + /// Create a new set of signers. + pub fn new(db: D) -> Self { + // TODO: Load the registered keys + // TODO: Load the transactions being signed + // TODO: Load the batches being signed + todo!("TODO") + } + + /// Register a transaction to sign. + pub fn sign_transaction(&mut self) -> Vec { + todo!("TODO") + } + /// Mark a transaction as signed. + pub fn signed_transaction(&mut self) { todo!("TODO") } + + /// Register a batch to sign. + pub fn sign_batch(&mut self, key: KeyFor, batch: Batch) -> Vec { + todo!("TODO") + } + /// Mark a batch as signed. + pub fn signed_batch(&mut self, batch: u32) { todo!("TODO") } + + /// Register a slash report to sign. + pub fn sign_slash_report(&mut self) -> Vec { + todo!("TODO") + } + /// Mark a slash report as signed. 
+ pub fn signed_slash_report(&mut self) { todo!("TODO") } + + /// Start a cosigning protocol. + pub fn cosign(&mut self) { todo!("TODO") } + + /// Handle a message for a signing protocol. + pub fn handle(&mut self, msg: CoordinatorMessage) -> Vec { + todo!("TODO") + } +} +*/ diff --git a/processor/signers/src/substrate.rs b/processor/signers/src/substrate.rs deleted file mode 100644 index e69de29bb..000000000 diff --git a/processor/signers/src/transaction.rs b/processor/signers/src/transaction.rs deleted file mode 100644 index e69de29bb..000000000 diff --git a/processor/signers/src/transaction/db.rs b/processor/signers/src/transaction/db.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/processor/signers/src/transaction/db.rs @@ -0,0 +1 @@ + diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs new file mode 100644 index 000000000..ba1487cbc --- /dev/null +++ b/processor/signers/src/transaction/mod.rs @@ -0,0 +1,70 @@ +use serai_db::{Get, DbTxn, Db}; + +use primitives::task::ContinuallyRan; +use scanner::ScannerFeed; +use scheduler::TransactionsToSign; + +mod db; +use db::IndexDb; + +// Fetches transactions to sign and signs them. +pub(crate) struct TransactionTask { + db: D, + keys: ThresholdKeys<::Ciphersuite>, + attempt_manager: + AttemptManager::PreprocessMachine>, +} + +impl TransactionTask { + pub(crate) async fn new( + db: D, + keys: ThresholdKeys<::Ciphersuite>, + ) -> Self { + Self { db, keys, attempt_manager: AttemptManager::new() } + } +} + +#[async_trait::async_trait] +impl ContinuallyRan for TransactionTask { + async fn run_iteration(&mut self) -> Result { + let mut iterated = false; + + // Check for new transactions to sign + loop { + let mut txn = self.db.txn(); + let Some(tx) = TransactionsToSign::try_recv(&mut txn, self.key) else { break }; + iterated = true; + + let mut machines = Vec::with_capacity(self.keys.len()); + for keys in &self.keys { + machines.push(tx.clone().sign(keys.clone())); + } + let messages = self.attempt_manager.register(tx.id(), machines); + todo!("TODO"); + txn.commit(); + } + + // Check for completed Eventualities (meaning we should no longer sign for these transactions) + loop { + let mut txn = self.db.txn(); + let Some(tx) = CompletedEventualities::try_recv(&mut txn, self.key) else { break }; + iterated = true; + + self.attempt_manager.retire(tx); + txn.commit(); + } + + loop { + let mut txn = self.db.txn(); + let Some(msg) = TransactionSignMessages::try_recv(&mut txn, self.key) else { break }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(messages) => todo!("TODO"), + Response::Signature(signature) => todo!("TODO"), + } + } + + Ok(iterated) + } +} From db74a715d884a0a12c79f9d3e8760f86663ccb10 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 6 Sep 2024 03:20:38 -0400 Subject: [PATCH 077/179] Further work on transaction signing --- Cargo.lock | 135 ++++++++++++------ .../frost-attempt-manager/src/individual.rs | 7 +- processor/frost-attempt-manager/src/lib.rs | 4 + processor/primitives/src/eventuality.rs | 2 +- processor/scanner/src/db.rs | 23 +++ processor/scanner/src/eventuality/mod.rs | 11 +- processor/scanner/src/lib.rs | 5 + processor/scheduler/primitives/Cargo.toml | 3 +- processor/scheduler/primitives/src/lib.rs | 15 +- processor/signers/Cargo.toml | 14 +- processor/signers/src/db.rs | 27 ++++ processor/signers/src/lib.rs | 30 ++++ processor/signers/src/transaction/mod.rs | 97 ++++++++++--- 13 files changed, 299 insertions(+), 74 
deletions(-) create mode 100644 processor/signers/src/db.rs diff --git a/Cargo.lock b/Cargo.lock index 8662be6fd..768191b41 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8120,6 +8120,29 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "serai-bitcoin-processor" +version = "0.1.0" +dependencies = [ + "async-trait", + "bitcoin-serai", + "borsh", + "const-hex", + "env_logger", + "hex", + "k256", + "log", + "parity-scale-codec", + "secp256k1", + "serai-db", + "serai-env", + "serai-message-queue", + "serai-processor-messages", + "serde_json", + "tokio", + "zalloc", +] + [[package]] name = "serai-client" version = "0.1.0" @@ -8315,6 +8338,28 @@ dependencies = [ name = "serai-env" version = "0.1.0" +[[package]] +name = "serai-ethereum-processor" +version = "0.1.0" +dependencies = [ + "async-trait", + "borsh", + "const-hex", + "env_logger", + "ethereum-serai", + "hex", + "k256", + "log", + "parity-scale-codec", + "serai-db", + "serai-env", + "serai-message-queue", + "serai-processor-messages", + "serde_json", + "tokio", + "zalloc", +] + [[package]] name = "serai-ethereum-relayer" version = "0.1.0" @@ -8343,7 +8388,6 @@ dependencies = [ "serai-coordinator-tests", "serai-docker-tests", "serai-message-queue-tests", - "serai-processor", "serai-processor-tests", "serde", "serde_json", @@ -8459,6 +8503,29 @@ dependencies = [ "zeroize", ] +[[package]] +name = "serai-monero-processor" +version = "0.1.0" +dependencies = [ + "async-trait", + "borsh", + "const-hex", + "dalek-ff-group", + "env_logger", + "hex", + "log", + "monero-simple-request-rpc", + "monero-wallet", + "parity-scale-codec", + "serai-db", + "serai-env", + "serai-message-queue", + "serai-processor-messages", + "serde_json", + "tokio", + "zalloc", +] + [[package]] name = "serai-no-std-tests" version = "0.1.0" @@ -8558,47 +8625,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "serai-processor" -version = "0.1.0" -dependencies = [ - "async-trait", - "bitcoin-serai", - "borsh", - "ciphersuite", - "const-hex", - "dalek-ff-group", - "dkg", - "dockertest", - "ec-divisors", - "env_logger", - "ethereum-serai", - "flexible-transcript", - "frost-schnorrkel", - "hex", - "k256", - "log", - "modular-frost", - "monero-simple-request-rpc", - "monero-wallet", - "parity-scale-codec", - "rand_chacha", - "rand_core", - "secp256k1", - "serai-client", - "serai-db", - "serai-docker-tests", - "serai-env", - "serai-message-queue", - "serai-processor-messages", - "serde_json", - "sp-application-crypto", - "thiserror", - "tokio", - "zalloc", - "zeroize", -] - [[package]] name = "serai-processor-frost-attempt-manager" version = "0.1.0" @@ -8676,6 +8702,7 @@ dependencies = [ "serai-in-instructions-primitives", "serai-primitives", "serai-processor-primitives", + "serai-processor-scheduler-primitives", "tokio", ] @@ -8684,11 +8711,32 @@ name = "serai-processor-scheduler-primitives" version = "0.1.0" dependencies = [ "borsh", - "group", + "ciphersuite", + "modular-frost", "parity-scale-codec", "serai-db", ] +[[package]] +name = "serai-processor-signers" +version = "0.1.0" +dependencies = [ + "async-trait", + "borsh", + "ciphersuite", + "log", + "modular-frost", + "parity-scale-codec", + "serai-db", + "serai-processor-frost-attempt-manager", + "serai-processor-messages", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-validator-sets-primitives", + "tokio", +] + [[package]] name = "serai-processor-tests" version = "0.1.0" @@ -8711,7 +8759,6 @@ dependencies = [ "serai-docker-tests", 
"serai-message-queue", "serai-message-queue-tests", - "serai-processor", "serai-processor-messages", "serde_json", "tokio", diff --git a/processor/frost-attempt-manager/src/individual.rs b/processor/frost-attempt-manager/src/individual.rs index d7f4eec0d..049731c6e 100644 --- a/processor/frost-attempt-manager/src/individual.rs +++ b/processor/frost-attempt-manager/src/individual.rs @@ -80,10 +80,15 @@ impl SigningProtocol { We avoid this by saving to the DB we preprocessed before sending our preprocessed, and only keeping our preprocesses for this instance of the processor. Accordingly, on reboot, we will - flag the prior preprocess and not send new preprocesses. + flag the prior preprocess and not send new preprocesses. This does require our own DB + transaction (to ensure we save to the DB we preprocessed before yielding the preprocess + messages). We also won't send the share we were supposed to, unfortunately, yet caching/reloading the preprocess has enough safety issues it isn't worth the headache. + + Since we bind a signing attempt to the lifetime of the application, we're also safe against + nonce reuse (as the state machines enforce single-use and we never reuse a preprocess). */ { let mut txn = self.db.txn(); diff --git a/processor/frost-attempt-manager/src/lib.rs b/processor/frost-attempt-manager/src/lib.rs index c4d1708d6..2ce46784d 100644 --- a/processor/frost-attempt-manager/src/lib.rs +++ b/processor/frost-attempt-manager/src/lib.rs @@ -65,6 +65,10 @@ impl AttemptManager { } /// Handle a message for a signing protocol. + /// + /// Handling a message multiple times is safe and will cause subsequent calls to return + /// `Response::Messages(vec![])`. Handling a message for a signing protocol which isn't being + /// worked on (potentially due to rebooting) will also return `Response::Messages(vec![])`. pub fn handle(&mut self, msg: CoordinatorMessage) -> Response { match msg { CoordinatorMessage::Preprocesses { id, preprocesses } => { diff --git a/processor/primitives/src/eventuality.rs b/processor/primitives/src/eventuality.rs index 803378244..f68ceeae9 100644 --- a/processor/primitives/src/eventuality.rs +++ b/processor/primitives/src/eventuality.rs @@ -7,7 +7,7 @@ pub trait Eventuality: Sized + Send + Sync { /// The type used to identify a received output. type OutputId: Id; - /// The ID of the transaction this Eventuality is for. + /// The ID of the SignableTransaction this Eventuality is for. /// /// This is an internal ID arbitrarily definable so long as it's unique. fn id(&self) -> [u8; 32]; diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index f45d29664..246e5f46a 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -520,3 +520,26 @@ impl SubstrateToEventualityDb { Burns::try_recv(txn, acknowledged_block) } } + +mod _completed_eventualities { + use serai_db::{Get, DbTxn, create_db, db_channel}; + + db_channel! { + ScannerPublic { + CompletedEventualities: (empty_key: ()) -> [u8; 32], + } + } +} + +/// The IDs of completed Eventualities found on-chain, within a finalized block. +pub struct CompletedEventualities(PhantomData); +impl CompletedEventualities { + pub(crate) fn send(txn: &mut impl DbTxn, id: [u8; 32]) { + _completed_eventualities::CompletedEventualities::send(txn, (), &id); + } + + /// Receive the ID of a completed Eventuality. 
+ pub fn try_recv(txn: &mut impl DbTxn) -> Option<[u8; 32]> { + _completed_eventualities::CompletedEventualities::try_recv(txn, ()) + } +} diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 6db60b71e..7dadbe559 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -14,7 +14,7 @@ use crate::{ ScanToEventualityDb, }, BlockExt, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, SchedulerUpdate, Scheduler, - sort_outputs, + CompletedEventualities, sort_outputs, scan::{next_to_scan_for_outputs_block, queue_output_until_block}, }; @@ -292,8 +292,13 @@ impl> ContinuallyRan for EventualityTas completed_eventualities }; - for tx in completed_eventualities.keys() { - log::info!("eventuality resolved by {}", hex::encode(tx.as_ref())); + for (tx, eventuality) in &completed_eventualities { + log::info!( + "eventuality {} resolved by {}", + hex::encode(eventuality.id()), + hex::encode(tx.as_ref()) + ); + CompletedEventualities::::send(&mut txn, eventuality.id()); } // Fetch all non-External outputs diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 5573e4849..7c699e9cd 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -21,6 +21,7 @@ pub use lifetime::LifetimeStage; // Database schema definition and associated functions. mod db; +pub use db::CompletedEventualities; // Task to index the blockchain, ensuring we don't reorganize finalized blocks. mod index; // Scans blocks for received coins. @@ -170,6 +171,10 @@ pub type EventualityFor = <::Block as Block>::Eventuality; /// The block type for this ScannerFeed. pub type BlockFor = ::Block; +/// An object usable to publish a Batch. +// This will presumably be the Batch signer defined in `serai-processor-signers` or a test shim. +// It could also be some app-layer database for the purpose of verifying the Batches published to +// Serai. #[async_trait::async_trait] pub trait BatchPublisher: 'static + Send + Sync { /// An error encountered when publishing the Batch. diff --git a/processor/scheduler/primitives/Cargo.toml b/processor/scheduler/primitives/Cargo.toml index cdf12cbbb..f847300a8 100644 --- a/processor/scheduler/primitives/Cargo.toml +++ b/processor/scheduler/primitives/Cargo.toml @@ -20,7 +20,8 @@ ignored = ["scale", "borsh"] workspace = true [dependencies] -group = { version = "0.13", default-features = false } +ciphersuite = { path = "../../../crypto/ciphersuite", default-features = false, features = ["std"] } +frost = { package = "modular-frost", path = "../../../crypto/frost", default-features = false } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } diff --git a/processor/scheduler/primitives/src/lib.rs b/processor/scheduler/primitives/src/lib.rs index b3bf525c0..4de4f67ad 100644 --- a/processor/scheduler/primitives/src/lib.rs +++ b/processor/scheduler/primitives/src/lib.rs @@ -5,16 +5,25 @@ use core::marker::PhantomData; use std::io; -use group::GroupEncoding; +use ciphersuite::{group::GroupEncoding, Ciphersuite}; +use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; use serai_db::DbTxn; +/// A transaction. +pub trait Transaction: Sized { + /// Read a `Transaction`. + fn read(reader: &mut impl io::Read) -> io::Result; + /// Write a `Transaction`. 
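Once `write` (below) rounds out the trait, a toy instance (invented here, not part of the patch) shows the contract; a length-prefixed byte blob suffices, assuming the module's existing `io` import:

// A hedged sketch of a minimal Transaction implementor.
struct RawTx(Vec<u8>);
impl Transaction for RawTx {
  fn read(reader: &mut impl io::Read) -> io::Result<Self> {
    let mut len = [0; 4];
    reader.read_exact(&mut len)?;
    let mut tx = vec![0; usize::try_from(u32::from_le_bytes(len)).unwrap()];
    reader.read_exact(&mut tx)?;
    Ok(RawTx(tx))
  }
  fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
    writer.write_all(&u32::try_from(self.0.len()).unwrap().to_le_bytes())?;
    writer.write_all(&self.0)
  }
}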
+ fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; +} + /// A signable transaction. pub trait SignableTransaction: 'static + Sized + Send + Sync + Clone { /// The ciphersuite used to sign this transaction. - type Ciphersuite: Cuphersuite; + type Ciphersuite: Ciphersuite; /// The preprocess machine for the signing protocol for this transaction. - type PreprocessMachine: PreprocessMachine; + type PreprocessMachine: Clone + PreprocessMachine; /// Read a `SignableTransaction`. fn read(reader: &mut impl io::Read) -> io::Result; diff --git a/processor/signers/Cargo.toml b/processor/signers/Cargo.toml index 007c814c4..06d64da2b 100644 --- a/processor/signers/Cargo.toml +++ b/processor/signers/Cargo.toml @@ -20,13 +20,23 @@ ignored = ["borsh", "scale"] workspace = true [dependencies] -group = { version = "0.13", default-features = false } +async-trait = { version = "0.1", default-features = false } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } + +serai-db = { path = "../../common/db" } log = { version = "0.4", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } +messages = { package = "serai-processor-messages", path = "../messages" } primitives = { package = "serai-processor-primitives", path = "../primitives" } scanner = { package = "serai-processor-scanner", path = "../scanner" } -scheduler = { package = "serai-scheduler-primitives", path = "../scheduler/primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } frost-attempt-manager = { package = "serai-processor-frost-attempt-manager", path = "../frost-attempt-manager" } diff --git a/processor/signers/src/db.rs b/processor/signers/src/db.rs new file mode 100644 index 000000000..5ba5f7d49 --- /dev/null +++ b/processor/signers/src/db.rs @@ -0,0 +1,27 @@ +use serai_validator_sets_primitives::Session; + +use serai_db::{Get, DbTxn, create_db, db_channel}; + +use messages::sign::{ProcessorMessage, CoordinatorMessage}; + +db_channel! { + SignersGlobal { + // CompletedEventualities needs to be handled by each signer, meaning we need to turn its + // effective spsc into a spmc. We do this by duplicating its message for all keys we're + // signing for. 
+ // TODO: Populate from CompletedEventualities + CompletedEventualitiesForEachKey: (session: Session) -> [u8; 32], + + CoordinatorToTransactionSignerMessages: (session: Session) -> CoordinatorMessage, + TransactionSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + + CoordinatorToBatchSignerMessages: (session: Session) -> CoordinatorMessage, + BatchSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + + CoordinatorToSlashReportSignerMessages: (session: Session) -> CoordinatorMessage, + SlashReportSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + + CoordinatorToCosignerMessages: (session: Session) -> CoordinatorMessage, + CosignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + } +} diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index c221ca4c8..7453f4b69 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -2,8 +2,38 @@ #![doc = include_str!("../README.md")] #![deny(missing_docs)] +use core::fmt::Debug; + +use frost::sign::PreprocessMachine; + +use scheduler::SignableTransaction; + +pub(crate) mod db; + mod transaction; +/// An object capable of publishing a transaction. +#[async_trait::async_trait] +pub trait TransactionPublisher: 'static + Send + Sync { + /// An error encountered when publishing a transaction. + /// + /// This MUST be an ephemeral error. Retrying publication MUST eventually resolve without manual + /// intervention/changing the arguments. + /// + /// The transaction already being present in the mempool/on-chain SHOULD NOT be considered an + /// error. + type EphemeralError: Debug; + + /// Publish a transaction. + /// + /// This will be called multiple times, with the same transaction, until the transaction is + /// confirmed on-chain. + async fn publish( + &self, + tx: ::Signature, + ) -> Result<(), Self::EphemeralError>; +} + /* // The signers used by a Processor, key-scoped. struct KeySigners { diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs index ba1487cbc..4ed573f45 100644 --- a/processor/signers/src/transaction/mod.rs +++ b/processor/signers/src/transaction/mod.rs @@ -1,68 +1,127 @@ -use serai_db::{Get, DbTxn, Db}; +use frost::dkg::ThresholdKeys; + +use serai_validator_sets_primitives::Session; + +use serai_db::{DbTxn, Db}; use primitives::task::ContinuallyRan; -use scanner::ScannerFeed; -use scheduler::TransactionsToSign; +use scheduler::{SignableTransaction, TransactionsToSign}; +use scanner::{ScannerFeed, Scheduler}; + +use frost_attempt_manager::*; + +use crate::{ + db::{ + CoordinatorToTransactionSignerMessages, TransactionSignerToCoordinatorMessages, + CompletedEventualitiesForEachKey, + }, + TransactionPublisher, +}; mod db; -use db::IndexDb; // Fetches transactions to sign and signs them. 
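The reworked task below leans on one recurring pattern: drain a DB channel inside a transaction, act on the message, and only then commit, so each message is consumed exactly once. Distilled (the channel, key, and handler here are generic stand-ins for this sketch):

// A hedged sketch of the drain-and-commit pattern this task repeats.
loop {
  let mut txn = db.txn();
  let Some(msg) = SomeDbChannel::try_recv(&mut txn, key) else { break };
  handle(&mut txn, msg);
  // Committing consumes the message; a crash beforehand simply replays it.
  txn.commit();
}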
-pub(crate) struct TransactionTask { +pub(crate) struct TransactionTask< + D: Db, + S: ScannerFeed, + Sch: Scheduler, + P: TransactionPublisher, +> { db: D, - keys: ThresholdKeys<::Ciphersuite>, + session: Session, + keys: Vec::Ciphersuite>>, attempt_manager: AttemptManager::PreprocessMachine>, + publisher: P, } -impl TransactionTask { - pub(crate) async fn new( +impl, P: TransactionPublisher> + TransactionTask +{ + pub(crate) fn new( db: D, - keys: ThresholdKeys<::Ciphersuite>, + session: Session, + keys: Vec::Ciphersuite>>, + publisher: P, ) -> Self { - Self { db, keys, attempt_manager: AttemptManager::new() } + let attempt_manager = AttemptManager::new( + db.clone(), + session, + keys.first().expect("creating a transaction signer with 0 keys").params().i(), + ); + Self { db, session, keys, attempt_manager, publisher } } } #[async_trait::async_trait] -impl ContinuallyRan for TransactionTask { +impl, P: TransactionPublisher> + ContinuallyRan for TransactionTask +{ async fn run_iteration(&mut self) -> Result { let mut iterated = false; // Check for new transactions to sign loop { let mut txn = self.db.txn(); - let Some(tx) = TransactionsToSign::try_recv(&mut txn, self.key) else { break }; + let Some(tx) = TransactionsToSign::::try_recv( + &mut txn, + &self.keys[0].group_key(), + ) else { + break; + }; iterated = true; let mut machines = Vec::with_capacity(self.keys.len()); for keys in &self.keys { machines.push(tx.clone().sign(keys.clone())); } - let messages = self.attempt_manager.register(tx.id(), machines); - todo!("TODO"); + for msg in self.attempt_manager.register(tx.id(), machines) { + TransactionSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } txn.commit(); } // Check for completed Eventualities (meaning we should no longer sign for these transactions) loop { let mut txn = self.db.txn(); - let Some(tx) = CompletedEventualities::try_recv(&mut txn, self.key) else { break }; + let Some(id) = CompletedEventualitiesForEachKey::try_recv(&mut txn, self.session) else { + break; + }; iterated = true; - self.attempt_manager.retire(tx); + self.attempt_manager.retire(id); + // TODO: Stop rebroadcasting this transaction txn.commit(); } + // Handle any messages sent to us loop { let mut txn = self.db.txn(); - let Some(msg) = TransactionSignMessages::try_recv(&mut txn, self.key) else { break }; + let Some(msg) = CoordinatorToTransactionSignerMessages::try_recv(&mut txn, self.session) + else { + break; + }; iterated = true; match self.attempt_manager.handle(msg) { - Response::Messages(messages) => todo!("TODO"), - Response::Signature(signature) => todo!("TODO"), + Response::Messages(msgs) => { + for msg in msgs { + TransactionSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature(signed_tx) => { + // TODO: Save this TX to the DB + // TODO: Attempt publication every minute + // TODO: On boot, reload all TXs to rebroadcast + self + .publisher + .publish(signed_tx) + .await + .map_err(|e| format!("couldn't publish transaction: {e:?}"))?; + } } + + txn.commit(); } Ok(iterated) From 45576eba60e940b0e18528cacf1fc2e4128729be Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 6 Sep 2024 04:15:02 -0400 Subject: [PATCH 078/179] Finish transaction signing task with TX rebroadcast code --- .../frost-attempt-manager/src/individual.rs | 6 +- processor/frost-attempt-manager/src/lib.rs | 15 ++- processor/scheduler/primitives/src/lib.rs | 2 +- processor/signers/src/lib.rs | 6 +- processor/signers/src/transaction/db.rs | 10 ++ 
processor/signers/src/transaction/mod.rs | 93 +++++++++++++++++-- 6 files changed, 109 insertions(+), 23 deletions(-) diff --git a/processor/frost-attempt-manager/src/individual.rs b/processor/frost-attempt-manager/src/individual.rs index 049731c6e..2591b582f 100644 --- a/processor/frost-attempt-manager/src/individual.rs +++ b/processor/frost-attempt-manager/src/individual.rs @@ -278,9 +278,7 @@ impl SigningProtocol { } /// Cleanup the database entries for a specified signing protocol. - pub(crate) fn cleanup(db: &mut D, id: [u8; 32]) { - let mut txn = db.txn(); - Attempted::del(&mut txn, id); - txn.commit(); + pub(crate) fn cleanup(txn: &mut impl DbTxn, id: [u8; 32]) { + Attempted::del(txn, id); } } diff --git a/processor/frost-attempt-manager/src/lib.rs b/processor/frost-attempt-manager/src/lib.rs index 2ce46784d..6666ffacc 100644 --- a/processor/frost-attempt-manager/src/lib.rs +++ b/processor/frost-attempt-manager/src/lib.rs @@ -8,7 +8,7 @@ use frost::{Participant, sign::PreprocessMachine}; use serai_validator_sets_primitives::Session; -use serai_db::Db; +use serai_db::{DbTxn, Db}; use messages::sign::{ProcessorMessage, CoordinatorMessage}; mod individual; @@ -19,7 +19,12 @@ pub enum Response { /// Messages to send to the coordinator. Messages(Vec), /// A produced signature. - Signature(M::Signature), + Signature { + /// The ID of the protocol this is for. + id: [u8; 32], + /// The signature. + signature: M::Signature, + }, } /// A manager of attempts for a variety of signing protocols. @@ -55,13 +60,13 @@ impl AttemptManager { /// This does not stop the protocol from being re-registered and further worked on (with /// undefined behavior) then. The higher-level context must never call `register` again with this /// ID accordingly. - pub fn retire(&mut self, id: [u8; 32]) { + pub fn retire(&mut self, txn: &mut impl DbTxn, id: [u8; 32]) { if self.active.remove(&id).is_none() { log::info!("retiring protocol {}, which we didn't register/already retired", hex::encode(id)); } else { log::info!("retired signing protocol {}", hex::encode(id)); } - SigningProtocol::::cleanup(&mut self.db, id); + SigningProtocol::::cleanup(txn, id); } /// Handle a message for a signing protocol. @@ -90,7 +95,7 @@ impl AttemptManager { return Response::Messages(vec![]); }; match protocol.shares(id.attempt, shares) { - Ok(signature) => Response::Signature(signature), + Ok(signature) => Response::Signature { id: id.id, signature }, Err(messages) => Response::Messages(messages), } } diff --git a/processor/scheduler/primitives/src/lib.rs b/processor/scheduler/primitives/src/lib.rs index 4de4f67ad..cef10d35e 100644 --- a/processor/scheduler/primitives/src/lib.rs +++ b/processor/scheduler/primitives/src/lib.rs @@ -23,7 +23,7 @@ pub trait SignableTransaction: 'static + Sized + Send + Sync + Clone { /// The ciphersuite used to sign this transaction. type Ciphersuite: Ciphersuite; /// The preprocess machine for the signing protocol for this transaction. - type PreprocessMachine: Clone + PreprocessMachine; + type PreprocessMachine: Clone + PreprocessMachine; /// Read a `SignableTransaction`. fn read(reader: &mut impl io::Read) -> io::Result; diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index 7453f4b69..eb09440d9 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -19,15 +19,15 @@ pub trait TransactionPublisher: 'static + Send + Sync { /// /// This MUST be an ephemeral error. 
Retrying publication MUST eventually resolve without manual /// intervention/changing the arguments. - /// - /// The transaction already being present in the mempool/on-chain SHOULD NOT be considered an - /// error. type EphemeralError: Debug; /// Publish a transaction. /// /// This will be called multiple times, with the same transaction, until the transaction is /// confirmed on-chain. + /// + /// The transaction already being present in the mempool/on-chain MUST NOT be considered an + /// error. async fn publish( &self, tx: ::Signature, diff --git a/processor/signers/src/transaction/db.rs b/processor/signers/src/transaction/db.rs index 8b1378917..b77d38c7f 100644 --- a/processor/signers/src/transaction/db.rs +++ b/processor/signers/src/transaction/db.rs @@ -1 +1,11 @@ +use serai_validator_sets_primitives::Session; +use serai_db::{Get, DbTxn, create_db}; + +create_db! { + TransactionSigner { + ActiveSigningProtocols: (session: Session) -> Vec<[u8; 32]>, + SerializedSignableTransactions: (id: [u8; 32]) -> Vec, + SerializedTransactions: (id: [u8; 32]) -> Vec, + } +} diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs index 4ed573f45..85a6a0ab6 100644 --- a/processor/signers/src/transaction/mod.rs +++ b/processor/signers/src/transaction/mod.rs @@ -1,11 +1,13 @@ -use frost::dkg::ThresholdKeys; +use std::{collections::HashSet, time::{Duration, Instant}}; + +use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; use serai_validator_sets_primitives::Session; use serai_db::{DbTxn, Db}; use primitives::task::ContinuallyRan; -use scheduler::{SignableTransaction, TransactionsToSign}; +use scheduler::{Transaction, SignableTransaction, TransactionsToSign}; use scanner::{ScannerFeed, Scheduler}; use frost_attempt_manager::*; @@ -19,6 +21,13 @@ use crate::{ }; mod db; +use db::*; + +type TransactionFor = < + < + + >::SignableTransaction as SignableTransaction>::PreprocessMachine as PreprocessMachine +>::Signature; // Fetches transactions to sign and signs them. 
pub(crate) struct TransactionTask< @@ -28,11 +37,16 @@ pub(crate) struct TransactionTask< P: TransactionPublisher, > { db: D, + publisher: P, + session: Session, keys: Vec::Ciphersuite>>, + + active_signing_protocols: HashSet<[u8; 32]>, attempt_manager: AttemptManager::PreprocessMachine>, - publisher: P, + + last_publication: Instant, } impl, P: TransactionPublisher> @@ -40,16 +54,35 @@ impl, P: TransactionPublisher::Ciphersuite>>, - publisher: P, ) -> Self { - let attempt_manager = AttemptManager::new( + let mut active_signing_protocols = HashSet::new(); + let mut attempt_manager = AttemptManager::new( db.clone(), session, keys.first().expect("creating a transaction signer with 0 keys").params().i(), ); - Self { db, session, keys, attempt_manager, publisher } + + // Re-register all active signing protocols + for tx in ActiveSigningProtocols::get(&db, session).unwrap_or(vec![]) { + active_signing_protocols.insert(tx); + + let signable_transaction_buf = SerializedSignableTransactions::get(&db, tx).unwrap(); + let mut signable_transaction_buf = signable_transaction_buf.as_slice(); + let signable_transaction = >::SignableTransaction::read(&mut signable_transaction_buf).unwrap(); + assert!(signable_transaction_buf.is_empty()); + assert_eq!(signable_transaction.id(), tx); + + let mut machines = Vec::with_capacity(keys.len()); + for keys in &keys { + machines.push(signable_transaction.clone().sign(keys.clone())); + } + attempt_manager.register(tx, machines); + } + + Self { db, publisher, session, keys, active_signing_protocols, attempt_manager, last_publication: Instant::now() } } } @@ -71,6 +104,15 @@ impl, P: TransactionPublisher, P: TransactionPublisher, P: TransactionPublisher, P: TransactionPublisher { - // TODO: Save this TX to the DB + Response::Signature { id, signature: signed_tx } => { + // Save this transaction to the database + { + let mut buf = Vec::with_capacity(256); + signed_tx.write(&mut buf).unwrap(); + SerializedTransactions::set(&mut txn, id, &buf); + } + // TODO: Attempt publication every minute - // TODO: On boot, reload all TXs to rebroadcast self .publisher .publish(signed_tx) @@ -124,6 +182,21 @@ impl, P: TransactionPublisher Duration::from_secs(5 * 60) { + for tx in &self.active_signing_protocols { + let Some(tx_buf) = SerializedTransactions::get(&self.db, *tx) else { continue }; + let mut tx_buf = tx_buf.as_slice(); + let tx = TransactionFor::::read(&mut tx_buf).unwrap(); + assert!(tx_buf.is_empty()); + + self.publisher.publish(tx).await.map_err(|e| format!("couldn't re-broadcast transactions: {e:?}"))?; + } + + self.last_publication = Instant::now(); + } + Ok(iterated) } } From ebd8675a47bd3be6038357924dfb428bcd7e46ac Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 6 Sep 2024 17:33:02 -0400 Subject: [PATCH 079/179] Tidy transaction signing task --- processor/signers/src/transaction/mod.rs | 60 +++++++++++++++++------- 1 file changed, 42 insertions(+), 18 deletions(-) diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs index 85a6a0ab6..b638eac07 100644 --- a/processor/signers/src/transaction/mod.rs +++ b/processor/signers/src/transaction/mod.rs @@ -1,4 +1,7 @@ -use std::{collections::HashSet, time::{Duration, Instant}}; +use std::{ + collections::HashSet, + time::{Duration, Instant}, +}; use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; @@ -71,7 +74,8 @@ impl, P: TransactionPublisher>::SignableTransaction::read(&mut signable_transaction_buf).unwrap(); + let signable_transaction = + 
>::SignableTransaction::read(&mut signable_transaction_buf).unwrap(); assert!(signable_transaction_buf.is_empty()); assert_eq!(signable_transaction.id(), tx); @@ -82,7 +86,15 @@ impl, P: TransactionPublisher, P: TransactionPublisher, P: TransactionPublisher, P: TransactionPublisher, P: TransactionPublisher::read(&mut tx_buf).unwrap(); assert!(tx_buf.is_empty()); - self.publisher.publish(tx).await.map_err(|e| format!("couldn't re-broadcast transactions: {e:?}"))?; + self + .publisher + .publish(tx) + .await + .map_err(|e| format!("couldn't re-broadcast transactions: {e:?}"))?; } self.last_publication = Instant::now(); From 279cb72fbef4b4b9aec712e6723b2646b9339253 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 7 Sep 2024 03:33:26 -0400 Subject: [PATCH 080/179] Work on the higher-level signers API --- Cargo.lock | 1 + processor/signers/Cargo.toml | 1 + processor/signers/src/db.rs | 8 ++ processor/signers/src/lib.rs | 137 +++++++++++++++++++++-- processor/signers/src/transaction/mod.rs | 38 +++---- 5 files changed, 153 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 768191b41..b960db4d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8735,6 +8735,7 @@ dependencies = [ "serai-processor-scheduler-primitives", "serai-validator-sets-primitives", "tokio", + "zeroize", ] [[package]] diff --git a/processor/signers/Cargo.toml b/processor/signers/Cargo.toml index 06d64da2b..3a96c0435 100644 --- a/processor/signers/Cargo.toml +++ b/processor/signers/Cargo.toml @@ -21,6 +21,7 @@ workspace = true [dependencies] async-trait = { version = "0.1", default-features = false } +zeroize = { version = "1", default-features = false, features = ["std"] } ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } diff --git a/processor/signers/src/db.rs b/processor/signers/src/db.rs index 5ba5f7d49..9975cbda0 100644 --- a/processor/signers/src/db.rs +++ b/processor/signers/src/db.rs @@ -4,6 +4,14 @@ use serai_db::{Get, DbTxn, create_db, db_channel}; use messages::sign::{ProcessorMessage, CoordinatorMessage}; +create_db! { + SignersGlobal { + RegisteredKeys: () -> Vec, + SerializedKeys: (session: Session) -> Vec, + LatestRetiredSession: () -> Session, + } +} + db_channel! { SignersGlobal { // CompletedEventualities needs to be handled by each signer, meaning we need to turn its diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index eb09440d9..9bc2459dc 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -2,11 +2,18 @@ #![doc = include_str!("../README.md")] #![deny(missing_docs)] -use core::fmt::Debug; +use core::{fmt::Debug, marker::PhantomData}; -use frost::sign::PreprocessMachine; +use zeroize::Zeroizing; -use scheduler::SignableTransaction; +use serai_validator_sets_primitives::Session; + +use ciphersuite::{group::GroupEncoding, Ristretto}; +use frost::dkg::{ThresholdCore, ThresholdKeys}; + +use serai_db::{DbTxn, Db}; + +use scheduler::{Transaction, SignableTransaction, TransactionsToSign}; pub(crate) mod db; @@ -14,7 +21,7 @@ mod transaction; /// An object capable of publishing a transaction. #[async_trait::async_trait] -pub trait TransactionPublisher: 'static + Send + Sync { +pub trait TransactionPublisher: 'static + Send + Sync { /// An error encountered when publishing a transaction. /// /// This MUST be an ephemeral error. 
Retrying publication MUST eventually resolve without manual @@ -28,10 +35,124 @@ pub trait TransactionPublisher: 'static + Send + Sync { /// /// The transaction already being present in the mempool/on-chain MUST NOT be considered an /// error. - async fn publish( - &self, - tx: ::Signature, - ) -> Result<(), Self::EphemeralError>; + async fn publish(&self, tx: T) -> Result<(), Self::EphemeralError>; +} + +/// The signers used by a processor. +pub struct Signers(PhantomData); + +/* + This is completely outside of consensus, so the worst that can happen is: + + 1) Leakage of a private key, hence the usage of frost-attempt-manager which has an API to ensure + that doesn't happen + 2) The database isn't perfectly cleaned up (leaving some bytes on disk wasted) + 3) The state isn't perfectly cleaned up (leaving some bytes in RAM wasted) + + The last two are notably possible via a series of race conditions. For example, if an Eventuality + completion comes in *before* we registered a key, the signer will hold the signing protocol in + memory until the session is retired entirely. +*/ +impl Signers { + /// Initialize the signers. + /// + /// This will spawn tasks for any historically registered keys. + pub fn new(db: impl Db) -> Self { + for session in db::RegisteredKeys::get(&db).unwrap_or(vec![]) { + let buf = db::SerializedKeys::get(&db, session).unwrap(); + let mut buf = buf.as_slice(); + + let mut substrate_keys = vec![]; + let mut external_keys = vec![]; + while !buf.is_empty() { + substrate_keys + .push(ThresholdKeys::from(ThresholdCore::::read(&mut buf).unwrap())); + external_keys + .push(ThresholdKeys::from(ThresholdCore::::read(&mut buf).unwrap())); + } + + todo!("TODO") + } + + todo!("TODO") + } + + /// Register a set of keys to sign with. + /// + /// If this session (or a session after it) has already been retired, this is a NOP. + pub fn register_keys( + &mut self, + txn: &mut impl DbTxn, + session: Session, + substrate_keys: Vec>, + network_keys: Vec>, + ) { + if Some(session.0) <= db::LatestRetiredSession::get(txn).map(|session| session.0) { + return; + } + + { + let mut sessions = db::RegisteredKeys::get(txn).unwrap_or_else(|| Vec::with_capacity(1)); + sessions.push(session); + db::RegisteredKeys::set(txn, &sessions); + } + + { + let mut buf = Zeroizing::new(Vec::with_capacity(2 * substrate_keys.len() * 128)); + for (substrate_keys, network_keys) in substrate_keys.into_iter().zip(network_keys) { + buf.extend(&*substrate_keys.serialize()); + buf.extend(&*network_keys.serialize()); + } + db::SerializedKeys::set(txn, session, &buf); + } + } + + /// Retire the signers for a session. + /// + /// This MUST be called in order, for every session (even if we didn't register keys for this + /// session). 
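Paired with `register_keys` above and `retire_session` below, the intended call pattern is roughly the following (a sketch; where the keys and sessions come from is outside this crate):

// A hedged sketch of the session lifecycle against this API.
// Once the DKG completes for `session`:
let mut txn = db.txn();
signers.register_keys(&mut txn, session, substrate_keys, network_keys);
txn.commit();

// Later, once Serai retires `session` (in order, for every session):
let mut txn = db.txn();
signers.retire_session(&mut txn, session, &external_key);
txn.commit();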
+ pub fn retire_session( + &mut self, + txn: &mut impl DbTxn, + session: Session, + external_key: &impl GroupEncoding, + ) { + // Update the latest retired session + { + let next_to_retire = + db::LatestRetiredSession::get(txn).map_or(Session(0), |session| Session(session.0 + 1)); + assert_eq!(session, next_to_retire); + db::LatestRetiredSession::set(txn, &session); + } + + // Kill the tasks + todo!("TODO"); + + // Update RegisteredKeys/SerializedKeys + if let Some(registered) = db::RegisteredKeys::get(txn) { + db::RegisteredKeys::set( + txn, + &registered.into_iter().filter(|session_i| *session_i != session).collect(), + ); + } + db::SerializedKeys::del(txn, session); + + // Drain the transactions to sign + // Presumably, TransactionsToSign will be fully populated before retirement occurs, ensuring this + // doesn't leave any pending blobs behind + while TransactionsToSign::::try_recv(txn, external_key).is_some() {} + + // Drain our DB channels + while db::CompletedEventualitiesForEachKey::try_recv(txn, session).is_some() {} + while db::CoordinatorToTransactionSignerMessages::try_recv(txn, session).is_some() {} + while db::TransactionSignerToCoordinatorMessages::try_recv(txn, session).is_some() {} + while db::CoordinatorToBatchSignerMessages::try_recv(txn, session).is_some() {} + while db::BatchSignerToCoordinatorMessages::try_recv(txn, session).is_some() {} + while db::CoordinatorToSlashReportSignerMessages::try_recv(txn, session).is_some() {} + while db::CoordinatorToCosignerMessages::try_recv(txn, session).is_some() {} + while db::CosignerToCoordinatorMessages::try_recv(txn, session).is_some() {} + } } /* diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs index b638eac07..8fdf81455 100644 --- a/processor/signers/src/transaction/mod.rs +++ b/processor/signers/src/transaction/mod.rs @@ -11,7 +11,6 @@ use serai_db::{DbTxn, Db}; use primitives::task::ContinuallyRan; use scheduler::{Transaction, SignableTransaction, TransactionsToSign}; -use scanner::{ScannerFeed, Scheduler}; use frost_attempt_manager::*; @@ -26,40 +25,35 @@ use crate::{ mod db; use db::*; -type TransactionFor = < - < - - >::SignableTransaction as SignableTransaction>::PreprocessMachine as PreprocessMachine ->::Signature; +type TransactionFor = + <::PreprocessMachine as PreprocessMachine>::Signature; // Fetches transactions to sign and signs them.
pub(crate) struct TransactionTask< D: Db, - S: ScannerFeed, - Sch: Scheduler, - P: TransactionPublisher, + ST: SignableTransaction, + P: TransactionPublisher>, > { db: D, publisher: P, session: Session, - keys: Vec::Ciphersuite>>, + keys: Vec>, active_signing_protocols: HashSet<[u8; 32]>, - attempt_manager: - AttemptManager::PreprocessMachine>, + attempt_manager: AttemptManager::PreprocessMachine>, last_publication: Instant, } -impl, P: TransactionPublisher> - TransactionTask +impl>> + TransactionTask { pub(crate) fn new( db: D, publisher: P, session: Session, - keys: Vec::Ciphersuite>>, + keys: Vec>, ) -> Self { let mut active_signing_protocols = HashSet::new(); let mut attempt_manager = AttemptManager::new( @@ -74,8 +68,7 @@ impl, P: TransactionPublisher>::SignableTransaction::read(&mut signable_transaction_buf).unwrap(); + let signable_transaction = ST::read(&mut signable_transaction_buf).unwrap(); assert!(signable_transaction_buf.is_empty()); assert_eq!(signable_transaction.id(), tx); @@ -99,8 +92,8 @@ impl, P: TransactionPublisher, P: TransactionPublisher> - ContinuallyRan for TransactionTask +impl>> ContinuallyRan + for TransactionTask { async fn run_iteration(&mut self) -> Result { let mut iterated = false; @@ -108,10 +101,7 @@ impl, P: TransactionPublisher::try_recv( - &mut txn, - &self.keys[0].group_key(), - ) else { + let Some(tx) = TransactionsToSign::::try_recv(&mut txn, &self.keys[0].group_key()) else { break; }; iterated = true; @@ -208,7 +198,7 @@ impl, P: TransactionPublisher::read(&mut tx_buf).unwrap(); + let tx = TransactionFor::::read(&mut tx_buf).unwrap(); assert!(tx_buf.is_empty()); self From 442198c8ad0f54d4a5ac1ebb9727cdee583d8332 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 8 Sep 2024 00:30:55 -0400 Subject: [PATCH 081/179] Expand task management These extensions are necessary for the signers task management. --- processor/primitives/src/task.rs | 89 ++++++++++++++++++++++++++------ processor/scanner/src/lib.rs | 12 ++--- processor/signers/src/db.rs | 1 + processor/signers/src/lib.rs | 30 +++++++++-- 4 files changed, 105 insertions(+), 27 deletions(-) diff --git a/processor/primitives/src/task.rs b/processor/primitives/src/task.rs index 94a576a0a..a40fb9ff1 100644 --- a/processor/primitives/src/task.rs +++ b/processor/primitives/src/task.rs @@ -1,28 +1,54 @@ use core::time::Duration; +use std::sync::Arc; -use tokio::sync::mpsc; +use tokio::sync::{mpsc, oneshot, Mutex}; -/// A handle to immediately run an iteration of a task. +enum Closed { + NotClosed(Option>), + Closed, +} + +/// A handle for a task. #[derive(Clone)] -pub struct RunNowHandle(mpsc::Sender<()>); -/// An instruction recipient to immediately run an iteration of a task. -pub struct RunNowRecipient(mpsc::Receiver<()>); +pub struct TaskHandle { + run_now: mpsc::Sender<()>, + close: mpsc::Sender<()>, + closed: Arc>, +} +/// A task's internal structures. +pub struct Task { + run_now: mpsc::Receiver<()>, + close: mpsc::Receiver<()>, + closed: oneshot::Sender<()>, +} -impl RunNowHandle { - /// Create a new run-now handle to be assigned to a task. - pub fn new() -> (Self, RunNowRecipient) { +impl Task { + /// Create a new task definition. 
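Once `new` (below) is in place, these pieces compose roughly as follows (a sketch; `my_task` is any `ContinuallyRan` implementor and `dependent_handle` an already-created `TaskHandle`):

// A hedged sketch of spawning and closing a task with these primitives.
let (task, handle) = Task::new();
tokio::spawn(my_task.continually_run(task, vec![dependent_handle.clone()]));

// Trigger an immediate iteration rather than waiting out the timer.
handle.run_now();

// On shutdown, wait for any in-progress iteration to finish and the task to stop.
handle.close().await;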
+ pub fn new() -> (Self, TaskHandle) { // Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as // soon as possible - let (send, recv) = mpsc::channel(1); - (Self(send), RunNowRecipient(recv)) + let (run_now_send, run_now_recv) = mpsc::channel(1); + // And any call to close satisfies all calls to close + let (close_send, close_recv) = mpsc::channel(1); + let (closed_send, closed_recv) = oneshot::channel(); + ( + Self { run_now: run_now_recv, close: close_recv, closed: closed_send }, + TaskHandle { + run_now: run_now_send, + close: close_send, + closed: Arc::new(Mutex::new(Closed::NotClosed(Some(closed_recv)))), + }, + ) } +} +impl TaskHandle { /// Tell the task to run now (and not whenever its next iteration on a timer is). /// /// Panics if the task has been dropped. pub fn run_now(&self) { #[allow(clippy::match_same_arms)] - match self.0.try_send(()) { + match self.run_now.try_send(()) { Ok(()) => {} // NOP on full, as this task will already be ran as soon as possible Err(mpsc::error::TrySendError::Full(())) => {} @@ -31,6 +57,24 @@ impl RunNowHandle { } } } + + /// Close the task. + /// + /// Returns once the task shuts down after it finishes its current iteration (which may be of + /// unbounded time). + pub async fn close(self) { + // If another instance of the handle called this, don't error + let _ = self.close.send(()).await; + // Wait until we receive the closed message + let mut closed = self.closed.lock().await; + match &mut *closed { + Closed::NotClosed(ref mut recv) => { + assert_eq!(recv.take().unwrap().await, Ok(()), "continually ran task dropped itself?"); + *closed = Closed::Closed; + } + Closed::Closed => {} + } + } } /// A task to be continually ran. @@ -50,10 +94,7 @@ pub trait ContinuallyRan: Sized { async fn run_iteration(&mut self) -> Result; /// Continually run the task. - /// - /// This returns a channel which can have a message set to immediately trigger a new run of an - /// iteration. - async fn continually_run(mut self, mut run_now: RunNowRecipient, dependents: Vec) { + async fn continually_run(mut self, mut task: Task, dependents: Vec) { // The default number of seconds to sleep before running the task again let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS; // The current number of seconds to sleep before running the task again @@ -66,6 +107,15 @@ pub trait ContinuallyRan: Sized { }; loop { + // If we were told to close, or if all handles were dropped, stop running + { + let should_close = task.close.try_recv(); + match should_close { + Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break, + Err(mpsc::error::TryRecvError::Empty) => {} + } + } + match self.run_iteration().await { Ok(run_dependents) => { // Upon a successful (error-free) loop iteration, reset the amount of time we sleep @@ -86,8 +136,15 @@ pub trait ContinuallyRan: Sized { // Don't run the task again for another few seconds UNLESS told to run now tokio::select!
{ () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {}, - msg = run_now.0.recv() => assert_eq!(msg, Some(()), "run now handle was dropped"), + msg = task.run_now.recv() => { + // Check if this is firing because the handle was dropped + if msg.is_none() { + break; + } + }, } } + + task.closed.send(()).unwrap(); } } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 7c699e9cd..6403605d4 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -343,7 +343,7 @@ pub trait Scheduler: 'static + Send { /// A representation of a scanner. #[allow(non_snake_case)] pub struct Scanner { - substrate_handle: RunNowHandle, + substrate_handle: TaskHandle, _S: PhantomData, } impl Scanner { @@ -362,11 +362,11 @@ impl Scanner { let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone()); let eventuality_task = eventuality::EventualityTask::<_, _, Sch>::new(db, feed, start_block); - let (_index_handle, index_run) = RunNowHandle::new(); - let (scan_handle, scan_run) = RunNowHandle::new(); - let (report_handle, report_run) = RunNowHandle::new(); - let (substrate_handle, substrate_run) = RunNowHandle::new(); - let (eventuality_handle, eventuality_run) = RunNowHandle::new(); + let (index_run, _index_handle) = Task::new(); + let (scan_run, scan_handle) = Task::new(); + let (report_run, report_handle) = Task::new(); + let (substrate_run, substrate_handle) = Task::new(); + let (eventuality_run, eventuality_handle) = Task::new(); // Upon indexing a new block, scan it tokio::spawn(index_task.continually_run(index_run, vec![scan_handle.clone()])); diff --git a/processor/signers/src/db.rs b/processor/signers/src/db.rs index 9975cbda0..ec9b879c2 100644 --- a/processor/signers/src/db.rs +++ b/processor/signers/src/db.rs @@ -9,6 +9,7 @@ create_db! { RegisteredKeys: () -> Vec, SerializedKeys: (session: Session) -> Vec, LatestRetiredSession: () -> Session, + ToCleanup: () -> Vec<(Session, Vec)>, } } diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index 9bc2459dc..72fe2d178 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -3,6 +3,7 @@ #![deny(missing_docs)] use core::{fmt::Debug, marker::PhantomData}; +use std::collections::HashMap; use zeroize::Zeroizing; @@ -13,6 +14,7 @@ use frost::dkg::{ThresholdCore, ThresholdKeys}; use serai_db::{DbTxn, Db}; +use primitives::task::TaskHandle; use scheduler::{Transaction, SignableTransaction, TransactionsToSign}; pub(crate) mod db; @@ -39,7 +41,10 @@ pub trait TransactionPublisher: 'static + Send + Sync { } /// The signers used by a processor. -pub struct Signers(PhantomData); +pub struct Signers { + tasks: HashMap>, + _ST: PhantomData, +} /* This is completely outside of consensus, so the worst that can happen is: @@ -58,6 +63,8 @@ impl Signers { /// /// This will spawn tasks for any historically registered keys. pub fn new(db: impl Db) -> Self { + let mut tasks = HashMap::new(); + for session in db::RegisteredKeys::get(&db).unwrap_or(vec![]) { let buf = db::SerializedKeys::get(&db, session).unwrap(); let mut buf = buf.as_slice(); @@ -74,7 +81,7 @@ impl Signers { todo!("TODO") } - todo!("TODO") + Self { tasks, _ST: PhantomData } } /// Register a set of keys to sign with. 
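As a concrete illustration, here is a minimal sketch of driving the Task/TaskHandle lifecycle introduced above (`example_task` stands in for any `ContinuallyRan` implementor; this fragment is illustrative and not part of the patch):

  // Build the task definition and its handle, then spawn the task
  let (task, handle) = Task::new();
  tokio::spawn(example_task.continually_run(task, vec![]));
  // Trigger an immediate iteration rather than waiting for the timer
  handle.run_now();
  // Request shutdown; this resolves once the current iteration (if any) completes
  handle.close().await;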
@@ -87,6 +94,7 @@ impl Signers { substrate_keys: Vec>, network_keys: Vec>, ) { + // Don't register already retired keys if Some(session.0) <= db::LatestRetiredSession::get(txn).map(|session| session.0) { return; } @@ -125,9 +133,6 @@ impl Signers { db::LatestRetiredSession::set(txn, &session); } - // Kill the tasks - todo!("TODO"); - // Update RegisteredKeys/SerializedKeys if let Some(registered) = db::RegisteredKeys::get(txn) { db::RegisteredKeys::set( @@ -137,6 +142,20 @@ impl Signers { } db::SerializedKeys::del(txn, session); + // Queue the session for clean up + let mut to_cleanup = db::ToCleanup::get(txn).unwrap_or(vec![]); + to_cleanup.push((session, external_key.to_bytes().as_ref().to_vec())); + db::ToCleanup::set(txn, &to_cleanup); + + // TODO: Handle all of the following cleanup on a task + /* + // Kill the tasks + if let Some(tasks) = self.tasks.remove(&session) { + for task in tasks { + task.close().await; + } + } + // Drain the transactions to sign // Presumably, TransactionsToSign will be fully populated before retiry occurs, making this // perfect in not leaving any pending blobs behind @@ -152,6 +171,7 @@ impl Signers { while db::SlashReportSignerToCoordinatorMessages::try_recv(txn, session).is_some() {} while db::CoordinatorToCosignerMessages::try_recv(txn, session).is_some() {} while db::CosignerToCoordinatorMessages::try_recv(txn, session).is_some() {} + */ } } From 30c4c1a7ed22da118774289fa79ce2b12e97c1b9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 8 Sep 2024 22:13:42 -0400 Subject: [PATCH 082/179] Route the coordinator, fix race conditions in the signers library --- Cargo.lock | 2 +- processor/frost-attempt-manager/Cargo.toml | 1 - .../frost-attempt-manager/src/individual.rs | 24 +- processor/frost-attempt-manager/src/lib.rs | 26 +-- processor/messages/Cargo.toml | 2 + processor/messages/src/lib.rs | 36 ++- processor/primitives/src/block.rs | 2 + processor/scanner/src/db.rs | 15 +- processor/scanner/src/eventuality/mod.rs | 2 +- processor/scanner/src/lib.rs | 20 +- processor/scheduler/primitives/src/lib.rs | 4 + processor/signers/src/coordinator.rs | 98 +++++++++ processor/signers/src/db.rs | 14 +- processor/signers/src/lib.rs | 206 +++++++++++------- processor/signers/src/transaction/mod.rs | 78 ++++--- 15 files changed, 357 insertions(+), 173 deletions(-) create mode 100644 processor/signers/src/coordinator.rs diff --git a/Cargo.lock b/Cargo.lock index b960db4d4..d6b0e3def 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8630,7 +8630,6 @@ name = "serai-processor-frost-attempt-manager" version = "0.1.0" dependencies = [ "borsh", - "hex", "log", "modular-frost", "parity-scale-codec", @@ -8666,6 +8665,7 @@ version = "0.1.0" dependencies = [ "borsh", "dkg", + "hex", "parity-scale-codec", "serai-coins-primitives", "serai-in-instructions-primitives", diff --git a/processor/frost-attempt-manager/Cargo.toml b/processor/frost-attempt-manager/Cargo.toml index 67bd8bb63..ad8d2a4c9 100644 --- a/processor/frost-attempt-manager/Cargo.toml +++ b/processor/frost-attempt-manager/Cargo.toml @@ -26,7 +26,6 @@ frost = { package = "modular-frost", path = "../../crypto/frost", version = "^0. 
serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } -hex = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } diff --git a/processor/frost-attempt-manager/src/individual.rs b/processor/frost-attempt-manager/src/individual.rs index 2591b582f..6a8b33526 100644 --- a/processor/frost-attempt-manager/src/individual.rs +++ b/processor/frost-attempt-manager/src/individual.rs @@ -10,11 +10,11 @@ use frost::{ use serai_validator_sets_primitives::Session; use serai_db::{Get, DbTxn, Db, create_db}; -use messages::sign::{SignId, ProcessorMessage}; +use messages::sign::{VariantSignId, SignId, ProcessorMessage}; create_db!( FrostAttemptManager { - Attempted: (id: [u8; 32]) -> u32, + Attempted: (id: VariantSignId) -> u32, } ); @@ -28,7 +28,7 @@ pub(crate) struct SigningProtocol { // The key shares we sign with are expected to be contiguous from this position. start_i: Participant, // The ID of this signing protocol. - id: [u8; 32], + id: VariantSignId, // This accepts a vector of `root` machines in order to support signing with multiple key shares. root: Vec, preprocessed: HashMap, HashMap>)>, @@ -48,10 +48,10 @@ impl SigningProtocol { db: D, session: Session, start_i: Participant, - id: [u8; 32], + id: VariantSignId, root: Vec, ) -> Self { - log::info!("starting signing protocol {}", hex::encode(id)); + log::info!("starting signing protocol {id:?}"); Self { db, @@ -100,7 +100,7 @@ impl SigningProtocol { txn.commit(); } - log::debug!("attemting a new instance of signing protocol {}", hex::encode(self.id)); + log::debug!("attempting a new instance of signing protocol {:?}", self.id); let mut our_preprocesses = HashMap::with_capacity(self.root.len()); let mut preprocessed = Vec::with_capacity(self.root.len()); @@ -137,7 +137,7 @@ impl SigningProtocol { attempt: u32, serialized_preprocesses: HashMap>, ) -> Vec { - log::debug!("handling preprocesses for signing protocol {}", hex::encode(self.id)); + log::debug!("handling preprocesses for signing protocol {:?}", self.id); let Some((machines, our_serialized_preprocesses)) = self.preprocessed.remove(&attempt) else { return vec![]; @@ -211,8 +211,8 @@ impl SigningProtocol { assert!(self.shared.insert(attempt, (shared.swap_remove(0), our_shares)).is_none()); log::debug!( - "successfully handled preprocesses for signing protocol {}, sending shares", - hex::encode(self.id) + "successfully handled preprocesses for signing protocol {:?}, sending shares", + self.id, ); msgs.push(ProcessorMessage::Shares { id: SignId { session: self.session, id: self.id, attempt }, @@ -229,7 +229,7 @@ impl SigningProtocol { attempt: u32, serialized_shares: HashMap>, ) -> Result> { - log::debug!("handling shares for signing protocol {}", hex::encode(self.id)); + log::debug!("handling shares for signing protocol {:?}", self.id); let Some((machine, our_serialized_shares)) = self.shared.remove(&attempt) else { Err(vec![])? }; @@ -272,13 +272,13 @@ impl SigningProtocol { }, }; - log::info!("finished signing for protocol {}", hex::encode(self.id)); + log::info!("finished signing for protocol {:?}", self.id); Ok(signature) } /// Cleanup the database entries for a specified signing protocol.
- pub(crate) fn cleanup(txn: &mut impl DbTxn, id: [u8; 32]) { + pub(crate) fn cleanup(txn: &mut impl DbTxn, id: VariantSignId) { Attempted::del(txn, id); } } diff --git a/processor/frost-attempt-manager/src/lib.rs b/processor/frost-attempt-manager/src/lib.rs index 6666ffacc..db8b08617 100644 --- a/processor/frost-attempt-manager/src/lib.rs +++ b/processor/frost-attempt-manager/src/lib.rs @@ -9,7 +9,7 @@ use frost::{Participant, sign::PreprocessMachine}; use serai_validator_sets_primitives::Session; use serai_db::{DbTxn, Db}; -use messages::sign::{ProcessorMessage, CoordinatorMessage}; +use messages::sign::{VariantSignId, ProcessorMessage, CoordinatorMessage}; mod individual; use individual::SigningProtocol; @@ -21,7 +21,7 @@ pub enum Response { /// A produced signature. Signature { /// The ID of the protocol this is for. - id: [u8; 32], + id: VariantSignId, /// The signature. signature: M::Signature, }, @@ -32,7 +32,7 @@ pub struct AttemptManager { db: D, session: Session, start_i: Participant, - active: HashMap<[u8; 32], SigningProtocol>, + active: HashMap>, } impl AttemptManager { @@ -46,7 +46,7 @@ impl AttemptManager { /// Register a signing protocol to attempt. /// /// This ID must be unique across all sessions, attempt managers, protocols, etc. - pub fn register(&mut self, id: [u8; 32], machines: Vec) -> Vec { + pub fn register(&mut self, id: VariantSignId, machines: Vec) -> Vec { let mut protocol = SigningProtocol::new(self.db.clone(), self.session, self.start_i, id, machines); let messages = protocol.attempt(0); @@ -60,11 +60,11 @@ impl AttemptManager { /// This does not stop the protocol from being re-registered and further worked on (with /// undefined behavior) then. The higher-level context must never call `register` again with this /// ID accordingly. 
- pub fn retire(&mut self, txn: &mut impl DbTxn, id: [u8; 32]) { + pub fn retire(&mut self, txn: &mut impl DbTxn, id: VariantSignId) { if self.active.remove(&id).is_none() { - log::info!("retiring protocol {}, which we didn't register/already retired", hex::encode(id)); + log::info!("retiring protocol {id:?}, which we didn't register/already retired"); } else { - log::info!("retired signing protocol {}", hex::encode(id)); + log::info!("retired signing protocol {id:?}"); } SigningProtocol::::cleanup(txn, id); } @@ -79,8 +79,8 @@ impl AttemptManager { CoordinatorMessage::Preprocesses { id, preprocesses } => { let Some(protocol) = self.active.get_mut(&id.id) else { log::trace!( - "handling preprocesses for signing protocol {}, which we're not actively running", - hex::encode(id.id) + "handling preprocesses for signing protocol {:?}, which we're not actively running", + id.id, ); return Response::Messages(vec![]); }; @@ -89,8 +89,8 @@ impl AttemptManager { CoordinatorMessage::Shares { id, shares } => { let Some(protocol) = self.active.get_mut(&id.id) else { log::trace!( - "handling shares for signing protocol {}, which we're not actively running", - hex::encode(id.id) + "handling shares for signing protocol {:?}, which we're not actively running", + id.id, ); return Response::Messages(vec![]); }; @@ -102,8 +102,8 @@ impl AttemptManager { CoordinatorMessage::Reattempt { id } => { let Some(protocol) = self.active.get_mut(&id.id) else { log::trace!( - "reattempting signing protocol {}, which we're not actively running", - hex::encode(id.id) + "reattempting signing protocol {:?}, which we're not actively running", + id.id, ); return Response::Messages(vec![]); }; diff --git a/processor/messages/Cargo.toml b/processor/messages/Cargo.toml index 0eba999df..dbadd9db7 100644 --- a/processor/messages/Cargo.toml +++ b/processor/messages/Cargo.toml @@ -17,6 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] +hex = { version = "0.4", default-features = false, features = ["std"] } + scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index 27d75d2e9..ef907f97f 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -1,3 +1,4 @@ +use core::fmt; use std::collections::HashMap; use scale::{Encode, Decode}; @@ -85,10 +86,37 @@ pub mod key_gen { pub mod sign { use super::*; - #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] + #[derive(Clone, Copy, PartialEq, Eq, Hash, Encode, Decode, BorshSerialize, BorshDeserialize)] + pub enum VariantSignId { + Cosign([u8; 32]), + Batch(u32), + SlashReport([u8; 32]), + Transaction([u8; 32]), + } + impl fmt::Debug for VariantSignId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + match self { + Self::Cosign(cosign) => { + f.debug_struct("VariantSignId::Cosign").field("0", &hex::encode(cosign)).finish() + } + Self::Batch(batch) => f.debug_struct("VariantSignId::Batch").field("0", &batch).finish(), + Self::SlashReport(slash_report) => f + .debug_struct("VariantSignId::SlashReport") + .field("0", &hex::encode(slash_report)) + .finish(), + Self::Transaction(tx) => { + f.debug_struct("VariantSignId::Transaction").field("0", &hex::encode(tx)).finish() + } + } + } + } + + #[derive( + Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, 
BorshSerialize, BorshDeserialize, + )] pub struct SignId { pub session: Session, - pub id: [u8; 32], + pub id: VariantSignId, pub attempt: u32, } @@ -109,11 +137,11 @@ pub mod sign { None } - pub fn session(&self) -> Session { + pub fn sign_id(&self) -> &SignId { match self { CoordinatorMessage::Preprocesses { id, .. } | CoordinatorMessage::Shares { id, .. } | - CoordinatorMessage::Reattempt { id, .. } => id.session, + CoordinatorMessage::Reattempt { id, .. } => id, } } } diff --git a/processor/primitives/src/block.rs b/processor/primitives/src/block.rs index 6f603ab2b..89dff54ff 100644 --- a/processor/primitives/src/block.rs +++ b/processor/primitives/src/block.rs @@ -60,6 +60,8 @@ pub trait Block: Send + Sync + Sized + Clone + Debug { /// Check if this block resolved any Eventualities. /// + /// This MUST mutate `eventualities` to no longer contain the resolved Eventualities. + /// /// Returns the resolved Eventualities, indexed by the ID of the transactions which resolved /// them. fn check_for_eventuality_resolutions( diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 246e5f46a..f72fa202f 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -1,6 +1,7 @@ use core::marker::PhantomData; use std::io::{self, Read, Write}; +use group::GroupEncoding; use scale::{Encode, Decode, IoReader}; use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db, db_channel}; @@ -526,20 +527,20 @@ mod _completed_eventualities { db_channel! { ScannerPublic { - CompletedEventualities: (empty_key: ()) -> [u8; 32], + CompletedEventualities: (key: &[u8]) -> [u8; 32], } } } /// The IDs of completed Eventualities found on-chain, within a finalized block. -pub struct CompletedEventualities(PhantomData); -impl CompletedEventualities { - pub(crate) fn send(txn: &mut impl DbTxn, id: [u8; 32]) { - _completed_eventualities::CompletedEventualities::send(txn, (), &id); +pub struct CompletedEventualities(PhantomData); +impl CompletedEventualities { + pub(crate) fn send(txn: &mut impl DbTxn, key: &K, id: [u8; 32]) { + _completed_eventualities::CompletedEventualities::send(txn, key.to_bytes().as_ref(), &id); } /// Receive the ID of a completed Eventuality.
- pub fn try_recv(txn: &mut impl DbTxn) -> Option<[u8; 32]> { - _completed_eventualities::CompletedEventualities::try_recv(txn, ()) + pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option<[u8; 32]> { + _completed_eventualities::CompletedEventualities::try_recv(txn, key.to_bytes().as_ref()) } } diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 7dadbe559..be5b45554 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -298,7 +298,7 @@ impl> ContinuallyRan for EventualityTas hex::encode(eventuality.id()), hex::encode(tx.as_ref()) ); - CompletedEventualities::::send(&mut txn, eventuality.id()); + CompletedEventualities::send(&mut txn, &key.key, eventuality.id()); } // Fetch all non-External outputs diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 6403605d4..3323c6ff3 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -362,24 +362,24 @@ impl Scanner { let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone()); let eventuality_task = eventuality::EventualityTask::<_, _, Sch>::new(db, feed, start_block); - let (index_run, _index_handle) = Task::new(); - let (scan_run, scan_handle) = Task::new(); - let (report_run, report_handle) = Task::new(); - let (substrate_run, substrate_handle) = Task::new(); - let (eventuality_run, eventuality_handle) = Task::new(); + let (index_task_def, _index_handle) = Task::new(); + let (scan_task_def, scan_handle) = Task::new(); + let (report_task_def, report_handle) = Task::new(); + let (substrate_task_def, substrate_handle) = Task::new(); + let (eventuality_task_def, eventuality_handle) = Task::new(); // Upon indexing a new block, scan it - tokio::spawn(index_task.continually_run(index_run, vec![scan_handle.clone()])); + tokio::spawn(index_task.continually_run(index_task_def, vec![scan_handle.clone()])); // Upon scanning a block, report it - tokio::spawn(scan_task.continually_run(scan_run, vec![report_handle])); + tokio::spawn(scan_task.continually_run(scan_task_def, vec![report_handle])); // Upon reporting a block, we do nothing (as the burden is on Substrate which won't be // immediately ready) - tokio::spawn(report_task.continually_run(report_run, vec![])); + tokio::spawn(report_task.continually_run(report_task_def, vec![])); // Upon handling an event from Substrate, we run the Eventuality task (as it's what's affected) - tokio::spawn(substrate_task.continually_run(substrate_run, vec![eventuality_handle])); + tokio::spawn(substrate_task.continually_run(substrate_task_def, vec![eventuality_handle])); // Upon handling the Eventualities in a block, we run the scan task as we've advanced the // window it's allowed to scan - tokio::spawn(eventuality_task.continually_run(eventuality_run, vec![scan_handle])); + tokio::spawn(eventuality_task.continually_run(eventuality_task_def, vec![scan_handle])); Self { substrate_handle, _S: PhantomData } } diff --git a/processor/scheduler/primitives/src/lib.rs b/processor/scheduler/primitives/src/lib.rs index cef10d35e..f146027d5 100644 --- a/processor/scheduler/primitives/src/lib.rs +++ b/processor/scheduler/primitives/src/lib.rs @@ -41,6 +41,10 @@ pub trait SignableTransaction: 'static + Sized + Send + Sync + Clone { fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine; } +/// The transaction type for a SignableTransaction.
+pub type TransactionFor = + <::PreprocessMachine as PreprocessMachine>::Signature; + mod db { use serai_db::{Get, DbTxn, create_db, db_channel}; diff --git a/processor/signers/src/coordinator.rs b/processor/signers/src/coordinator.rs new file mode 100644 index 000000000..43dcc571e --- /dev/null +++ b/processor/signers/src/coordinator.rs @@ -0,0 +1,98 @@ +use serai_db::{DbTxn, Db}; + +use primitives::task::ContinuallyRan; + +use crate::{ + db::{ + RegisteredKeys, CosignerToCoordinatorMessages, BatchSignerToCoordinatorMessages, + SlashReportSignerToCoordinatorMessages, TransactionSignerToCoordinatorMessages, + }, + Coordinator, +}; + +// Fetches messages to send the coordinator and sends them. +pub(crate) struct CoordinatorTask { + db: D, + coordinator: C, +} + +impl CoordinatorTask { + pub(crate) fn new(db: D, coordinator: C) -> Self { + Self { db, coordinator } + } +} + +#[async_trait::async_trait] +impl ContinuallyRan for CoordinatorTask { + async fn run_iteration(&mut self) -> Result { + let mut iterated = false; + + for session in RegisteredKeys::get(&self.db).unwrap_or(vec![]) { + loop { + let mut txn = self.db.txn(); + let Some(msg) = CosignerToCoordinatorMessages::try_recv(&mut txn, session) else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + + loop { + let mut txn = self.db.txn(); + let Some(msg) = BatchSignerToCoordinatorMessages::try_recv(&mut txn, session) else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + + loop { + let mut txn = self.db.txn(); + let Some(msg) = SlashReportSignerToCoordinatorMessages::try_recv(&mut txn, session) else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + + loop { + let mut txn = self.db.txn(); + let Some(msg) = TransactionSignerToCoordinatorMessages::try_recv(&mut txn, session) else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + } + + Ok(iterated) + } +} diff --git a/processor/signers/src/db.rs b/processor/signers/src/db.rs index ec9b879c2..ae62c947d 100644 --- a/processor/signers/src/db.rs +++ b/processor/signers/src/db.rs @@ -15,14 +15,8 @@ create_db! { db_channel! { SignersGlobal { - // CompletedEventualities needs to be handled by each signer, meaning we need to turn its - // effective spsc into a spmc. We do this by duplicating its message for all keys we're - // signing for. - // TODO: Populate from CompletedEventualities - CompletedEventualitiesForEachKey: (session: Session) -> [u8; 32], - - CoordinatorToTransactionSignerMessages: (session: Session) -> CoordinatorMessage, - TransactionSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + CoordinatorToCosignerMessages: (session: Session) -> CoordinatorMessage, + CosignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, CoordinatorToBatchSignerMessages: (session: Session) -> CoordinatorMessage, BatchSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, @@ -30,7 +24,7 @@ db_channel! 
{ CoordinatorToSlashReportSignerMessages: (session: Session) -> CoordinatorMessage, SlashReportSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, - CoordinatorToCosignerMessages: (session: Session) -> CoordinatorMessage, - CosignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + CoordinatorToTransactionSignerMessages: (session: Session) -> CoordinatorMessage, + TransactionSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, } } diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index 72fe2d178..a53f22085 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -7,23 +7,42 @@ use std::collections::HashMap; use zeroize::Zeroizing; -use serai_validator_sets_primitives::Session; - -use ciphersuite::{group::GroupEncoding, Ristretto}; +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use frost::dkg::{ThresholdCore, ThresholdKeys}; +use serai_validator_sets_primitives::Session; + use serai_db::{DbTxn, Db}; -use primitives::task::TaskHandle; -use scheduler::{Transaction, SignableTransaction, TransactionsToSign}; +use messages::sign::{VariantSignId, ProcessorMessage, CoordinatorMessage}; + +use primitives::task::{Task, TaskHandle, ContinuallyRan}; +use scheduler::{Transaction, SignableTransaction, TransactionFor}; pub(crate) mod db; +mod coordinator; +use coordinator::CoordinatorTask; + mod transaction; +use transaction::TransactionTask; + +/// A connection to the Coordinator which messages can be published with. +#[async_trait::async_trait] +pub trait Coordinator: 'static + Send + Sync { + /// An error encountered when sending a message. + /// + /// This MUST be an ephemeral error. Retrying sending a message MUST eventually resolve without + /// manual intervention/changing the arguments. + type EphemeralError: Debug; + + /// Send a `messages::sign::ProcessorMessage`. + async fn send(&mut self, message: ProcessorMessage) -> Result<(), Self::EphemeralError>; +} /// An object capable of publishing a transaction. #[async_trait::async_trait] -pub trait TransactionPublisher: 'static + Send + Sync { +pub trait TransactionPublisher: 'static + Send + Sync + Clone { /// An error encountered when publishing a transaction. /// /// This MUST be an ephemeral error. Retrying publication MUST eventually resolve without manual @@ -40,9 +59,18 @@ pub trait TransactionPublisher: 'static + Send + Sync { async fn publish(&self, tx: T) -> Result<(), Self::EphemeralError>; } +struct Tasks { + cosigner: TaskHandle, + batch: TaskHandle, + slash_report: TaskHandle, + transaction: TaskHandle, +} + /// The signers used by a processor. +#[allow(non_snake_case)] pub struct Signers { - tasks: HashMap>, + coordinator_handle: TaskHandle, + tasks: HashMap, _ST: PhantomData, } @@ -62,9 +90,57 @@ impl Signers { /// Initialize the signers. /// /// This will spawn tasks for any historically registered keys. - pub fn new(db: impl Db) -> Self { + pub fn new( + mut db: impl Db, + coordinator: impl Coordinator, + publisher: &impl TransactionPublisher>, + ) -> Self { + /* + On boot, perform any database cleanup which was queued. + + We don't do this cleanup at time of dropping the task as we'd need to wait an unbounded + amount of time for the task to stop (requiring an async task), then we'd have to drain the + channels (which would be on a distinct DB transaction and risk not occurring if we rebooted + while waiting for the task to stop). This is the easiest way to handle this. 
+ */ + { + let mut txn = db.txn(); + for (session, external_key_bytes) in db::ToCleanup::get(&txn).unwrap_or(vec![]) { + let mut external_key_bytes = external_key_bytes.as_slice(); + let external_key = + ::read_G(&mut external_key_bytes).unwrap(); + assert!(external_key_bytes.is_empty()); + + // Drain the transactions to sign + // TransactionsToSign will be fully populated by the scheduler before retirement occurs, + // so this leaves no pending blobs behind + while scheduler::TransactionsToSign::::try_recv(&mut txn, &external_key).is_some() {} + + // Drain the completed Eventualities + // This will be fully populated by the scanner before retirement + while scanner::CompletedEventualities::try_recv(&mut txn, &external_key).is_some() {} + + // Drain our DB channels + while db::CoordinatorToCosignerMessages::try_recv(&mut txn, session).is_some() {} + while db::CosignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + while db::CoordinatorToBatchSignerMessages::try_recv(&mut txn, session).is_some() {} + while db::BatchSignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + while db::CoordinatorToSlashReportSignerMessages::try_recv(&mut txn, session).is_some() {} + while db::SlashReportSignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + while db::CoordinatorToTransactionSignerMessages::try_recv(&mut txn, session).is_some() {} + while db::TransactionSignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + } + db::ToCleanup::del(&mut txn); + txn.commit(); + } + let mut tasks = HashMap::new(); + let (coordinator_task, coordinator_handle) = Task::new(); + tokio::spawn( + CoordinatorTask::new(db.clone(), coordinator).continually_run(coordinator_task, vec![]), + ); + for session in db::RegisteredKeys::get(&db).unwrap_or(vec![]) { let buf = db::SerializedKeys::get(&db, session).unwrap(); let mut buf = buf.as_slice(); @@ -78,10 +154,23 @@ impl Signers { .push(ThresholdKeys::from(ThresholdCore::::read(&mut buf).unwrap())); } - todo!("TODO") + // TODO: Batch signer, cosigner, slash report signers + + let (transaction_task, transaction_handle) = Task::new(); + tokio::spawn( + TransactionTask::<_, ST, _>::new(db.clone(), publisher.clone(), session, external_keys) + .continually_run(transaction_task, vec![coordinator_handle.clone()]), + ); + + tasks.insert(session, Tasks { + cosigner: todo!("TODO"), + batch: todo!("TODO"), + slash_report: todo!("TODO"), + transaction: transaction_handle, + }); } - Self { tasks, _ST: PhantomData } + Self { coordinator_handle, tasks, _ST: PhantomData } } /// Register a set of keys to sign with. @@ -146,82 +235,31 @@ impl Signers { let mut to_cleanup = db::ToCleanup::get(txn).unwrap_or(vec![]); to_cleanup.push((session, external_key.to_bytes().as_ref().to_vec())); db::ToCleanup::set(txn, &to_cleanup); + } - + /// Queue handling a message. + /// + /// This is a cheap call and can be done inline within a higher-level loop.
+ pub fn queue_message(&mut self, txn: &mut impl DbTxn, message: &CoordinatorMessage) { + let sign_id = message.sign_id(); + let tasks = self.tasks.get(&sign_id.session); + match sign_id.id { + VariantSignId::Cosign(_) => { + db::CoordinatorToCosignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { tasks.cosigner.run_now(); } + } + VariantSignId::Batch(_) => { + db::CoordinatorToBatchSignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { tasks.batch.run_now(); } + } + VariantSignId::SlashReport(_) => { + db::CoordinatorToSlashReportSignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { tasks.slash_report.run_now(); } + } + VariantSignId::Transaction(_) => { + db::CoordinatorToTransactionSignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { tasks.transaction.run_now(); } } } - - // Drain the transactions to sign - // Presumably, TransactionsToSign will be fully populated before retiry occurs, making this - // perfect in not leaving any pending blobs behind - while TransactionsToSign::::try_recv(txn, external_key).is_some() {} - - // Drain our DB channels - while db::CompletedEventualitiesForEachKey::try_recv(txn, session).is_some() {} - while db::CoordinatorToTransactionSignerMessages::try_recv(txn, session).is_some() {} - while db::TransactionSignerToCoordinatorMessages::try_recv(txn, session).is_some() {} - while db::CoordinatorToBatchSignerMessages::try_recv(txn, session).is_some() {} - while db::BatchSignerToCoordinatorMessages::try_recv(txn, session).is_some() {} - while db::CoordinatorToSlashReportSignerMessages::try_recv(txn, session).is_some() {} - while db::SlashReportSignerToCoordinatorMessages::try_recv(txn, session).is_some() {} - while db::CoordinatorToCosignerMessages::try_recv(txn, session).is_some() {} - while db::CosignerToCoordinatorMessages::try_recv(txn, session).is_some() {} - */ } } - -/* -// The signers used by a Processor, key-scoped. -struct KeySigners { - transaction: AttemptManager, - substrate: AttemptManager>, - cosigner: AttemptManager>, -} - -/// The signers used by a protocol. -pub struct Signers(HashMap, KeySigners>); - -impl Signers { - /// Create a new set of signers. - pub fn new(db: D) -> Self { - // TODO: Load the registered keys - // TODO: Load the transactions being signed - // TODO: Load the batches being signed - todo!("TODO") - } - - /// Register a transaction to sign. - pub fn sign_transaction(&mut self) -> Vec { - todo!("TODO") - } - /// Mark a transaction as signed. - pub fn signed_transaction(&mut self) { todo!("TODO") } - - /// Register a batch to sign. - pub fn sign_batch(&mut self, key: KeyFor, batch: Batch) -> Vec { - todo!("TODO") - } - /// Mark a batch as signed. - pub fn signed_batch(&mut self, batch: u32) { todo!("TODO") } - - /// Register a slash report to sign. - pub fn sign_slash_report(&mut self) -> Vec { - todo!("TODO") - } - /// Mark a slash report as signed. - pub fn signed_slash_report(&mut self) { todo!("TODO") } - - /// Start a cosigning protocol. - pub fn cosign(&mut self) { todo!("TODO") } - - /// Handle a message for a signing protocol. 
- pub fn handle(&mut self, msg: CoordinatorMessage) -> Vec { - todo!("TODO") - } -} -*/ diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs index 8fdf81455..be08cec27 100644 --- a/processor/signers/src/transaction/mod.rs +++ b/processor/signers/src/transaction/mod.rs @@ -3,31 +3,28 @@ use std::{ time::{Duration, Instant}, }; -use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; +use frost::dkg::ThresholdKeys; use serai_validator_sets_primitives::Session; use serai_db::{DbTxn, Db}; +use messages::sign::VariantSignId; + use primitives::task::ContinuallyRan; -use scheduler::{Transaction, SignableTransaction, TransactionsToSign}; +use scheduler::{Transaction, SignableTransaction, TransactionFor, TransactionsToSign}; +use scanner::CompletedEventualities; use frost_attempt_manager::*; use crate::{ - db::{ - CoordinatorToTransactionSignerMessages, TransactionSignerToCoordinatorMessages, - CompletedEventualitiesForEachKey, - }, + db::{CoordinatorToTransactionSignerMessages, TransactionSignerToCoordinatorMessages}, TransactionPublisher, }; mod db; use db::*; -type TransactionFor = - <::PreprocessMachine as PreprocessMachine>::Signature; - // Fetches transactions to sign and signs them. pub(crate) struct TransactionTask< D: Db, @@ -76,7 +73,7 @@ impl> for keys in &keys { machines.push(signable_transaction.clone().sign(keys.clone())); } - attempt_manager.register(tx, machines); + attempt_manager.register(VariantSignId::Transaction(tx), machines); } Self { @@ -123,7 +120,7 @@ impl> for keys in &self.keys { machines.push(tx.clone().sign(keys.clone())); } - for msg in self.attempt_manager.register(tx.id(), machines) { + for msg in self.attempt_manager.register(VariantSignId::Transaction(tx.id()), machines) { TransactionSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); } @@ -133,28 +130,42 @@ impl> // Check for completed Eventualities (meaning we should no longer sign for these transactions) loop { let mut txn = self.db.txn(); - let Some(id) = CompletedEventualitiesForEachKey::try_recv(&mut txn, self.session) else { + let Some(id) = CompletedEventualities::try_recv(&mut txn, &self.keys[0].group_key()) else { break; }; - iterated = true; - // This may or may not be an ID this key was responsible for - if self.active_signing_protocols.remove(&id) { - // Since it was, remove this as an active signing protocol - ActiveSigningProtocols::set( - &mut txn, - self.session, - &self.active_signing_protocols.iter().copied().collect(), - ); - // Clean up the database - SerializedSignableTransactions::del(&mut txn, id); - SerializedTransactions::del(&mut txn, id); - - // We retire with a txn so we either successfully flag this Eventuality as completed, and - // won't re-register it (making this retire safe), or we don't flag it, meaning we will - // re-register it, yet that's safe as we have yet to retire it - self.attempt_manager.retire(&mut txn, id); + /* + We may have yet to register this signing protocol. + + While `TransactionsToSign` is populated before `CompletedEventualities`, we could + theoretically have `TransactionsToSign` populated with a new transaction _while iterating + over `CompletedEventualities`_, and then have `CompletedEventualities` populated. In that + edge case, we will see the completion notification before we see the transaction. + + In such a case, we break (dropping the txn, re-queueing the completion notification). 
On + the task's next iteration, we'll process the transaction from `TransactionsToSign` and be + able to make progress. + */ + if !self.active_signing_protocols.remove(&id) { + break; } + iterated = true; + + // Since it was, remove this as an active signing protocol + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + // Clean up the database + SerializedSignableTransactions::del(&mut txn, id); + SerializedTransactions::del(&mut txn, id); + + // We retire with a txn so we either successfully flag this Eventuality as completed, and + // won't re-register it (making this retire safe), or we don't flag it, meaning we will + // re-register it, yet that's safe as we have yet to retire it + self.attempt_manager.retire(&mut txn, VariantSignId::Transaction(id)); + txn.commit(); } @@ -178,7 +189,14 @@ impl> { let mut buf = Vec::with_capacity(256); signed_tx.write(&mut buf).unwrap(); - SerializedTransactions::set(&mut txn, id, &buf); + SerializedTransactions::set( + &mut txn, + match id { + VariantSignId::Transaction(id) => id, + _ => panic!("TransactionTask signed a non-transaction"), + }, + &buf, + ); } self From aa712067e09a9600127929709e739b525b86e9f3 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 8 Sep 2024 23:42:18 -0400 Subject: [PATCH 083/179] Replace scanner's BatchPublisher with a pair of DB channels --- processor/scanner/Cargo.toml | 2 +- processor/scanner/src/db.rs | 67 ++++++++++++++++++++++---- processor/scanner/src/lib.rs | 34 +++++-------- processor/scanner/src/report/db.rs | 30 +++++++++++- processor/scanner/src/report/mod.rs | 52 ++++++++++++-------- processor/scanner/src/scan/mod.rs | 18 +++++-- processor/scanner/src/substrate/mod.rs | 8 ++- 7 files changed, 151 insertions(+), 60 deletions(-) diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml index 2a3e7e0af..e3e083296 100644 --- a/processor/scanner/Cargo.toml +++ b/processor/scanner/Cargo.toml @@ -35,7 +35,7 @@ tokio = { version = "1", default-features = false, features = ["rt-multi-thread" serai-db = { path = "../../common/db" } serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } -serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std"] } +serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std", "borsh"] } serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] } primitives = { package = "serai-processor-primitives", path = "../primitives" } diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index f72fa202f..f54ff8e18 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -2,11 +2,12 @@ use core::marker::PhantomData; use std::io::{self, Read, Write}; use group::GroupEncoding; + use scale::{Encode, Decode, IoReader}; use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db, db_channel}; -use serai_in_instructions_primitives::InInstructionWithBalance; +use serai_in_instructions_primitives::{InInstructionWithBalance, Batch}; use serai_coins_primitives::OutInstructionWithBalance; use primitives::{EncodableG, Address, ReceivedOutput}; @@ -105,6 +106,10 @@ create_db!( pub(crate) struct ScannerGlobalDb(PhantomData); impl ScannerGlobalDb { + pub(crate) fn has_any_key_been_queued(getter: &impl 
Get) -> bool { + ActiveKeys::get::>>(getter).is_some() + } + /// Queue a key. /// /// Keys may be queued whenever, so long as they're scheduled to activate `WINDOW_LENGTH` blocks @@ -460,15 +465,20 @@ db_channel! { } } +pub(crate) struct InInstructionData { + pub(crate) external_key_for_session_to_sign_batch: KeyFor, + pub(crate) returnable_in_instructions: Vec>, +} + pub(crate) struct ScanToReportDb(PhantomData); impl ScanToReportDb { pub(crate) fn send_in_instructions( txn: &mut impl DbTxn, block_number: u64, - returnable_in_instructions: &[Returnable], + data: &InInstructionData, ) { - let mut buf = vec![]; - for returnable_in_instruction in returnable_in_instructions { + let mut buf = data.external_key_for_session_to_sign_batch.to_bytes().as_ref().to_vec(); + for returnable_in_instruction in &data.returnable_in_instructions { returnable_in_instruction.write(&mut buf).unwrap(); } InInstructions::send( @@ -481,7 +491,7 @@ impl ScanToReportDb { pub(crate) fn recv_in_instructions( txn: &mut impl DbTxn, block_number: u64, - ) -> Vec> { + ) -> InInstructionData { let data = InInstructions::try_recv(txn, ()) .expect("receiving InInstructions for a scanned block not yet sent"); assert_eq!( @@ -490,11 +500,20 @@ impl ScanToReportDb { ); let mut buf = data.returnable_in_instructions.as_slice(); + let external_key_for_session_to_sign_batch = { + let mut external_key_for_session_to_sign_batch = + as GroupEncoding>::Repr::default(); + let key_len = external_key_for_session_to_sign_batch.as_ref().len(); + external_key_for_session_to_sign_batch.as_mut().copy_from_slice(&buf[.. key_len]); + buf = &buf[key_len ..]; + KeyFor::::from_bytes(&external_key_for_session_to_sign_batch).unwrap() + }; + let mut returnable_in_instructions = vec![]; while !buf.is_empty() { returnable_in_instructions.push(Returnable::read(&mut buf).unwrap()); } - returnable_in_instructions + InInstructionData { external_key_for_session_to_sign_batch, returnable_in_instructions } } } @@ -522,25 +541,55 @@ impl SubstrateToEventualityDb { } } -mod _completed_eventualities { +mod _public_db { + use serai_in_instructions_primitives::Batch; + use serai_db::{Get, DbTxn, create_db, db_channel}; db_channel! { ScannerPublic { + BatchToSign: (key: &[u8]) -> Batch, + AcknowledgedBatch: (key: &[u8]) -> u32, CompletedEventualities: (key: &[u8]) -> [u8; 32], } } } +/// The batches to sign and publish. +pub struct BatchToSign(PhantomData); +impl BatchToSign { + pub(crate) fn send(txn: &mut impl DbTxn, key: &K, batch: &Batch) { + _public_db::BatchToSign::send(txn, key.to_bytes().as_ref(), batch); + } + + /// Receive a batch to sign and publish. + pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option { + _public_db::BatchToSign::try_recv(txn, key.to_bytes().as_ref()) + } +} + +/// The batches which were acknowledged on-chain. +pub struct AcknowledgedBatch(PhantomData); +impl AcknowledgedBatch { + pub(crate) fn send(txn: &mut impl DbTxn, key: &K, batch: u32) { + _public_db::AcknowledgedBatch::send(txn, key.to_bytes().as_ref(), &batch); + } + + /// Receive the ID of a batch which was acknowledged. + pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option { + _public_db::AcknowledgedBatch::try_recv(txn, key.to_bytes().as_ref()) + } +} + /// The IDs of completed Eventualities found on-chain, within a finalized block. 
pub struct CompletedEventualities(PhantomData); impl CompletedEventualities { pub(crate) fn send(txn: &mut impl DbTxn, key: &K, id: [u8; 32]) { - _completed_eventualities::CompletedEventualities::send(txn, key.to_bytes().as_ref(), &id); + _public_db::CompletedEventualities::send(txn, key.to_bytes().as_ref(), &id); } /// Receive the ID of a completed Eventuality. pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option<[u8; 32]> { - _completed_eventualities::CompletedEventualities::try_recv(txn, key.to_bytes().as_ref()) + _public_db::CompletedEventualities::try_recv(txn, key.to_bytes().as_ref()) } } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 3323c6ff3..bcd195ec5 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -10,7 +10,6 @@ use group::GroupEncoding; use serai_db::{Get, DbTxn, Db}; use serai_primitives::{NetworkId, Coin, Amount}; -use serai_in_instructions_primitives::Batch; use serai_coins_primitives::OutInstructionWithBalance; use primitives::{task::*, Address, ReceivedOutput, Block, Payment}; @@ -21,7 +20,8 @@ pub use lifetime::LifetimeStage; // Database schema definition and associated functions. mod db; -pub use db::CompletedEventualities; +use db::ScannerGlobalDb; +pub use db::{BatchToSign, AcknowledgedBatch, CompletedEventualities}; // Task to index the blockchain, ensuring we don't reorganize finalized blocks. mod index; // Scans blocks for received coins. @@ -171,24 +171,6 @@ pub type EventualityFor = <::Block as Block>::Eventuality; /// The block type for this ScannerFeed. pub type BlockFor = ::Block; -/// An object usable to publish a Batch. -// This will presumably be the Batch signer defined in `serai-processor-signers` or a test shim. -// It could also be some app-layer database for the purpose of verifying the Batches published to -// Serai. -#[async_trait::async_trait] -pub trait BatchPublisher: 'static + Send + Sync { - /// An error encountered when publishing the Batch. - /// - /// This MUST be an ephemeral error. Retrying publication MUST eventually resolve without manual - /// intervention/changing the arguments. - type EphemeralError: Debug; - - /// Publish a Batch. - /// - /// This function must be safe to call with the same Batch multiple times. - async fn publish_batch(&mut self, batch: Batch) -> Result<(), Self::EphemeralError>; -} - /// A return to occur. pub struct Return { address: AddressFor, @@ -351,14 +333,20 @@ impl Scanner { /// /// This will begin its execution, spawning several asynchronous tasks. 
pub async fn new>( - db: impl Db, + mut db: impl Db, feed: S, - batch_publisher: impl BatchPublisher, start_block: u64, + start_key: KeyFor, ) -> Self { + if !ScannerGlobalDb::::has_any_key_been_queued(&db) { + let mut txn = db.txn(); + ScannerGlobalDb::::queue_key(&mut txn, start_block, start_key); + txn.commit(); + } + let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await; let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block); - let report_task = report::ReportTask::<_, S, _>::new(db.clone(), batch_publisher, start_block); + let report_task = report::ReportTask::<_, S>::new(db.clone(), start_block); let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone()); let eventuality_task = eventuality::EventualityTask::<_, _, Sch>::new(db, feed, start_block); diff --git a/processor/scanner/src/report/db.rs b/processor/scanner/src/report/db.rs index baff66354..052397791 100644 --- a/processor/scanner/src/report/db.rs +++ b/processor/scanner/src/report/db.rs @@ -1,6 +1,8 @@ use core::marker::PhantomData; use std::io::{Read, Write}; +use group::GroupEncoding; + use scale::{Encode, Decode, IoReader}; use serai_db::{Get, DbTxn, create_db}; @@ -8,7 +10,7 @@ use serai_primitives::Balance; use primitives::Address; -use crate::{ScannerFeed, AddressFor}; +use crate::{ScannerFeed, KeyFor, AddressFor}; create_db!( ScannerReport { @@ -20,6 +22,9 @@ create_db!( // The block number which caused a batch BlockNumberForBatch: (batch: u32) -> u64, + // The external key for the session which should sign a batch + ExternalKeyForSessionToSignBatch: (batch: u32) -> Vec, + // The return addresses for the InInstructions within a Batch SerializedReturnAddresses: (batch: u32) -> Vec, } @@ -55,6 +60,29 @@ impl ReportDb { Some(block_number) } + pub(crate) fn save_external_key_for_session_to_sign_batch( + txn: &mut impl DbTxn, + id: u32, + external_key_for_session_to_sign_batch: &KeyFor, + ) { + ExternalKeyForSessionToSignBatch::set( + txn, + id, + &external_key_for_session_to_sign_batch.to_bytes().as_ref().to_vec(), + ); + } + + pub(crate) fn take_external_key_for_session_to_sign_batch( + txn: &mut impl DbTxn, + id: u32, + ) -> Option> { + ExternalKeyForSessionToSignBatch::get(txn, id).map(|key_vec| { + let mut key = as GroupEncoding>::Repr::default(); + key.as_mut().copy_from_slice(&key_vec); + KeyFor::::from_bytes(&key).unwrap() + }) + } + pub(crate) fn save_return_information( txn: &mut impl DbTxn, id: u32, diff --git a/processor/scanner/src/report/mod.rs b/processor/scanner/src/report/mod.rs index ba851713b..f983d0e77 100644 --- a/processor/scanner/src/report/mod.rs +++ b/processor/scanner/src/report/mod.rs @@ -8,28 +8,35 @@ use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; use primitives::task::ContinuallyRan; use crate::{ - db::{Returnable, ScannerGlobalDb, ScanToReportDb}, + db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToReportDb, BatchToSign}, index, scan::next_to_scan_for_outputs_block, - ScannerFeed, BatchPublisher, + ScannerFeed, KeyFor, }; mod db; pub(crate) use db::ReturnInformation; use db::ReportDb; -pub(crate) fn take_return_information( +pub(crate) fn take_block_number_for_batch( txn: &mut impl DbTxn, id: u32, -) -> Option>>> { - ReportDb::::take_return_information(txn, id) +) -> Option { + ReportDb::::take_block_number_for_batch(txn, id) } -pub(crate) fn take_block_number_for_batch( +pub(crate) fn take_external_key_for_session_to_sign_batch( txn: &mut impl DbTxn, id: u32, -) -> Option { - 
ReportDb::::take_block_number_for_batch(txn, id) +) -> Option> { + ReportDb::::take_external_key_for_session_to_sign_batch(txn, id) +} + +pub(crate) fn take_return_information( + txn: &mut impl DbTxn, + id: u32, +) -> Option>>> { + ReportDb::::take_return_information(txn, id) } /* @@ -40,14 +47,13 @@ pub(crate) fn take_block_number_for_batch( the InInstructions for it. */ #[allow(non_snake_case)] -pub(crate) struct ReportTask { +pub(crate) struct ReportTask { db: D, - batch_publisher: B, _S: PhantomData, } -impl ReportTask { - pub(crate) fn new(mut db: D, batch_publisher: B, start_block: u64) -> Self { +impl ReportTask { + pub(crate) fn new(mut db: D, start_block: u64) -> Self { if ReportDb::::next_to_potentially_report_block(&db).is_none() { // Initialize the DB let mut txn = db.txn(); @@ -55,12 +61,12 @@ impl ReportTask { txn.commit(); } - Self { db, batch_publisher, _S: PhantomData } + Self { db, _S: PhantomData } } } #[async_trait::async_trait] -impl ContinuallyRan for ReportTask { +impl ContinuallyRan for ReportTask { async fn run_iteration(&mut self) -> Result { let highest_reportable = { // Fetch the next to scan block @@ -87,7 +93,10 @@ impl ContinuallyRan for ReportTask::recv_in_instructions(&mut txn, b); + let InInstructionData { + external_key_for_session_to_sign_batch, + returnable_in_instructions: in_instructions, + } = ScanToReportDb::::recv_in_instructions(&mut txn, b); let notable = ScannerGlobalDb::::is_block_notable(&txn, b); if !notable { assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions"); @@ -138,19 +147,20 @@ impl ContinuallyRan for ReportTask::save_external_key_for_session_to_sign_batch( + &mut txn, + batch.id, + &external_key_for_session_to_sign_batch, + ); ReportDb::::save_return_information(&mut txn, batch.id, return_information); } for batch in batches { - self - .batch_publisher - .publish_batch(batch) - .await - .map_err(|e| format!("failed to publish batch: {e:?}"))?; + BatchToSign::send(&mut txn, &external_key_for_session_to_sign_batch, &batch); } } diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index 51671dc68..91c97f60f 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -13,8 +13,8 @@ use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Block}; use crate::{ lifetime::LifetimeStage, db::{ - OutputWithInInstruction, Returnable, SenderScanData, ScannerGlobalDb, ScanToReportDb, - ScanToEventualityDb, + OutputWithInInstruction, Returnable, SenderScanData, ScannerGlobalDb, InInstructionData, + ScanToReportDb, ScanToEventualityDb, }, BlockExt, ScannerFeed, AddressFor, OutputFor, Return, sort_outputs, eventuality::latest_scannable_block, @@ -166,7 +166,7 @@ impl ContinuallyRan for ScanTask { let mut costs_to_aggregate = HashMap::with_capacity(1); // Scan for each key - for key in keys { + for key in &keys { for output in block.scan_for_outputs(key.key) { assert_eq!(output.key(), key.key); @@ -339,7 +339,17 @@ impl ContinuallyRan for ScanTask { let in_instructions = in_instructions.into_iter().map(|(_id, in_instruction)| in_instruction).collect::>(); // Send the InInstructions to the report task - ScanToReportDb::::send_in_instructions(&mut txn, b, &in_instructions); + // We need to also specify which key is responsible for signing the Batch for these, which + // will always be the oldest key (as the new key signing the Batch signifies handover + // acceptance) + ScanToReportDb::::send_in_instructions( + &mut txn, + b, + &InInstructionData { + 
external_key_for_session_to_sign_batch: keys[0].key, + returnable_in_instructions: in_instructions, + }, + ); // Send the scan data to the eventuality task ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); diff --git a/processor/scanner/src/substrate/mod.rs b/processor/scanner/src/substrate/mod.rs index d67be9dc3..6f9cd86b8 100644 --- a/processor/scanner/src/substrate/mod.rs +++ b/processor/scanner/src/substrate/mod.rs @@ -6,7 +6,7 @@ use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance}; use primitives::task::ContinuallyRan; use crate::{ - db::{ScannerGlobalDb, SubstrateToEventualityDb}, + db::{ScannerGlobalDb, SubstrateToEventualityDb, AcknowledgedBatch}, report, ScannerFeed, KeyFor, }; @@ -79,6 +79,12 @@ impl ContinuallyRan for SubstrateTask { return Ok(made_progress); }; + { + let external_key_for_session_to_sign_batch = + report::take_external_key_for_session_to_sign_batch::(&mut txn, batch_id).unwrap(); + AcknowledgedBatch::send(&mut txn, &external_key_for_session_to_sign_batch, batch_id); + } + // Mark we made progress and handle this made_progress = true; From ac106e3d2dd748b106c7ae8c1b09cbfe69129518 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 9 Sep 2024 01:01:29 -0400 Subject: [PATCH 084/179] Add BatchSignerTask Uses a wrapper around AlgorithmMachine Schnorrkel to let the message be &[]. --- Cargo.lock | 3 + processor/scanner/src/db.rs | 20 +-- processor/scanner/src/lib.rs | 2 +- processor/scanner/src/report/mod.rs | 4 +- processor/scanner/src/substrate/mod.rs | 4 +- processor/signers/Cargo.toml | 3 + processor/signers/src/batch/db.rs | 13 ++ processor/signers/src/batch/mod.rs | 180 ++++++++++++++++++++ processor/signers/src/coordinator.rs | 2 + processor/signers/src/lib.rs | 78 +++++++-- processor/signers/src/transaction/mod.rs | 17 +- processor/signers/src/wrapped_schnorrkel.rs | 86 ++++++++++ processor/src/multisigs/mod.rs | 8 - 13 files changed, 370 insertions(+), 50 deletions(-) create mode 100644 processor/signers/src/batch/db.rs create mode 100644 processor/signers/src/batch/mod.rs create mode 100644 processor/signers/src/wrapped_schnorrkel.rs delete mode 100644 processor/src/multisigs/mod.rs diff --git a/Cargo.lock b/Cargo.lock index d6b0e3def..9db0bb741 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8724,10 +8724,13 @@ dependencies = [ "async-trait", "borsh", "ciphersuite", + "frost-schnorrkel", "log", "modular-frost", "parity-scale-codec", + "rand_core", "serai-db", + "serai-in-instructions-primitives", "serai-processor-frost-attempt-manager", "serai-processor-messages", "serai-processor-primitives", diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index f54ff8e18..3dd5a2e20 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -548,36 +548,36 @@ mod _public_db { db_channel! { ScannerPublic { - BatchToSign: (key: &[u8]) -> Batch, - AcknowledgedBatch: (key: &[u8]) -> u32, + BatchesToSign: (key: &[u8]) -> Batch, + AcknowledgedBatches: (key: &[u8]) -> u32, CompletedEventualities: (key: &[u8]) -> [u8; 32], } } } /// The batches to sign and publish. -pub struct BatchToSign(PhantomData); -impl BatchToSign { +pub struct BatchesToSign(PhantomData); +impl BatchesToSign { pub(crate) fn send(txn: &mut impl DbTxn, key: &K, batch: &Batch) { - _public_db::BatchToSign::send(txn, key.to_bytes().as_ref(), batch); + _public_db::BatchesToSign::send(txn, key.to_bytes().as_ref(), batch); } /// Receive a batch to sign and publish. 
pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option { - _public_db::BatchToSign::try_recv(txn, key.to_bytes().as_ref()) + _public_db::BatchesToSign::try_recv(txn, key.to_bytes().as_ref()) } } /// The batches which were acknowledged on-chain. -pub struct AcknowledgedBatch(PhantomData); -impl AcknowledgedBatch { +pub struct AcknowledgedBatches(PhantomData); +impl AcknowledgedBatches { pub(crate) fn send(txn: &mut impl DbTxn, key: &K, batch: u32) { - _public_db::AcknowledgedBatch::send(txn, key.to_bytes().as_ref(), &batch); + _public_db::AcknowledgedBatches::send(txn, key.to_bytes().as_ref(), &batch); } /// Receive the ID of a batch which was acknowledged. pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option { - _public_db::AcknowledgedBatch::try_recv(txn, key.to_bytes().as_ref()) + _public_db::AcknowledgedBatches::try_recv(txn, key.to_bytes().as_ref()) } } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index bcd195ec5..e5b39cdd2 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -21,7 +21,7 @@ pub use lifetime::LifetimeStage; // Database schema definition and associated functions. mod db; use db::ScannerGlobalDb; -pub use db::{BatchToSign, AcknowledgedBatch, CompletedEventualities}; +pub use db::{BatchesToSign, AcknowledgedBatches, CompletedEventualities}; // Task to index the blockchain, ensuring we don't reorganize finalized blocks. mod index; // Scans blocks for received coins. diff --git a/processor/scanner/src/report/mod.rs b/processor/scanner/src/report/mod.rs index f983d0e77..309b44aaa 100644 --- a/processor/scanner/src/report/mod.rs +++ b/processor/scanner/src/report/mod.rs @@ -8,7 +8,7 @@ use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; use primitives::task::ContinuallyRan; use crate::{ - db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToReportDb, BatchToSign}, + db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToReportDb, BatchesToSign}, index, scan::next_to_scan_for_outputs_block, ScannerFeed, KeyFor, @@ -160,7 +160,7 @@ impl ContinuallyRan for ReportTask { } for batch in batches { - BatchToSign::send(&mut txn, &external_key_for_session_to_sign_batch, &batch); + BatchesToSign::send(&mut txn, &external_key_for_session_to_sign_batch, &batch); } } diff --git a/processor/scanner/src/substrate/mod.rs b/processor/scanner/src/substrate/mod.rs index 6f9cd86b8..76961c378 100644 --- a/processor/scanner/src/substrate/mod.rs +++ b/processor/scanner/src/substrate/mod.rs @@ -6,7 +6,7 @@ use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance}; use primitives::task::ContinuallyRan; use crate::{ - db::{ScannerGlobalDb, SubstrateToEventualityDb, AcknowledgedBatch}, + db::{ScannerGlobalDb, SubstrateToEventualityDb, AcknowledgedBatches}, report, ScannerFeed, KeyFor, }; @@ -82,7 +82,7 @@ impl ContinuallyRan for SubstrateTask { { let external_key_for_session_to_sign_batch = report::take_external_key_for_session_to_sign_batch::(&mut txn, batch_id).unwrap(); - AcknowledgedBatch::send(&mut txn, &external_key_for_session_to_sign_batch, batch_id); + AcknowledgedBatches::send(&mut txn, &external_key_for_session_to_sign_batch, batch_id); } // Mark we made progress and handle this diff --git a/processor/signers/Cargo.toml b/processor/signers/Cargo.toml index 3a96c0435..91192a9e3 100644 --- a/processor/signers/Cargo.toml +++ b/processor/signers/Cargo.toml @@ -21,15 +21,18 @@ workspace = true [dependencies] async-trait = { version = "0.1", default-features = false } +rand_core = { version = "0.6", 
default-features = false } zeroize = { version = "1", default-features = false, features = ["std"] } ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } +frost-schnorrkel = { path = "../../crypto/schnorrkel", default-features = false } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } +serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std"] } serai-db = { path = "../../common/db" } log = { version = "0.4", default-features = false, features = ["std"] } diff --git a/processor/signers/src/batch/db.rs b/processor/signers/src/batch/db.rs new file mode 100644 index 000000000..fec0a8946 --- /dev/null +++ b/processor/signers/src/batch/db.rs @@ -0,0 +1,13 @@ +use serai_validator_sets_primitives::Session; +use serai_in_instructions_primitives::{Batch, SignedBatch}; + +use serai_db::{Get, DbTxn, create_db}; + +create_db! { + BatchSigner { + ActiveSigningProtocols: (session: Session) -> Vec, + Batches: (id: u32) -> Batch, + SignedBatches: (id: u32) -> SignedBatch, + LastAcknowledgedBatch: () -> u32, + } +} diff --git a/processor/signers/src/batch/mod.rs b/processor/signers/src/batch/mod.rs new file mode 100644 index 000000000..410ca3781 --- /dev/null +++ b/processor/signers/src/batch/mod.rs @@ -0,0 +1,180 @@ +use std::collections::HashSet; + +use ciphersuite::{group::GroupEncoding, Ristretto}; +use frost::dkg::ThresholdKeys; + +use serai_validator_sets_primitives::Session; +use serai_in_instructions_primitives::{SignedBatch, batch_message}; + +use serai_db::{DbTxn, Db}; + +use messages::sign::VariantSignId; + +use primitives::task::ContinuallyRan; +use scanner::{BatchesToSign, AcknowledgedBatches}; + +use frost_attempt_manager::*; + +use crate::{ + db::{CoordinatorToBatchSignerMessages, BatchSignerToCoordinatorMessages}, + WrappedSchnorrkelMachine, +}; + +mod db; +use db::*; + +// Fetches batches to sign and signs them. 
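+//
+// A sketch of the lifecycle implemented below (descriptive only):
+//   1. Receive a Batch from `BatchesToSign` and persist it, so signing resumes across reboots.
+//   2. Register FROST machines with the attempt manager, exchanging preprocesses and shares
+//      with the coordinator via the DB channels.
+//   3. Once the Batch is acknowledged on-chain, retire the protocol and prune its DB entries.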
+pub(crate) struct BatchSignerTask<D: Db, E: GroupEncoding> {
+  db: D,
+
+  session: Session,
+  external_key: E,
+  keys: Vec<ThresholdKeys<Ristretto>>,
+
+  active_signing_protocols: HashSet<u32>,
+  attempt_manager: AttemptManager<D, WrappedSchnorrkelMachine>,
+}
+
+impl<D: Db, E: GroupEncoding> BatchSignerTask<D, E> {
+  pub(crate) fn new(
+    db: D,
+    session: Session,
+    external_key: E,
+    keys: Vec<ThresholdKeys<Ristretto>>,
+  ) -> Self {
+    let mut active_signing_protocols = HashSet::new();
+    let mut attempt_manager = AttemptManager::new(
+      db.clone(),
+      session,
+      keys.first().expect("creating a batch signer with 0 keys").params().i(),
+    );
+
+    // Re-register all active signing protocols
+    for id in ActiveSigningProtocols::get(&db, session).unwrap_or(vec![]) {
+      active_signing_protocols.insert(id);
+
+      let batch = Batches::get(&db, id).unwrap();
+      assert_eq!(batch.id, id);
+
+      let mut machines = Vec::with_capacity(keys.len());
+      for keys in &keys {
+        machines.push(WrappedSchnorrkelMachine::new(keys.clone(), batch_message(&batch)));
+      }
+      attempt_manager.register(VariantSignId::Batch(id), machines);
+    }
+
+    Self { db, session, external_key, keys, active_signing_protocols, attempt_manager }
+  }
+}
+
+#[async_trait::async_trait]
+impl<D: Db, E: GroupEncoding> ContinuallyRan for BatchSignerTask<D, E> {
+  async fn run_iteration(&mut self) -> Result<bool, String> {
+    let mut iterated = false;
+
+    // Check for new batches to sign
+    loop {
+      let mut txn = self.db.txn();
+      let Some(batch) = BatchesToSign::try_recv(&mut txn, &self.external_key) else {
+        break;
+      };
+      iterated = true;
+
+      // Save this to the database as a Batch to sign
+      self.active_signing_protocols.insert(batch.id);
+      ActiveSigningProtocols::set(
+        &mut txn,
+        self.session,
+        &self.active_signing_protocols.iter().copied().collect(),
+      );
+      Batches::set(&mut txn, batch.id, &batch);
+
+      let mut machines = Vec::with_capacity(self.keys.len());
+      for keys in &self.keys {
+        machines.push(WrappedSchnorrkelMachine::new(keys.clone(), batch_message(&batch)));
+      }
+      for msg in self.attempt_manager.register(VariantSignId::Batch(batch.id), machines) {
+        BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
+      }
+
+      txn.commit();
+    }
+
+    // Check for acknowledged Batches (meaning we should no longer sign for these Batches)
+    loop {
+      let mut txn = self.db.txn();
+      let Some(id) = AcknowledgedBatches::try_recv(&mut txn, &self.external_key) else {
+        break;
+      };
+
+      {
+        let last_acknowledged = LastAcknowledgedBatch::get(&txn);
+        if Some(id) > last_acknowledged {
+          LastAcknowledgedBatch::set(&mut txn, &id);
+        }
+      }
+
+      /*
+        We may have yet to register this signing protocol.
+
+        While `BatchesToSign` is populated before `AcknowledgedBatches`, we could theoretically
+        have `BatchesToSign` populated with a new batch _while iterating over
+        `AcknowledgedBatches`_, and then have `AcknowledgedBatches` populated. In that edge case,
+        we will see the acknowledgement notification before we see the Batch itself.
+
+        In such a case, we break (dropping the txn, re-queueing the acknowledgement notification).
+        On the task's next iteration, we'll process the Batch from `BatchesToSign` and be
+        able to make progress.
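+
+        As a concrete illustration of that race: we finish draining `BatchesToSign`, batch `n`
+        is then sent down both channels, and our subsequent drain of `AcknowledgedBatches`
+        yields `n`'s acknowledgement despite us never having registered batch `n`.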
+ */ + if !self.active_signing_protocols.remove(&id) { + break; + } + iterated = true; + + // Since it was, remove this as an active signing protocol + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + // Clean up the database + Batches::del(&mut txn, id); + SignedBatches::del(&mut txn, id); + + // We retire with a txn so we either successfully flag this Batch as acknowledged, and + // won't re-register it (making this retire safe), or we don't flag it, meaning we will + // re-register it, yet that's safe as we have yet to retire it + self.attempt_manager.retire(&mut txn, VariantSignId::Batch(id)); + + txn.commit(); + } + + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToBatchSignerMessages::try_recv(&mut txn, self.session) else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature } => { + let VariantSignId::Batch(id) = id else { panic!("BatchSignerTask signed a non-Batch") }; + let batch = + Batches::get(&txn, id).expect("signed a Batch we didn't save to the database"); + let signed_batch = SignedBatch { batch, signature: signature.into() }; + SignedBatches::set(&mut txn, signed_batch.batch.id, &signed_batch); + } + } + + txn.commit(); + } + + Ok(iterated) + } +} diff --git a/processor/signers/src/coordinator.rs b/processor/signers/src/coordinator.rs index 43dcc571e..c87dc4bb0 100644 --- a/processor/signers/src/coordinator.rs +++ b/processor/signers/src/coordinator.rs @@ -93,6 +93,8 @@ impl ContinuallyRan for CoordinatorTask { } } + // TODO: For max(last acknowledged batch, last published batch) onwards, publish every batch + Ok(iterated) } } diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index a53f22085..def6ef165 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -11,6 +11,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use frost::dkg::{ThresholdCore, ThresholdKeys}; use serai_validator_sets_primitives::Session; +use serai_in_instructions_primitives::SignedBatch; use serai_db::{DbTxn, Db}; @@ -19,25 +20,34 @@ use messages::sign::{VariantSignId, ProcessorMessage, CoordinatorMessage}; use primitives::task::{Task, TaskHandle, ContinuallyRan}; use scheduler::{Transaction, SignableTransaction, TransactionFor}; +mod wrapped_schnorrkel; +pub(crate) use wrapped_schnorrkel::WrappedSchnorrkelMachine; + pub(crate) mod db; mod coordinator; use coordinator::CoordinatorTask; +mod batch; +use batch::BatchSignerTask; + mod transaction; -use transaction::TransactionTask; +use transaction::TransactionSignerTask; /// A connection to the Coordinator which messages can be published with. #[async_trait::async_trait] pub trait Coordinator: 'static + Send + Sync { - /// An error encountered when sending a message. + /// An error encountered when interacting with a coordinator. /// - /// This MUST be an ephemeral error. Retrying sending a message MUST eventually resolve without + /// This MUST be an ephemeral error. Retrying an interaction MUST eventually resolve without /// manual intervention/changing the arguments. type EphemeralError: Debug; /// Send a `messages::sign::ProcessorMessage`. async fn send(&mut self, message: ProcessorMessage) -> Result<(), Self::EphemeralError>; + + /// Publish a `SignedBatch`. 
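+  ///
+  /// As with `send`, any error here must be ephemeral; the coordinator task will retry
+  /// publication until it succeeds.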
+ async fn publish_batch(&mut self, batch: SignedBatch) -> Result<(), Self::EphemeralError>; } /// An object capable of publishing a transaction. @@ -111,13 +121,18 @@ impl Signers { ::read_G(&mut external_key_bytes).unwrap(); assert!(external_key_bytes.is_empty()); + // Drain the Batches to sign + // This will be fully populated by the scanner before retiry occurs, making this perfect + // in not leaving any pending blobs behind + while scanner::BatchesToSign::try_recv(&mut txn, &external_key).is_some() {} + // Drain the acknowledged batches to no longer sign + while scanner::AcknowledgedBatches::try_recv(&mut txn, &external_key).is_some() {} + // Drain the transactions to sign - // TransactionsToSign will be fully populated by the scheduler before retiry occurs, making - // this perfect in not leaving any pending blobs behind + // This will be fully populated by the scheduler before retiry while scheduler::TransactionsToSign::::try_recv(&mut txn, &external_key).is_some() {} // Drain the completed Eventualities - // This will be fully populated by the scanner before retiry while scanner::CompletedEventualities::try_recv(&mut txn, &external_key).is_some() {} // Drain our DB channels @@ -156,18 +171,37 @@ impl Signers { // TODO: Batch signer, cosigner, slash report signers + let (batch_task, batch_handle) = Task::new(); + tokio::spawn( + BatchSignerTask::new( + db.clone(), + session, + external_keys[0].group_key(), + substrate_keys.clone(), + ) + .continually_run(batch_task, vec![coordinator_handle.clone()]), + ); + let (transaction_task, transaction_handle) = Task::new(); tokio::spawn( - TransactionTask::<_, ST, _>::new(db.clone(), publisher.clone(), session, external_keys) - .continually_run(transaction_task, vec![coordinator_handle.clone()]), + TransactionSignerTask::<_, ST, _>::new( + db.clone(), + publisher.clone(), + session, + external_keys, + ) + .continually_run(transaction_task, vec![coordinator_handle.clone()]), ); - tasks.insert(session, Tasks { - cosigner: todo!("TODO"), - batch: todo!("TODO"), - slash_report: todo!("TODO"), - transaction: transaction_handle, - }); + tasks.insert( + session, + Tasks { + cosigner: todo!("TODO"), + batch: batch_handle, + slash_report: todo!("TODO"), + transaction: transaction_handle, + }, + ); } Self { coordinator_handle, tasks, _ST: PhantomData } @@ -246,19 +280,27 @@ impl Signers { match sign_id.id { VariantSignId::Cosign(_) => { db::CoordinatorToCosignerMessages::send(txn, sign_id.session, message); - if let Some(tasks) = tasks { tasks.cosigner.run_now(); } + if let Some(tasks) = tasks { + tasks.cosigner.run_now(); + } } VariantSignId::Batch(_) => { db::CoordinatorToBatchSignerMessages::send(txn, sign_id.session, message); - if let Some(tasks) = tasks { tasks.batch.run_now(); } + if let Some(tasks) = tasks { + tasks.batch.run_now(); + } } VariantSignId::SlashReport(_) => { db::CoordinatorToSlashReportSignerMessages::send(txn, sign_id.session, message); - if let Some(tasks) = tasks { tasks.slash_report.run_now(); } + if let Some(tasks) = tasks { + tasks.slash_report.run_now(); + } } VariantSignId::Transaction(_) => { db::CoordinatorToTransactionSignerMessages::send(txn, sign_id.session, message); - if let Some(tasks) = tasks { tasks.transaction.run_now(); } + if let Some(tasks) = tasks { + tasks.transaction.run_now(); + } } } } diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs index be08cec27..9311eb32b 100644 --- a/processor/signers/src/transaction/mod.rs +++ 
b/processor/signers/src/transaction/mod.rs @@ -26,7 +26,7 @@ mod db; use db::*; // Fetches transactions to sign and signs them. -pub(crate) struct TransactionTask< +pub(crate) struct TransactionSignerTask< D: Db, ST: SignableTransaction, P: TransactionPublisher>, @@ -44,7 +44,7 @@ pub(crate) struct TransactionTask< } impl>> - TransactionTask + TransactionSignerTask { pub(crate) fn new( db: D, @@ -90,7 +90,7 @@ impl> #[async_trait::async_trait] impl>> ContinuallyRan - for TransactionTask + for TransactionSignerTask { async fn run_iteration(&mut self) -> Result { let mut iterated = false; @@ -193,17 +193,16 @@ impl> &mut txn, match id { VariantSignId::Transaction(id) => id, - _ => panic!("TransactionTask signed a non-transaction"), + _ => panic!("TransactionSignerTask signed a non-transaction"), }, &buf, ); } - self - .publisher - .publish(signed_tx) - .await - .map_err(|e| format!("couldn't publish transaction: {e:?}"))?; + match self.publisher.publish(signed_tx).await { + Ok(()) => {} + Err(e) => log::warn!("couldn't broadcast transaction: {e:?}"), + } } } diff --git a/processor/signers/src/wrapped_schnorrkel.rs b/processor/signers/src/wrapped_schnorrkel.rs new file mode 100644 index 000000000..d81eaa705 --- /dev/null +++ b/processor/signers/src/wrapped_schnorrkel.rs @@ -0,0 +1,86 @@ +use std::{ + collections::HashMap, + io::{self, Read}, +}; + +use rand_core::{RngCore, CryptoRng}; + +use ciphersuite::Ristretto; +use frost::{ + dkg::{Participant, ThresholdKeys}, + FrostError, + algorithm::Algorithm, + sign::*, +}; +use frost_schnorrkel::Schnorrkel; + +// This wraps a Schnorrkel sign machine into one with a preset message. +#[derive(Clone)] +pub(crate) struct WrappedSchnorrkelMachine(ThresholdKeys, Vec); +impl WrappedSchnorrkelMachine { + pub(crate) fn new(keys: ThresholdKeys, msg: Vec) -> Self { + Self(keys, msg) + } +} + +pub(crate) struct WrappedSchnorrkelSignMachine( + as PreprocessMachine>::SignMachine, + Vec, +); + +type Signature = as PreprocessMachine>::Signature; +impl PreprocessMachine for WrappedSchnorrkelMachine { + type Preprocess = as PreprocessMachine>::Preprocess; + type Signature = Signature; + type SignMachine = WrappedSchnorrkelSignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Preprocess>::Addendum>) + { + let WrappedSchnorrkelMachine(keys, batch) = self; + let (machine, preprocess) = + AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys).preprocess(rng); + (WrappedSchnorrkelSignMachine(machine, batch), preprocess) + } +} + +impl SignMachine for WrappedSchnorrkelSignMachine { + type Params = as SignMachine>::Params; + type Keys = as SignMachine>::Keys; + type Preprocess = + as SignMachine>::Preprocess; + type SignatureShare = + as SignMachine>::SignatureShare; + type SignatureMachine = + as SignMachine>::SignatureMachine; + + fn cache(self) -> CachedPreprocess { + unimplemented!() + } + + fn from_cache( + _algorithm: Schnorrkel, + _keys: ThresholdKeys, + _cache: CachedPreprocess, + ) -> (Self, Self::Preprocess) { + unimplemented!() + } + + fn read_preprocess(&self, reader: &mut R) -> io::Result { + self.0.read_preprocess(reader) + } + + fn sign( + self, + preprocesses: HashMap< + Participant, + Preprocess>::Addendum>, + >, + msg: &[u8], + ) -> Result<(Self::SignatureMachine, SignatureShare), FrostError> { + assert!(msg.is_empty()); + self.0.sign(preprocesses, &self.1) + } +} diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs deleted file mode 100644 index 1c4adabf8..000000000 --- 
a/processor/src/multisigs/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -#[allow(clippy::type_complexity)] -#[derive(Clone, Debug)] -pub enum MultisigEvent { - // Batches to publish - Batches(Option<(::G, ::G)>, Vec), - // Eventuality completion found on-chain - Completed(Vec, [u8; 32], N::Eventuality), -} From 336ffe2cdf6eda85d0e928d5f51f422f059aad92 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 9 Sep 2024 01:15:56 -0400 Subject: [PATCH 085/179] Have the coordinator task publish Batches --- processor/signers/src/batch/db.rs | 2 +- processor/signers/src/batch/mod.rs | 10 +++++++- processor/signers/src/coordinator/db.rs | 7 ++++++ .../{coordinator.rs => coordinator/mod.rs} | 23 ++++++++++++++++++- processor/signers/src/lib.rs | 2 +- processor/signers/src/transaction/db.rs | 2 +- 6 files changed, 41 insertions(+), 5 deletions(-) create mode 100644 processor/signers/src/coordinator/db.rs rename processor/signers/src/{coordinator.rs => coordinator/mod.rs} (75%) diff --git a/processor/signers/src/batch/db.rs b/processor/signers/src/batch/db.rs index fec0a8946..a895e0bbb 100644 --- a/processor/signers/src/batch/db.rs +++ b/processor/signers/src/batch/db.rs @@ -4,7 +4,7 @@ use serai_in_instructions_primitives::{Batch, SignedBatch}; use serai_db::{Get, DbTxn, create_db}; create_db! { - BatchSigner { + SignersBatch { ActiveSigningProtocols: (session: Session) -> Vec, Batches: (id: u32) -> Batch, SignedBatches: (id: u32) -> SignedBatch, diff --git a/processor/signers/src/batch/mod.rs b/processor/signers/src/batch/mod.rs index 410ca3781..f08fb5e29 100644 --- a/processor/signers/src/batch/mod.rs +++ b/processor/signers/src/batch/mod.rs @@ -6,7 +6,7 @@ use frost::dkg::ThresholdKeys; use serai_validator_sets_primitives::Session; use serai_in_instructions_primitives::{SignedBatch, batch_message}; -use serai_db::{DbTxn, Db}; +use serai_db::{Get, DbTxn, Db}; use messages::sign::VariantSignId; @@ -23,6 +23,14 @@ use crate::{ mod db; use db::*; +pub(crate) fn last_acknowledged_batch(getter: &impl Get) -> Option { + LastAcknowledgedBatch::get(getter) +} + +pub(crate) fn signed_batch(getter: &impl Get, id: u32) -> Option { + SignedBatches::get(getter, id) +} + // Fetches batches to sign and signs them. pub(crate) struct BatchSignerTask { db: D, diff --git a/processor/signers/src/coordinator/db.rs b/processor/signers/src/coordinator/db.rs new file mode 100644 index 000000000..c8235ede0 --- /dev/null +++ b/processor/signers/src/coordinator/db.rs @@ -0,0 +1,7 @@ +use serai_db::{Get, DbTxn, create_db}; + +create_db! { + SignersCoordinator { + LastPublishedBatch: () -> u32, + } +} diff --git a/processor/signers/src/coordinator.rs b/processor/signers/src/coordinator/mod.rs similarity index 75% rename from processor/signers/src/coordinator.rs rename to processor/signers/src/coordinator/mod.rs index c87dc4bb0..3255603d2 100644 --- a/processor/signers/src/coordinator.rs +++ b/processor/signers/src/coordinator/mod.rs @@ -10,6 +10,8 @@ use crate::{ Coordinator, }; +mod db; + // Fetches messages to send the coordinator and sends them. 
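+//
+// Beyond draining the per-session signer message channels, this task now also publishes the
+// signed Batches (see the publication loop added below).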
pub(crate) struct CoordinatorTask { db: D, @@ -93,7 +95,26 @@ impl ContinuallyRan for CoordinatorTask { } } - // TODO: For max(last acknowledged batch, last published batch) onwards, publish every batch + // Publish the signed Batches + { + let mut txn = self.db.txn(); + // The last acknowledged Batch may exceed the last Batch we published if we didn't sign for + // the prior Batch(es) (and accordingly didn't publish them) + let last_batch = + crate::batch::last_acknowledged_batch(&txn).max(db::LastPublishedBatch::get(&txn)); + let mut next_batch = last_batch.map_or(0, |id| id + 1); + while let Some(batch) = crate::batch::signed_batch(&txn, next_batch) { + iterated = true; + db::LastPublishedBatch::set(&mut txn, &batch.batch.id); + self + .coordinator + .publish_batch(batch) + .await + .map_err(|e| format!("couldn't publish Batch: {e:?}"))?; + next_batch += 1; + } + txn.commit(); + } Ok(iterated) } diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index def6ef165..024badfaa 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -169,7 +169,7 @@ impl Signers { .push(ThresholdKeys::from(ThresholdCore::::read(&mut buf).unwrap())); } - // TODO: Batch signer, cosigner, slash report signers + // TODO: Cosigner and slash report signers let (batch_task, batch_handle) = Task::new(); tokio::spawn( diff --git a/processor/signers/src/transaction/db.rs b/processor/signers/src/transaction/db.rs index b77d38c7f..a91881e71 100644 --- a/processor/signers/src/transaction/db.rs +++ b/processor/signers/src/transaction/db.rs @@ -3,7 +3,7 @@ use serai_validator_sets_primitives::Session; use serai_db::{Get, DbTxn, create_db}; create_db! { - TransactionSigner { + SignersTransaction { ActiveSigningProtocols: (session: Session) -> Vec<[u8; 32]>, SerializedSignableTransactions: (id: [u8; 32]) -> Vec, SerializedTransactions: (id: [u8; 32]) -> Vec, From 26fb7c2b6d59687b3dcbfc296ea3771449e88535 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 9 Sep 2024 03:06:37 -0400 Subject: [PATCH 086/179] Tidy messages, publish all Batches to the coordinator Prior, we published SignedBatches, yet Batches are necessary for auditing purposes. --- processor/messages/src/lib.rs | 143 ++++++----------------- processor/scanner/src/db.rs | 31 ++++- processor/scanner/src/report/mod.rs | 3 +- processor/scanner/src/substrate/mod.rs | 15 +-- processor/signers/src/coordinator/mod.rs | 16 ++- processor/signers/src/lib.rs | 5 +- 6 files changed, 89 insertions(+), 124 deletions(-) diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index ef907f97f..4a191b686 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -46,12 +46,6 @@ pub mod key_gen { } } - impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - None - } - } - #[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { // Participated in the specified key generation protocol. @@ -133,10 +127,6 @@ pub mod sign { } impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - None - } - pub fn sign_id(&self) -> &SignId { match self { CoordinatorMessage::Preprocesses { id, .. } | @@ -160,6 +150,7 @@ pub mod sign { pub mod coordinator { use super::*; + // TODO: Why does this not simply take the block hash? 
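+  // The message is a length-prefixed "Cosign" domain-separation tag, followed by an encoding
+  // of the block number and the block's hash.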
pub fn cosign_block_msg(block_number: u64, block: [u8; 32]) -> Vec { const DST: &[u8] = b"Cosign"; let mut res = vec![u8::try_from(DST.len()).unwrap()]; @@ -169,36 +160,10 @@ pub mod coordinator { res } - #[derive( - Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, - )] - pub enum SubstrateSignableId { - CosigningSubstrateBlock([u8; 32]), - Batch(u32), - SlashReport, - } - - #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] - pub struct SubstrateSignId { - pub session: Session, - pub id: SubstrateSignableId, - pub attempt: u32, - } - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { - CosignSubstrateBlock { id: SubstrateSignId, block_number: u64 }, - SignSlashReport { id: SubstrateSignId, report: Vec<([u8; 32], u32)> }, - } - - impl CoordinatorMessage { - // The Coordinator will only send Batch messages once the Batch ID has been recognized - // The ID will only be recognized when the block is acknowledged by a super-majority of the - // network *and the local node* - // This synchrony obtained lets us ignore the synchrony requirement offered here - pub fn required_block(&self) -> Option { - None - } + CosignSubstrateBlock { session: Session, block_number: u64, block: [u8; 32] }, + SignSlashReport { session: Session, report: Vec<([u8; 32], u32)> }, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] @@ -209,14 +174,9 @@ pub mod coordinator { #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { - SubstrateBlockAck { block: u64, plans: Vec }, - InvalidParticipant { id: SubstrateSignId, participant: Participant }, - CosignPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> }, - // TODO: Remove BatchPreprocess? Why does this take a BlockHash here and not in its - // SubstrateSignId? - BatchPreprocess { id: SubstrateSignId, block: BlockHash, preprocesses: Vec<[u8; 64]> }, - // TODO: Make these signatures [u8; 64]? CosignedBlock { block_number: u64, block: [u8; 32], signature: Vec }, + SignedBatch { batch: SignedBatch }, + SubstrateBlockAck { block: u64, plans: Vec }, SignedSlashReport { session: Session, signature: Vec }, } } @@ -226,33 +186,23 @@ pub mod substrate { #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { - ConfirmKeyPair { - context: SubstrateContext, - session: Session, - key_pair: KeyPair, - }, - SubstrateBlock { - context: SubstrateContext, + /// Keys set on the Serai network. + SetKeys { serai_time: u64, session: Session, key_pair: KeyPair }, + /// The data from a block which acknowledged a Batch. + BlockWithBatchAcknowledgement { block: u64, + batch_id: u32, + in_instruction_succeededs: Vec, burns: Vec, - batches: Vec, + key_to_activate: Option, }, - } - - impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - let context = match self { - CoordinatorMessage::ConfirmKeyPair { context, .. } | - CoordinatorMessage::SubstrateBlock { context, .. } => context, - }; - Some(context.network_latest_finalized_block) - } + /// The data from a block which didn't acknowledge a Batch. 
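+    ///
+    /// Such blocks still carry Burns to process, hence the `burns` field on this variant.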
+ BlockWithoutBatchAcknowledgement { block: u64, burns: Vec }, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { Batch { batch: Batch }, - SignedBatch { batch: SignedBatch }, } } @@ -279,24 +229,6 @@ impl_from!(sign, CoordinatorMessage, Sign); impl_from!(coordinator, CoordinatorMessage, Coordinator); impl_from!(substrate, CoordinatorMessage, Substrate); -impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - let required = match self { - CoordinatorMessage::KeyGen(msg) => msg.required_block(), - CoordinatorMessage::Sign(msg) => msg.required_block(), - CoordinatorMessage::Coordinator(msg) => msg.required_block(), - CoordinatorMessage::Substrate(msg) => msg.required_block(), - }; - - // 0 is used when Serai hasn't acknowledged *any* block for this network, which also means - // there's no need to wait for the block in question - if required == Some(BlockHash([0; 32])) { - return None; - } - required - } -} - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { KeyGen(key_gen::ProcessorMessage), @@ -315,10 +247,10 @@ impl_from!(substrate, ProcessorMessage, Substrate); const COORDINATOR_UID: u8 = 0; const PROCESSOR_UID: u8 = 1; -const TYPE_KEY_GEN_UID: u8 = 2; -const TYPE_SIGN_UID: u8 = 3; -const TYPE_COORDINATOR_UID: u8 = 4; -const TYPE_SUBSTRATE_UID: u8 = 5; +const TYPE_KEY_GEN_UID: u8 = 0; +const TYPE_SIGN_UID: u8 = 1; +const TYPE_COORDINATOR_UID: u8 = 2; +const TYPE_SUBSTRATE_UID: u8 = 3; impl CoordinatorMessage { /// The intent for this message, which should be unique across the validator's entire system, @@ -359,11 +291,12 @@ impl CoordinatorMessage { } CoordinatorMessage::Coordinator(msg) => { let (sub, id) = match msg { - // Unique since this ID contains the hash of the block being cosigned - coordinator::CoordinatorMessage::CosignSubstrateBlock { id, .. } => (0, id.encode()), - // Unique since there's only one of these per session/attempt, and ID is inclusive to - // both - coordinator::CoordinatorMessage::SignSlashReport { id, .. } => (1, id.encode()), + // We only cosign a block once, and Reattempt is a separate message + coordinator::CoordinatorMessage::CosignSubstrateBlock { block_number, .. } => { + (0, block_number.encode()) + } + // We only sign one slash report, and Reattempt is a separate message + coordinator::CoordinatorMessage::SignSlashReport { session, .. } => (1, session.encode()), }; let mut res = vec![COORDINATOR_UID, TYPE_COORDINATOR_UID, sub]; @@ -372,9 +305,13 @@ impl CoordinatorMessage { } CoordinatorMessage::Substrate(msg) => { let (sub, id) = match msg { - // Unique since there's only one key pair for a session - substrate::CoordinatorMessage::ConfirmKeyPair { session, .. } => (0, session.encode()), - substrate::CoordinatorMessage::SubstrateBlock { block, .. } => (1, block.encode()), + substrate::CoordinatorMessage::SetKeys { session, .. } => (0, session.encode()), + substrate::CoordinatorMessage::BlockWithBatchAcknowledgement { block, .. } => { + (1, block.encode()) + } + substrate::CoordinatorMessage::BlockWithoutBatchAcknowledgement { block, .. } => { + (2, block.encode()) + } }; let mut res = vec![COORDINATOR_UID, TYPE_SUBSTRATE_UID, sub]; @@ -430,14 +367,10 @@ impl ProcessorMessage { } ProcessorMessage::Coordinator(msg) => { let (sub, id) = match msg { - coordinator::ProcessorMessage::SubstrateBlockAck { block, .. } => (0, block.encode()), - // Unique since SubstrateSignId - coordinator::ProcessorMessage::InvalidParticipant { id, .. 
} => (1, id.encode()), - coordinator::ProcessorMessage::CosignPreprocess { id, .. } => (2, id.encode()), - coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (3, id.encode()), - // Unique since only one instance of a signature matters - coordinator::ProcessorMessage::CosignedBlock { block, .. } => (4, block.encode()), - coordinator::ProcessorMessage::SignedSlashReport { .. } => (5, vec![]), + coordinator::ProcessorMessage::CosignedBlock { block, .. } => (0, block.encode()), + coordinator::ProcessorMessage::SignedBatch { batch, .. } => (1, batch.batch.id.encode()), + coordinator::ProcessorMessage::SubstrateBlockAck { block, .. } => (2, block.encode()), + coordinator::ProcessorMessage::SignedSlashReport { session, .. } => (3, session.encode()), }; let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub]; @@ -446,11 +379,7 @@ impl ProcessorMessage { } ProcessorMessage::Substrate(msg) => { let (sub, id) = match msg { - // Unique since network and ID binding - substrate::ProcessorMessage::Batch { batch } => (0, (batch.network, batch.id).encode()), - substrate::ProcessorMessage::SignedBatch { batch, .. } => { - (1, (batch.batch.network, batch.batch.id).encode()) - } + substrate::ProcessorMessage::Batch { batch } => (0, batch.id.encode()), }; let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub]; diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 3dd5a2e20..52a364193 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -525,12 +525,19 @@ db_channel! { pub(crate) struct SubstrateToEventualityDb; impl SubstrateToEventualityDb { - pub(crate) fn send_burns( + pub(crate) fn send_burns( txn: &mut impl DbTxn, acknowledged_block: u64, - burns: &Vec, + burns: Vec, ) { - Burns::send(txn, acknowledged_block, burns); + // Drop burns less than the dust + let burns = burns + .into_iter() + .filter(|burn| burn.balance.amount.0 >= S::dust(burn.balance.coin).0) + .collect::>(); + if !burns.is_empty() { + Burns::send(txn, acknowledged_block, &burns); + } } pub(crate) fn try_recv_burns( @@ -548,6 +555,7 @@ mod _public_db { db_channel! { ScannerPublic { + Batches: (empty_key: ()) -> Batch, BatchesToSign: (key: &[u8]) -> Batch, AcknowledgedBatches: (key: &[u8]) -> u32, CompletedEventualities: (key: &[u8]) -> [u8; 32], @@ -555,7 +563,24 @@ mod _public_db { } } +/// The batches to publish. +/// +/// This is used for auditing the Batches published to Serai. +pub struct Batches; +impl Batches { + pub(crate) fn send(txn: &mut impl DbTxn, batch: &Batch) { + _public_db::Batches::send(txn, (), batch); + } + + /// Receive a batch to publish. + pub fn try_recv(txn: &mut impl DbTxn) -> Option { + _public_db::Batches::try_recv(txn, ()) + } +} + /// The batches to sign and publish. +/// +/// This is used for publishing Batches onto Serai. 
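+///
+/// Whereas `Batches` above feeds the audit log, this channel is keyed by the external key for
+/// the session responsible for signing the Batch.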
pub struct BatchesToSign(PhantomData); impl BatchesToSign { pub(crate) fn send(txn: &mut impl DbTxn, key: &K, batch: &Batch) { diff --git a/processor/scanner/src/report/mod.rs b/processor/scanner/src/report/mod.rs index 309b44aaa..5fd2c7eb7 100644 --- a/processor/scanner/src/report/mod.rs +++ b/processor/scanner/src/report/mod.rs @@ -8,7 +8,7 @@ use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; use primitives::task::ContinuallyRan; use crate::{ - db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToReportDb, BatchesToSign}, + db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToReportDb, Batches, BatchesToSign}, index, scan::next_to_scan_for_outputs_block, ScannerFeed, KeyFor, @@ -160,6 +160,7 @@ impl ContinuallyRan for ReportTask { } for batch in batches { + Batches::send(&mut txn, &batch); BatchesToSign::send(&mut txn, &external_key_for_session_to_sign_batch, &batch); } } diff --git a/processor/scanner/src/substrate/mod.rs b/processor/scanner/src/substrate/mod.rs index 76961c378..fc97daf33 100644 --- a/processor/scanner/src/substrate/mod.rs +++ b/processor/scanner/src/substrate/mod.rs @@ -144,16 +144,9 @@ impl ContinuallyRan for SubstrateTask { } } - // Drop burns less than the dust - let burns = burns - .into_iter() - .filter(|burn| burn.balance.amount.0 >= S::dust(burn.balance.coin).0) - .collect::>(); - if !burns.is_empty() { - // We send these Burns as stemming from this block we just acknowledged - // This causes them to be acted on after we accumulate the outputs from this block - SubstrateToEventualityDb::send_burns(&mut txn, block_number, &burns); - } + // We send these Burns as stemming from this block we just acknowledged + // This causes them to be acted on after we accumulate the outputs from this block + SubstrateToEventualityDb::send_burns::(&mut txn, block_number, burns); } Action::QueueBurns(burns) => { @@ -163,7 +156,7 @@ impl ContinuallyRan for SubstrateTask { let queue_as_of = ScannerGlobalDb::::highest_acknowledged_block(&txn) .expect("queueing Burns yet never acknowledged a block"); - SubstrateToEventualityDb::send_burns(&mut txn, queue_as_of, &burns); + SubstrateToEventualityDb::send_burns::(&mut txn, queue_as_of, burns); } } diff --git a/processor/signers/src/coordinator/mod.rs b/processor/signers/src/coordinator/mod.rs index 3255603d2..77cdef591 100644 --- a/processor/signers/src/coordinator/mod.rs +++ b/processor/signers/src/coordinator/mod.rs @@ -95,6 +95,20 @@ impl ContinuallyRan for CoordinatorTask { } } + // Publish the Batches + { + let mut txn = self.db.txn(); + while let Some(batch) = scanner::Batches::try_recv(&mut txn) { + iterated = true; + self + .coordinator + .publish_batch(batch) + .await + .map_err(|e| format!("couldn't publish Batch: {e:?}"))?; + } + txn.commit(); + } + // Publish the signed Batches { let mut txn = self.db.txn(); @@ -108,7 +122,7 @@ impl ContinuallyRan for CoordinatorTask { db::LastPublishedBatch::set(&mut txn, &batch.batch.id); self .coordinator - .publish_batch(batch) + .publish_signed_batch(batch) .await .map_err(|e| format!("couldn't publish Batch: {e:?}"))?; next_batch += 1; diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index 024badfaa..36e2db2e5 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -46,8 +46,11 @@ pub trait Coordinator: 'static + Send + Sync { /// Send a `messages::sign::ProcessorMessage`. async fn send(&mut self, message: ProcessorMessage) -> Result<(), Self::EphemeralError>; + /// Publish a `Batch`. 
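+  ///
+  /// This is the unsigned `Batch`, published for auditing purposes, distinct from the
+  /// `SignedBatch` published below.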
+ async fn publish_batch(&mut self, batch: Batch) -> Result<(), Self::EphemeralError>; + /// Publish a `SignedBatch`. - async fn publish_batch(&mut self, batch: SignedBatch) -> Result<(), Self::EphemeralError>; + async fn publish_signed_batch(&mut self, batch: SignedBatch) -> Result<(), Self::EphemeralError>; } /// An object capable of publishing a transaction. From 4963735709cacf2b5494fdd290203226c7c11ba4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 9 Sep 2024 03:23:55 -0400 Subject: [PATCH 087/179] Strongly type SlashReport, populate cosign/slash report tasks with work --- processor/messages/src/lib.rs | 4 +- processor/signers/src/db.rs | 5 ++- processor/signers/src/lib.rs | 41 ++++++++++++++++++- .../validator-sets/primitives/src/lib.rs | 20 ++++++++- 4 files changed, 64 insertions(+), 6 deletions(-) diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index 4a191b686..dc7f2939a 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -9,7 +9,7 @@ use dkg::Participant; use serai_primitives::BlockHash; use in_instructions_primitives::{Batch, SignedBatch}; use coins_primitives::OutInstructionWithBalance; -use validator_sets_primitives::{Session, KeyPair}; +use validator_sets_primitives::{Session, KeyPair, Slash}; #[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub struct SubstrateContext { @@ -163,7 +163,7 @@ pub mod coordinator { #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { CosignSubstrateBlock { session: Session, block_number: u64, block: [u8; 32] }, - SignSlashReport { session: Session, report: Vec<([u8; 32], u32)> }, + SignSlashReport { session: Session, report: Vec }, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] diff --git a/processor/signers/src/db.rs b/processor/signers/src/db.rs index ae62c947d..668946212 100644 --- a/processor/signers/src/db.rs +++ b/processor/signers/src/db.rs @@ -1,4 +1,4 @@ -use serai_validator_sets_primitives::Session; +use serai_validator_sets_primitives::{Session, Slash}; use serai_db::{Get, DbTxn, create_db, db_channel}; @@ -15,6 +15,9 @@ create_db! { db_channel! 
{ SignersGlobal { + Cosign: (session: Session) -> (u64, [u8; 32]), + SlashReport: (session: Session) -> Vec, + CoordinatorToCosignerMessages: (session: Session) -> CoordinatorMessage, CosignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index 36e2db2e5..de456296c 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -10,7 +10,7 @@ use zeroize::Zeroizing; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use frost::dkg::{ThresholdCore, ThresholdKeys}; -use serai_validator_sets_primitives::Session; +use serai_validator_sets_primitives::{Session, Slash}; use serai_in_instructions_primitives::SignedBatch; use serai_db::{DbTxn, Db}; @@ -139,6 +139,8 @@ impl Signers { while scanner::CompletedEventualities::try_recv(&mut txn, &external_key).is_some() {} // Drain our DB channels + while db::Cosign::try_recv(&mut txn, session).is_some() {} + while db::SlashReport::try_recv(&mut txn, session).is_some() {} while db::CoordinatorToCosignerMessages::try_recv(&mut txn, session).is_some() {} while db::CosignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} while db::CoordinatorToBatchSignerMessages::try_recv(&mut txn, session).is_some() {} @@ -276,7 +278,7 @@ impl Signers { /// Queue handling a message. /// - /// This is a cheap call and able to be done inline with a higher-level loop. + /// This is a cheap call and able to be done inline from a higher-level loop. pub fn queue_message(&mut self, txn: &mut impl DbTxn, message: &CoordinatorMessage) { let sign_id = message.sign_id(); let tasks = self.tasks.get(&sign_id.session); @@ -307,4 +309,39 @@ impl Signers { } } } + + /// Cosign a block. + /// + /// This is a cheap call and able to be done inline from a higher-level loop. + pub fn cosign_block( + &mut self, + mut txn: impl DbTxn, + session: Session, + block_number: u64, + block: [u8; 32], + ) { + db::Cosign::send(&mut txn, session, &(block_number, block)); + txn.commit(); + + if let Some(tasks) = self.tasks.get(&session) { + tasks.cosign.run_now(); + } + } + + /// Sign a slash report. + /// + /// This is a cheap call and able to be done inline from a higher-level loop. 
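+  ///
+  /// A hypothetical call site (illustrative only; assumes the coordinator built
+  /// `slashes: Vec<Slash>` for this session):
+  /// ```ignore
+  /// signers.sign_slash_report(txn, session, slashes);
+  /// ```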
+ pub fn sign_slash_report( + &mut self, + mut txn: impl DbTxn, + session: Session, + slash_report: Vec, + ) { + db::SlashReport::send(&mut txn, session, &slash_report); + txn.commit(); + + if let Some(tasks) = self.tasks.get(&session) { + tasks.slash_report.run_now(); + } + } } diff --git a/substrate/validator-sets/primitives/src/lib.rs b/substrate/validator-sets/primitives/src/lib.rs index 90d58c37c..341d211ff 100644 --- a/substrate/validator-sets/primitives/src/lib.rs +++ b/substrate/validator-sets/primitives/src/lib.rs @@ -103,7 +103,25 @@ pub fn set_keys_message(set: &ValidatorSet, key_pair: &KeyPair) -> Vec { (b"ValidatorSets-set_keys", set, key_pair).encode() } -pub fn report_slashes_message(set: &ValidatorSet, slashes: &[(Public, u32)]) -> Vec { +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Slash { + #[cfg_attr( + feature = "borsh", + borsh( + serialize_with = "serai_primitives::borsh_serialize_public", + deserialize_with = "serai_primitives::borsh_deserialize_public" + ) + )] + key: Public, + points: u32, +} +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SlashReport(pub BoundedVec>); + +pub fn report_slashes_message(set: &ValidatorSet, slashes: &SlashReport) -> Vec { (b"ValidatorSets-report_slashes", set, slashes).encode() } From d4ff05d375970200ea0625a96bd610f259bddb36 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 9 Sep 2024 04:18:54 -0400 Subject: [PATCH 088/179] SlashReport signing and signature publication --- Cargo.lock | 1 + processor/messages/src/lib.rs | 13 ++- processor/scanner/src/lib.rs | 2 +- processor/signers/Cargo.toml | 1 + processor/signers/src/coordinator/mod.rs | 30 ++++-- processor/signers/src/db.rs | 1 + processor/signers/src/lib.rs | 66 +++++++++---- processor/signers/src/slash_report.rs | 120 +++++++++++++++++++++++ 8 files changed, 200 insertions(+), 34 deletions(-) create mode 100644 processor/signers/src/slash_report.rs diff --git a/Cargo.lock b/Cargo.lock index 9db0bb741..81e3d1dec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8731,6 +8731,7 @@ dependencies = [ "rand_core", "serai-db", "serai-in-instructions-primitives", + "serai-primitives", "serai-processor-frost-attempt-manager", "serai-processor-messages", "serai-processor-primitives", diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index dc7f2939a..d9534293e 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -7,9 +7,9 @@ use borsh::{BorshSerialize, BorshDeserialize}; use dkg::Participant; use serai_primitives::BlockHash; -use in_instructions_primitives::{Batch, SignedBatch}; -use coins_primitives::OutInstructionWithBalance; use validator_sets_primitives::{Session, KeyPair, Slash}; +use coins_primitives::OutInstructionWithBalance; +use in_instructions_primitives::{Batch, SignedBatch}; #[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub struct SubstrateContext { @@ -84,7 +84,7 @@ pub mod sign { pub enum VariantSignId { Cosign([u8; 32]), Batch(u32), - SlashReport([u8; 32]), + SlashReport(Session), Transaction([u8; 32]), } impl fmt::Debug for VariantSignId { @@ -94,10 +94,9 @@ pub mod sign { f.debug_struct("VariantSignId::Cosign").field("0", &hex::encode(cosign)).finish() } Self::Batch(batch) => 
f.debug_struct("VariantSignId::Batch").field("0", &batch).finish(), - Self::SlashReport(slash_report) => f - .debug_struct("VariantSignId::SlashReport") - .field("0", &hex::encode(slash_report)) - .finish(), + Self::SlashReport(session) => { + f.debug_struct("VariantSignId::SlashReport").field("0", &session).finish() + } Self::Transaction(tx) => { f.debug_struct("VariantSignId::Transaction").field("0", &hex::encode(tx)).finish() } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index e5b39cdd2..5919ff7ea 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -21,7 +21,7 @@ pub use lifetime::LifetimeStage; // Database schema definition and associated functions. mod db; use db::ScannerGlobalDb; -pub use db::{BatchesToSign, AcknowledgedBatches, CompletedEventualities}; +pub use db::{Batches, BatchesToSign, AcknowledgedBatches, CompletedEventualities}; // Task to index the blockchain, ensuring we don't reorganize finalized blocks. mod index; // Scans blocks for received coins. diff --git a/processor/signers/Cargo.toml b/processor/signers/Cargo.toml index 91192a9e3..7b7ef0980 100644 --- a/processor/signers/Cargo.toml +++ b/processor/signers/Cargo.toml @@ -31,6 +31,7 @@ frost-schnorrkel = { path = "../../crypto/schnorrkel", default-features = false scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } +serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std"] } diff --git a/processor/signers/src/coordinator/mod.rs b/processor/signers/src/coordinator/mod.rs index 77cdef591..0b1ee4677 100644 --- a/processor/signers/src/coordinator/mod.rs +++ b/processor/signers/src/coordinator/mod.rs @@ -1,14 +1,9 @@ +use scale::Decode; use serai_db::{DbTxn, Db}; use primitives::task::ContinuallyRan; -use crate::{ - db::{ - RegisteredKeys, CosignerToCoordinatorMessages, BatchSignerToCoordinatorMessages, - SlashReportSignerToCoordinatorMessages, TransactionSignerToCoordinatorMessages, - }, - Coordinator, -}; +use crate::{db::*, Coordinator}; mod db; @@ -30,6 +25,7 @@ impl ContinuallyRan for CoordinatorTask { let mut iterated = false; for session in RegisteredKeys::get(&self.db).unwrap_or(vec![]) { + // Publish the messages generated by this key's signers loop { let mut txn = self.db.txn(); let Some(msg) = CosignerToCoordinatorMessages::try_recv(&mut txn, session) else { @@ -93,6 +89,26 @@ impl ContinuallyRan for CoordinatorTask { txn.commit(); } + + // If this session signed its slash report, publish its signature + { + let mut txn = self.db.txn(); + if let Some(slash_report_signature) = SlashReportSignature::try_recv(&mut txn, session) { + iterated = true; + + self + .coordinator + .publish_slash_report_signature( + <_>::decode(&mut slash_report_signature.as_slice()).unwrap(), + ) + .await + .map_err(|e| { + format!("couldn't send slash report signature to the coordinator: {e:?}") + })?; + + txn.commit(); + } + } } // Publish the Batches diff --git a/processor/signers/src/db.rs b/processor/signers/src/db.rs index 668946212..ea022fcac 100644 --- a/processor/signers/src/db.rs +++ 
b/processor/signers/src/db.rs @@ -17,6 +17,7 @@ db_channel! { SignersGlobal { Cosign: (session: Session) -> (u64, [u8; 32]), SlashReport: (session: Session) -> Vec, + SlashReportSignature: (session: Session) -> Vec, CoordinatorToCosignerMessages: (session: Session) -> CoordinatorMessage, CosignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index de456296c..cc40ce257 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -10,8 +10,9 @@ use zeroize::Zeroizing; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use frost::dkg::{ThresholdCore, ThresholdKeys}; +use serai_primitives::Signature; use serai_validator_sets_primitives::{Session, Slash}; -use serai_in_instructions_primitives::SignedBatch; +use serai_in_instructions_primitives::{Batch, SignedBatch}; use serai_db::{DbTxn, Db}; @@ -19,6 +20,7 @@ use messages::sign::{VariantSignId, ProcessorMessage, CoordinatorMessage}; use primitives::task::{Task, TaskHandle, ContinuallyRan}; use scheduler::{Transaction, SignableTransaction, TransactionFor}; +use scanner::{ScannerFeed, Scheduler}; mod wrapped_schnorrkel; pub(crate) use wrapped_schnorrkel::WrappedSchnorrkelMachine; @@ -31,6 +33,9 @@ use coordinator::CoordinatorTask; mod batch; use batch::BatchSignerTask; +mod slash_report; +use slash_report::SlashReportSignerTask; + mod transaction; use transaction::TransactionSignerTask; @@ -51,6 +56,12 @@ pub trait Coordinator: 'static + Send + Sync { /// Publish a `SignedBatch`. async fn publish_signed_batch(&mut self, batch: SignedBatch) -> Result<(), Self::EphemeralError>; + + /// Publish a slash report's signature. + async fn publish_slash_report_signature( + &mut self, + signature: Signature, + ) -> Result<(), Self::EphemeralError>; } /// An object capable of publishing a transaction. @@ -81,12 +92,17 @@ struct Tasks { /// The signers used by a processor. #[allow(non_snake_case)] -pub struct Signers { +pub struct Signers> { coordinator_handle: TaskHandle, tasks: HashMap, - _ST: PhantomData, + _Sch: PhantomData, + _S: PhantomData, } +type CiphersuiteFor = + <>::SignableTransaction as SignableTransaction>::Ciphersuite; +type SignableTransactionFor = >::SignableTransaction; + /* This is completely outside of consensus, so the worst that can happen is: @@ -99,14 +115,14 @@ pub struct Signers { completion comes in *before* we registered a key, the signer will hold the signing protocol in memory until the session is retired entirely. */ -impl Signers { +impl> Signers { /// Initialize the signers. /// /// This will spawn tasks for any historically registered keys. pub fn new( mut db: impl Db, coordinator: impl Coordinator, - publisher: &impl TransactionPublisher>, + publisher: &impl TransactionPublisher>>, ) -> Self { /* On boot, perform any database cleanup which was queued. 
@@ -120,8 +136,7 @@ impl Signers { let mut txn = db.txn(); for (session, external_key_bytes) in db::ToCleanup::get(&txn).unwrap_or(vec![]) { let mut external_key_bytes = external_key_bytes.as_slice(); - let external_key = - ::read_G(&mut external_key_bytes).unwrap(); + let external_key = CiphersuiteFor::::read_G(&mut external_key_bytes).unwrap(); assert!(external_key_bytes.is_empty()); // Drain the Batches to sign @@ -133,7 +148,12 @@ impl Signers { // Drain the transactions to sign // This will be fully populated by the scheduler before retiry - while scheduler::TransactionsToSign::::try_recv(&mut txn, &external_key).is_some() {} + while scheduler::TransactionsToSign::>::try_recv( + &mut txn, + &external_key, + ) + .is_some() + {} // Drain the completed Eventualities while scanner::CompletedEventualities::try_recv(&mut txn, &external_key).is_some() {} @@ -170,11 +190,12 @@ impl Signers { while !buf.is_empty() { substrate_keys .push(ThresholdKeys::from(ThresholdCore::::read(&mut buf).unwrap())); - external_keys - .push(ThresholdKeys::from(ThresholdCore::::read(&mut buf).unwrap())); + external_keys.push(ThresholdKeys::from( + ThresholdCore::>::read(&mut buf).unwrap(), + )); } - // TODO: Cosigner and slash report signers + // TODO: Cosigner let (batch_task, batch_handle) = Task::new(); tokio::spawn( @@ -187,9 +208,15 @@ impl Signers { .continually_run(batch_task, vec![coordinator_handle.clone()]), ); + let (slash_report_task, slash_report_handle) = Task::new(); + tokio::spawn( + SlashReportSignerTask::<_, S>::new(db.clone(), session, substrate_keys.clone()) + .continually_run(slash_report_task, vec![coordinator_handle.clone()]), + ); + let (transaction_task, transaction_handle) = Task::new(); tokio::spawn( - TransactionSignerTask::<_, ST, _>::new( + TransactionSignerTask::<_, SignableTransactionFor, _>::new( db.clone(), publisher.clone(), session, @@ -203,13 +230,13 @@ impl Signers { Tasks { cosigner: todo!("TODO"), batch: batch_handle, - slash_report: todo!("TODO"), + slash_report: slash_report_handle, transaction: transaction_handle, }, ); } - Self { coordinator_handle, tasks, _ST: PhantomData } + Self { coordinator_handle, tasks, _Sch: PhantomData, _S: PhantomData } } /// Register a set of keys to sign with. @@ -220,7 +247,7 @@ impl Signers { txn: &mut impl DbTxn, session: Session, substrate_keys: Vec>, - network_keys: Vec>, + network_keys: Vec>>, ) { // Don't register already retired keys if Some(session.0) <= db::LatestRetiredSession::get(txn).map(|session| session.0) { @@ -246,7 +273,8 @@ impl Signers { /// Retire the signers for a session. /// /// This MUST be called in order, for every session (even if we didn't register keys for this - /// session). + /// session). This MUST only be called after slash report publication, or after that process + /// times out (not once the key is done with regards to the external network). 
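+  ///
+  /// A hypothetical valid ordering (illustrative): register session `n`, register session
+  /// `n + 1`, publish session `n`'s slash report, then retire session `n`.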
pub fn retire_session( &mut self, txn: &mut impl DbTxn, @@ -324,7 +352,7 @@ impl Signers { txn.commit(); if let Some(tasks) = self.tasks.get(&session) { - tasks.cosign.run_now(); + tasks.cosigner.run_now(); } } @@ -335,9 +363,9 @@ impl Signers { &mut self, mut txn: impl DbTxn, session: Session, - slash_report: Vec, + slash_report: &Vec, ) { - db::SlashReport::send(&mut txn, session, &slash_report); + db::SlashReport::send(&mut txn, session, slash_report); txn.commit(); if let Some(tasks) = self.tasks.get(&session) { diff --git a/processor/signers/src/slash_report.rs b/processor/signers/src/slash_report.rs new file mode 100644 index 000000000..bdb6cdba6 --- /dev/null +++ b/processor/signers/src/slash_report.rs @@ -0,0 +1,120 @@ +use core::marker::PhantomData; + +use ciphersuite::Ristretto; +use frost::dkg::ThresholdKeys; + +use scale::Encode; +use serai_primitives::Signature; +use serai_validator_sets_primitives::{ + Session, ValidatorSet, SlashReport as SlashReportStruct, report_slashes_message, +}; + +use serai_db::{DbTxn, Db}; + +use messages::sign::VariantSignId; + +use primitives::task::ContinuallyRan; +use scanner::ScannerFeed; + +use frost_attempt_manager::*; + +use crate::{ + db::{ + SlashReport, SlashReportSignature, CoordinatorToSlashReportSignerMessages, + SlashReportSignerToCoordinatorMessages, + }, + WrappedSchnorrkelMachine, +}; + +// Fetches slash_reportes to sign and signs them. +#[allow(non_snake_case)] +pub(crate) struct SlashReportSignerTask { + db: D, + _S: PhantomData, + + session: Session, + keys: Vec>, + + has_slash_report: bool, + attempt_manager: AttemptManager, +} + +impl SlashReportSignerTask { + pub(crate) fn new(db: D, session: Session, keys: Vec>) -> Self { + let attempt_manager = AttemptManager::new( + db.clone(), + session, + keys.first().expect("creating a slash_report signer with 0 keys").params().i(), + ); + + Self { db, _S: PhantomData, session, keys, has_slash_report: false, attempt_manager } + } +} + +#[async_trait::async_trait] +impl ContinuallyRan for SlashReportSignerTask { + async fn run_iteration(&mut self) -> Result { + let mut iterated = false; + + // Check for the slash report to sign + if !self.has_slash_report { + let mut txn = self.db.txn(); + let Some(slash_report) = SlashReport::try_recv(&mut txn, self.session) else { + return Ok(false); + }; + // We only commit this upon successfully signing this slash report + drop(txn); + iterated = true; + + self.has_slash_report = true; + + let mut machines = Vec::with_capacity(self.keys.len()); + { + let message = report_slashes_message( + &ValidatorSet { network: S::NETWORK, session: self.session }, + &SlashReportStruct(slash_report.try_into().unwrap()), + ); + for keys in &self.keys { + machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone())); + } + } + let mut txn = self.db.txn(); + for msg in self.attempt_manager.register(VariantSignId::SlashReport(self.session), machines) { + SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + txn.commit(); + } + + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToSlashReportSignerMessages::try_recv(&mut txn, self.session) + else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature } => { + let VariantSignId::SlashReport(session) = id else { + 
panic!("SlashReportSignerTask signed a non-SlashReport") + }; + assert_eq!(session, self.session); + // Drain the channel + SlashReport::try_recv(&mut txn, self.session).unwrap(); + // Send the signature + SlashReportSignature::send(&mut txn, session, &Signature::from(signature).encode()); + } + } + + txn.commit(); + } + + Ok(iterated) + } +} From 9669a89f4508a452235d8b6b6bdfc55ebdc01af5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 9 Sep 2024 16:20:04 -0400 Subject: [PATCH 089/179] Add CosignerTask to signers, completing it --- processor/messages/src/lib.rs | 4 +- processor/signers/src/coordinator/mod.rs | 15 +++ processor/signers/src/cosign/db.rs | 9 ++ processor/signers/src/cosign/mod.rs | 122 ++++++++++++++++++ processor/signers/src/db.rs | 5 +- processor/signers/src/lib.rs | 151 ++++++++++++++++------- processor/signers/src/slash_report.rs | 4 +- 7 files changed, 259 insertions(+), 51 deletions(-) create mode 100644 processor/signers/src/cosign/db.rs create mode 100644 processor/signers/src/cosign/mod.rs diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index d9534293e..998c7cea8 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -82,7 +82,7 @@ pub mod sign { #[derive(Clone, Copy, PartialEq, Eq, Hash, Encode, Decode, BorshSerialize, BorshDeserialize)] pub enum VariantSignId { - Cosign([u8; 32]), + Cosign(u64), Batch(u32), SlashReport(Session), Transaction([u8; 32]), @@ -91,7 +91,7 @@ pub mod sign { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { match self { Self::Cosign(cosign) => { - f.debug_struct("VariantSignId::Cosign").field("0", &hex::encode(cosign)).finish() + f.debug_struct("VariantSignId::Cosign").field("0", &cosign).finish() } Self::Batch(batch) => f.debug_struct("VariantSignId::Batch").field("0", &batch).finish(), Self::SlashReport(session) => { diff --git a/processor/signers/src/coordinator/mod.rs b/processor/signers/src/coordinator/mod.rs index 0b1ee4677..a3163922e 100644 --- a/processor/signers/src/coordinator/mod.rs +++ b/processor/signers/src/coordinator/mod.rs @@ -90,6 +90,21 @@ impl ContinuallyRan for CoordinatorTask { txn.commit(); } + // Publish the cosigns from this session + { + let mut txn = self.db.txn(); + while let Some(((block_number, block_id), signature)) = Cosign::try_recv(&mut txn, session) + { + iterated = true; + self + .coordinator + .publish_cosign(block_number, block_id, <_>::decode(&mut signature.as_slice()).unwrap()) + .await + .map_err(|e| format!("couldn't publish Cosign: {e:?}"))?; + } + txn.commit(); + } + // If this session signed its slash report, publish its signature { let mut txn = self.db.txn(); diff --git a/processor/signers/src/cosign/db.rs b/processor/signers/src/cosign/db.rs new file mode 100644 index 000000000..01a42446a --- /dev/null +++ b/processor/signers/src/cosign/db.rs @@ -0,0 +1,9 @@ +use serai_validator_sets_primitives::Session; + +use serai_db::{Get, DbTxn, create_db}; + +create_db! 
+  SignersCosigner {
+    LatestCosigned: (session: Session) -> u64,
+  }
+}
diff --git a/processor/signers/src/cosign/mod.rs b/processor/signers/src/cosign/mod.rs
new file mode 100644
index 000000000..41db80504
--- /dev/null
+++ b/processor/signers/src/cosign/mod.rs
@@ -0,0 +1,122 @@
+use ciphersuite::Ristretto;
+use frost::dkg::ThresholdKeys;
+
+use scale::Encode;
+use serai_primitives::Signature;
+use serai_validator_sets_primitives::Session;
+
+use serai_db::{DbTxn, Db};
+
+use messages::{sign::VariantSignId, coordinator::cosign_block_msg};
+
+use primitives::task::ContinuallyRan;
+
+use frost_attempt_manager::*;
+
+use crate::{
+  db::{ToCosign, Cosign, CoordinatorToCosignerMessages, CosignerToCoordinatorMessages},
+  WrappedSchnorrkelMachine,
+};
+
+mod db;
+use db::LatestCosigned;
+
+/// Fetches the latest cosign information and works on it.
+///
+/// Only the latest cosign attempt is kept. We don't work on historical attempts as later cosigns
+/// supersede them.
+#[allow(non_snake_case)]
+pub(crate) struct CosignerTask<D: Db> {
+  db: D,
+
+  session: Session,
+  keys: Vec<ThresholdKeys<Ristretto>>,
+
+  current_cosign: Option<(u64, [u8; 32])>,
+  attempt_manager: AttemptManager<D, WrappedSchnorrkelMachine>,
+}
+
+impl<D: Db> CosignerTask<D> {
+  pub(crate) fn new(db: D, session: Session, keys: Vec<ThresholdKeys<Ristretto>>) -> Self {
+    let attempt_manager = AttemptManager::new(
+      db.clone(),
+      session,
+      keys.first().expect("creating a cosigner with 0 keys").params().i(),
+    );
+
+    Self { db, session, keys, current_cosign: None, attempt_manager }
+  }
+}
+
+#[async_trait::async_trait]
+impl<D: Db> ContinuallyRan for CosignerTask<D> {
+  async fn run_iteration(&mut self) -> Result<bool, String> {
+    let mut iterated = false;
+
+    // Check the cosign to work on
+    {
+      let mut txn = self.db.txn();
+      if let Some(cosign) = ToCosign::get(&txn, self.session) {
+        // If this wasn't already signed for...
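+        // (Option's derived Ord treats None as less than any Some, so a session which has
+        // never cosigned always takes this branch.)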
+        if LatestCosigned::get(&txn, self.session) < Some(cosign.0) {
+          // If this isn't the cosign we're currently working on, meaning it's fresh
+          if self.current_cosign != Some(cosign) {
+            // Retire the current cosign
+            if let Some(current_cosign) = self.current_cosign {
+              assert!(current_cosign.0 < cosign.0);
+              self.attempt_manager.retire(&mut txn, VariantSignId::Cosign(current_cosign.0));
+            }
+
+            // Set the cosign being worked on
+            self.current_cosign = Some(cosign);
+
+            let mut machines = Vec::with_capacity(self.keys.len());
+            {
+              let message = cosign_block_msg(cosign.0, cosign.1);
+              for keys in &self.keys {
+                machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone()));
+              }
+            }
+            for msg in self.attempt_manager.register(VariantSignId::Cosign(cosign.0), machines) {
+              CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
+            }
+
+            txn.commit();
+          }
+        }
+      }
+    }
+
+    // Handle any messages sent to us
+    loop {
+      let mut txn = self.db.txn();
+      let Some(msg) = CoordinatorToCosignerMessages::try_recv(&mut txn, self.session) else {
+        break;
+      };
+      iterated = true;
+
+      match self.attempt_manager.handle(msg) {
+        Response::Messages(msgs) => {
+          for msg in msgs {
+            CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
+          }
+        }
+        Response::Signature { id, signature } => {
+          let VariantSignId::Cosign(block_number) = id else {
+            panic!("CosignerTask signed a non-Cosign")
+          };
+          assert_eq!(Some(block_number), self.current_cosign.map(|cosign| cosign.0));
+
+          let cosign = self.current_cosign.take().unwrap();
+          LatestCosigned::set(&mut txn, self.session, &cosign.0);
+          // Send the cosign
+          Cosign::send(&mut txn, self.session, &(cosign, Signature::from(signature).encode()));
+        }
+      }
+
+      txn.commit();
+    }
+
+    Ok(iterated)
+  }
+}
diff --git a/processor/signers/src/db.rs b/processor/signers/src/db.rs
index ea022fcac..b4de78d9e 100644
--- a/processor/signers/src/db.rs
+++ b/processor/signers/src/db.rs
@@ -10,12 +10,15 @@
     SerializedKeys: (session: Session) -> Vec<u8>,
     LatestRetiredSession: () -> Session,
     ToCleanup: () -> Vec<(Session, Vec<u8>)>,
+
+    ToCosign: (session: Session) -> (u64, [u8; 32]),
   }
 }
 
 db_channel! {
   SignersGlobal {
-    Cosign: (session: Session) -> (u64, [u8; 32]),
+    Cosign: (session: Session) -> ((u64, [u8; 32]), Vec<u8>),
+
     SlashReport: (session: Session) -> Vec<Slash>,
 
     SlashReportSignature: (session: Session) -> Vec<u8>,
diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs
index cc40ce257..881205f81 100644
--- a/processor/signers/src/lib.rs
+++ b/processor/signers/src/lib.rs
@@ -30,6 +30,9 @@ pub(crate) mod db;
 mod coordinator;
 use coordinator::CoordinatorTask;
 
+mod cosign;
+use cosign::CosignerTask;
+
 mod batch;
 use batch::BatchSignerTask;
 
@@ -51,6 +54,14 @@ pub trait Coordinator: 'static + Send + Sync {
   /// Send a `messages::sign::ProcessorMessage`.
   async fn send(&mut self, message: ProcessorMessage) -> Result<(), Self::EphemeralError>;
 
+  /// Publish a cosign.
+  async fn publish_cosign(
+    &mut self,
+    block_number: u64,
+    block_id: [u8; 32],
+    signature: Signature,
+  ) -> Result<(), Self::EphemeralError>;
+
   /// Publish a `Batch`.
   async fn publish_batch(&mut self, batch: Batch) -> Result<(), Self::EphemeralError>;
 
@@ -92,7 +103,14 @@ struct Tasks {
 /// The signers used by a processor.
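 ///
 /// (A reading of the generics below, sketched from their bounds: `D` is the database handle,
 /// `S` the scanner feed for the external network, `Sch` the scheduler whose signable
 /// transactions are signed, and `P` the publisher for the resulting transactions.)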
 #[allow(non_snake_case)]
-pub struct Signers<S: ScannerFeed, Sch: Scheduler<S>> {
+pub struct Signers<
+  D: Db,
+  S: ScannerFeed,
+  Sch: Scheduler<S>,
+  P: TransactionPublisher<TransactionFor<SignableTransactionFor<S, Sch>>>,
+> {
+  db: D,
+  publisher: P,
   coordinator_handle: TaskHandle,
   tasks: HashMap<Session, Tasks>,
   _Sch: PhantomData<Sch>,
   _S: PhantomData<S>,
 }
 
@@ -115,15 +133,66 @@ type SignableTransactionFor<S, Sch> = <Sch as Scheduler<S>>::SignableTransaction
    completion comes in *before* we registered a key, the signer will hold the signing protocol
    in memory until the session is retired entirely.
 */
-impl<S: ScannerFeed, Sch: Scheduler<S>> Signers<S, Sch> {
+impl<
+    D: Db,
+    S: ScannerFeed,
+    Sch: Scheduler<S>,
+    P: TransactionPublisher<TransactionFor<SignableTransactionFor<S, Sch>>>,
+  > Signers<D, S, Sch, P>
+{
+  fn tasks(
+    db: D,
+    publisher: P,
+    coordinator_handle: TaskHandle,
+    session: Session,
+    substrate_keys: Vec<ThresholdKeys<Ristretto>>,
+    external_keys: Vec<ThresholdKeys<CiphersuiteFor<S>>>,
+  ) -> Tasks {
+    let (cosign_task, cosign_handle) = Task::new();
+    tokio::spawn(
+      CosignerTask::new(db.clone(), session, substrate_keys.clone())
+        .continually_run(cosign_task, vec![coordinator_handle.clone()]),
+    );
+
+    let (batch_task, batch_handle) = Task::new();
+    tokio::spawn(
+      BatchSignerTask::new(
+        db.clone(),
+        session,
+        external_keys[0].group_key(),
+        substrate_keys.clone(),
+      )
+      .continually_run(batch_task, vec![coordinator_handle.clone()]),
+    );
+
+    let (slash_report_task, slash_report_handle) = Task::new();
+    tokio::spawn(
+      SlashReportSignerTask::<_, S>::new(db.clone(), session, substrate_keys)
+        .continually_run(slash_report_task, vec![coordinator_handle.clone()]),
+    );
+
+    let (transaction_task, transaction_handle) = Task::new();
+    tokio::spawn(
+      TransactionSignerTask::<_, SignableTransactionFor<S, Sch>, _>::new(
+        db,
+        publisher,
+        session,
+        external_keys,
+      )
+      .continually_run(transaction_task, vec![coordinator_handle]),
+    );
+
+    Tasks {
+      cosigner: cosign_handle,
+      batch: batch_handle,
+      slash_report: slash_report_handle,
+      transaction: transaction_handle,
+    }
+  }
+
   /// Initialize the signers.
   ///
   /// This will spawn tasks for any historically registered keys.
-  pub fn new(
-    mut db: impl Db,
-    coordinator: impl Coordinator,
-    publisher: &impl TransactionPublisher<TransactionFor<SignableTransactionFor<S, Sch>>>,
-  ) -> Self {
+  pub fn new(mut db: D, coordinator: impl Coordinator, publisher: P) -> Self {
     /*
       On boot, perform any database cleanup which was queued.
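 
       (A sketch of what this entails, per the cleanup loop below: a retired session's queued
       Batches, transactions to sign, completed Eventualities, and, as of this commit, its
       pending cosign slot are all drained before its serialized keys are deleted.)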
@@ -158,6 +227,8 @@ impl<S: ScannerFeed, Sch: Scheduler<S>> Signers<S, Sch> {
         // Drain the completed Eventualities
         while scanner::CompletedEventualities::try_recv(&mut txn, &external_key).is_some() {}
 
+        // Delete the cosign this session should be working on
+        db::ToCosign::del(&mut txn, session);
         // Drain our DB channels
         while db::Cosign::try_recv(&mut txn, session).is_some() {}
         while db::SlashReport::try_recv(&mut txn, session).is_some() {}
@@ -195,48 +266,20 @@ impl<S: ScannerFeed, Sch: Scheduler<S>> Signers<S, Sch> {
         ));
       }
 
-      // TODO: Cosigner
-
-      let (batch_task, batch_handle) = Task::new();
-      tokio::spawn(
-        BatchSignerTask::new(
-          db.clone(),
-          session,
-          external_keys[0].group_key(),
-          substrate_keys.clone(),
-        )
-        .continually_run(batch_task, vec![coordinator_handle.clone()]),
-      );
-
-      let (slash_report_task, slash_report_handle) = Task::new();
-      tokio::spawn(
-        SlashReportSignerTask::<_, S>::new(db.clone(), session, substrate_keys.clone())
-          .continually_run(slash_report_task, vec![coordinator_handle.clone()]),
-      );
-
-      let (transaction_task, transaction_handle) = Task::new();
-      tokio::spawn(
-        TransactionSignerTask::<_, SignableTransactionFor<S, Sch>, _>::new(
+      tasks.insert(
+        session,
+        Self::tasks(
           db.clone(),
           publisher.clone(),
+          coordinator_handle.clone(),
           session,
+          substrate_keys,
           external_keys,
-        )
-        .continually_run(transaction_task, vec![coordinator_handle.clone()]),
-      );
-
-      tasks.insert(
-        session,
-        Tasks {
-          cosigner: todo!("TODO"),
-          batch: batch_handle,
-          slash_report: slash_report_handle,
-          transaction: transaction_handle,
-        },
+        ),
       );
     }
 
-    Self { coordinator_handle, tasks, _Sch: PhantomData, _S: PhantomData }
+    Self { db, publisher, coordinator_handle, tasks, _Sch: PhantomData, _S: PhantomData }
   }
 
   /// Register a set of keys to sign with.
@@ -247,7 +290,7 @@ impl<S: ScannerFeed, Sch: Scheduler<S>> Signers<S, Sch> {
     txn: &mut impl DbTxn,
     session: Session,
     substrate_keys: Vec<ThresholdKeys<Ristretto>>,
-    network_keys: Vec<ThresholdKeys<CiphersuiteFor<S>>>,
+    external_keys: Vec<ThresholdKeys<CiphersuiteFor<S>>>,
   ) {
     // Don't register already retired keys
     if Some(session.0) <= db::LatestRetiredSession::get(txn).map(|session| session.0) {
@@ -262,12 +305,25 @@ impl<S: ScannerFeed, Sch: Scheduler<S>> Signers<S, Sch> {
 
     {
       let mut buf = Zeroizing::new(Vec::with_capacity(2 * substrate_keys.len() * 128));
-      for (substrate_keys, network_keys) in substrate_keys.into_iter().zip(network_keys) {
+      for (substrate_keys, external_keys) in substrate_keys.iter().zip(&external_keys) {
         buf.extend(&*substrate_keys.serialize());
-        buf.extend(&*network_keys.serialize());
+        buf.extend(&*external_keys.serialize());
       }
       db::SerializedKeys::set(txn, session, &buf);
    }
+
+    // Spawn the tasks
+    self.tasks.insert(
+      session,
+      Self::tasks(
+        self.db.clone(),
+        self.publisher.clone(),
+        self.coordinator_handle.clone(),
+        session,
+        substrate_keys,
+        external_keys,
+      ),
+    );
   }
 
   /// Retire the signers for a session.
@@ -302,6 +358,9 @@ impl<S: ScannerFeed, Sch: Scheduler<S>> Signers<S, Sch> {
     let mut to_cleanup = db::ToCleanup::get(txn).unwrap_or(vec![]);
     to_cleanup.push((session, external_key.to_bytes().as_ref().to_vec()));
     db::ToCleanup::set(txn, &to_cleanup);
+
+    // Drop the task handles, which will cause the tasks to close
+    self.tasks.remove(&session);
   }
 
   /// Queue handling a message.
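 ///
 /// (A sketch of the flow: messages are keyed by session, written to the matching task's DB
 /// channel, and that task's handle is then nudged via `run_now`, as `cosign_block` does below
 /// for cosigns.)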
@@ -348,7 +407,7 @@ impl> Signers { block_number: u64, block: [u8; 32], ) { - db::Cosign::send(&mut txn, session, &(block_number, block)); + db::ToCosign::set(&mut txn, session, &(block_number, block)); txn.commit(); if let Some(tasks) = self.tasks.get(&session) { diff --git a/processor/signers/src/slash_report.rs b/processor/signers/src/slash_report.rs index bdb6cdba6..19a2523b9 100644 --- a/processor/signers/src/slash_report.rs +++ b/processor/signers/src/slash_report.rs @@ -26,7 +26,7 @@ use crate::{ WrappedSchnorrkelMachine, }; -// Fetches slash_reportes to sign and signs them. +// Fetches slash reports to sign and signs them. #[allow(non_snake_case)] pub(crate) struct SlashReportSignerTask { db: D, @@ -44,7 +44,7 @@ impl SlashReportSignerTask { let attempt_manager = AttemptManager::new( db.clone(), session, - keys.first().expect("creating a slash_report signer with 0 keys").params().i(), + keys.first().expect("creating a slash report signer with 0 keys").params().i(), ); Self { db, _S: PhantomData, session, keys, has_slash_report: false, attempt_manager } From 1143b679a309fddeeda68ae14c33185fb2c35597 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 9 Sep 2024 16:51:30 -0400 Subject: [PATCH 090/179] Remove old signer impls --- processor/src/batch_signer.rs | 421 ----------------- processor/src/cosigner.rs | 296 ------------ processor/src/signer.rs | 654 --------------------------- processor/src/slash_report_signer.rs | 293 ------------ 4 files changed, 1664 deletions(-) delete mode 100644 processor/src/batch_signer.rs delete mode 100644 processor/src/cosigner.rs delete mode 100644 processor/src/signer.rs delete mode 100644 processor/src/slash_report_signer.rs diff --git a/processor/src/batch_signer.rs b/processor/src/batch_signer.rs deleted file mode 100644 index 41f50322c..000000000 --- a/processor/src/batch_signer.rs +++ /dev/null @@ -1,421 +0,0 @@ -use core::{marker::PhantomData, fmt}; -use std::collections::HashMap; - -use rand_core::OsRng; - -use frost::{ - curve::Ristretto, - ThresholdKeys, FrostError, - algorithm::Algorithm, - sign::{ - Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, - AlgorithmSignMachine, AlgorithmSignatureMachine, - }, -}; -use frost_schnorrkel::Schnorrkel; - -use log::{info, debug, warn}; - -use serai_client::{ - primitives::{NetworkId, BlockHash}, - in_instructions::primitives::{Batch, SignedBatch, batch_message}, - validator_sets::primitives::Session, -}; - -use messages::coordinator::*; -use crate::{Get, DbTxn, Db, create_db}; - -create_db!( - BatchSignerDb { - CompletedDb: (id: u32) -> (), - AttemptDb: (id: u32, attempt: u32) -> (), - BatchDb: (block: BlockHash) -> SignedBatch - } -); - -type Preprocess = as PreprocessMachine>::Preprocess; -type SignatureShare = as SignMachine< - >::Signature, ->>::SignatureShare; - -pub struct BatchSigner { - db: PhantomData, - - network: NetworkId, - session: Session, - keys: Vec>, - - signable: HashMap, - attempt: HashMap, - #[allow(clippy::type_complexity)] - preprocessing: HashMap>, Vec)>, - #[allow(clippy::type_complexity)] - signing: HashMap, Vec)>, -} - -impl fmt::Debug for BatchSigner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("BatchSigner") - .field("signable", &self.signable) - .field("attempt", &self.attempt) - .finish_non_exhaustive() - } -} - -impl BatchSigner { - pub fn new( - network: NetworkId, - session: Session, - keys: Vec>, - ) -> BatchSigner { - assert!(!keys.is_empty()); - BatchSigner { - db: PhantomData, - - network, - 
session, - keys, - - signable: HashMap::new(), - attempt: HashMap::new(), - preprocessing: HashMap::new(), - signing: HashMap::new(), - } - } - - fn verify_id(&self, id: &SubstrateSignId) -> Result<(Session, u32, u32), ()> { - let SubstrateSignId { session, id, attempt } = id; - let SubstrateSignableId::Batch(id) = id else { panic!("BatchSigner handed non-Batch") }; - - assert_eq!(session, &self.session); - - // Check the attempt lines up - match self.attempt.get(id) { - // If we don't have an attempt logged, it's because the coordinator is faulty OR because we - // rebooted OR we detected the signed batch on chain - // The latter is the expected flow for batches not actively being participated in - None => { - warn!("not attempting batch {id} #{attempt}"); - Err(())?; - } - Some(our_attempt) => { - if attempt != our_attempt { - warn!("sent signing data for batch {id} #{attempt} yet we have attempt #{our_attempt}"); - Err(())?; - } - } - } - - Ok((*session, *id, *attempt)) - } - - #[must_use] - fn attempt( - &mut self, - txn: &mut D::Transaction<'_>, - id: u32, - attempt: u32, - ) -> Option { - // See above commentary for why this doesn't emit SignedBatch - if CompletedDb::get(txn, id).is_some() { - return None; - } - - // Check if we're already working on this attempt - if let Some(curr_attempt) = self.attempt.get(&id) { - if curr_attempt >= &attempt { - warn!("told to attempt {id} #{attempt} yet we're already working on {curr_attempt}"); - return None; - } - } - - // Start this attempt - let block = if let Some(batch) = self.signable.get(&id) { - batch.block - } else { - warn!("told to attempt signing a batch we aren't currently signing for"); - return None; - }; - - // Delete any existing machines - self.preprocessing.remove(&id); - self.signing.remove(&id); - - // Update the attempt number - self.attempt.insert(id, attempt); - - info!("signing batch {id} #{attempt}"); - - // If we reboot mid-sign, the current design has us abort all signs and wait for latter - // attempts/new signing protocols - // This is distinct from the DKG which will continue DKG sessions, even on reboot - // This is because signing is tolerant of failures of up to 1/3rd of the group - // The DKG requires 100% participation - // While we could apply similar tricks as the DKG (a seeded RNG) to achieve support for - // reboots, it's not worth the complexity when messing up here leaks our secret share - // - // Despite this, on reboot, we'll get told of active signing items, and may be in this - // branch again for something we've already attempted - // - // Only run if this hasn't already been attempted - // TODO: This isn't complete as this txn may not be committed with the expected timing - if AttemptDb::get(txn, id, attempt).is_some() { - warn!( - "already attempted batch {id}, attempt #{attempt}. 
this is an error if we didn't reboot" - ); - return None; - } - AttemptDb::set(txn, id, attempt, &()); - - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &self.keys { - // b"substrate" is a literal from sp-core - let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); - preprocesses.push(preprocess); - } - self.preprocessing.insert(id, (machines, preprocesses)); - - let id = SubstrateSignId { session: self.session, id: SubstrateSignableId::Batch(id), attempt }; - - // Broadcast our preprocesses - Some(ProcessorMessage::BatchPreprocess { id, block, preprocesses: serialized_preprocesses }) - } - - #[must_use] - pub fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) -> Option { - debug_assert_eq!(self.network, batch.network); - let id = batch.id; - if CompletedDb::get(txn, id).is_some() { - debug!("Sign batch order for ID we've already completed signing"); - // See batch_signed for commentary on why this simply returns - return None; - } - - self.signable.insert(id, batch); - self.attempt(txn, id, 0) - } - - #[must_use] - pub fn handle( - &mut self, - txn: &mut D::Transaction<'_>, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::CosignSubstrateBlock { .. } => { - panic!("BatchSigner passed CosignSubstrateBlock") - } - - CoordinatorMessage::SignSlashReport { .. } => { - panic!("Cosigner passed SignSlashReport") - } - - CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { - let (session, id, attempt) = self.verify_id(&id).ok()?; - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt }; - - let (machines, our_preprocesses) = match self.preprocessing.remove(&id) { - // Either rebooted or RPC error, or some invariant - None => { - warn!("not preprocessing for {id}. 
this is an error if we didn't reboot"); - return None; - } - Some(preprocess) => preprocess, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - }; - if !preprocess_ref.is_empty() { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - let (machine, share) = match machine - .sign(preprocesses, &batch_message(&self.signable[&id])) - { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - - let mut share_bytes = [0; 32]; - share_bytes.copy_from_slice(&share.serialize()); - serialized_shares.push(share_bytes); - - shares.push(share); - } - self.signing.insert(id, (signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some( - (ProcessorMessage::SubstrateShare { id: substrate_sign_id, shares: serialized_shares }) - .into(), - ) - } - - CoordinatorMessage::SubstrateShares { id, shares } => { - let (session, id, attempt) = self.verify_id(&id).ok()?; - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt }; - - let (machine, our_shares) = match self.signing.remove(&id) { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.contains_key(&id) { - panic!("never preprocessed yet signing?"); - } - - warn!("not preprocessing for {id}. 
this is an error if we didn't reboot"); - return None; - } - Some(signing) => signing, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - }; - if !share_ref.is_empty() { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let sig = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ) - } - }, - }; - - info!("signed batch {id} with attempt #{attempt}"); - - let batch = - SignedBatch { batch: self.signable.remove(&id).unwrap(), signature: sig.into() }; - - // Save the batch in case it's needed for recovery - BatchDb::set(txn, batch.batch.block, &batch); - CompletedDb::set(txn, id, &()); - - // Stop trying to sign for this batch - assert!(self.attempt.remove(&id).is_some()); - assert!(self.preprocessing.remove(&id).is_none()); - assert!(self.signing.remove(&id).is_none()); - - Some((messages::substrate::ProcessorMessage::SignedBatch { batch }).into()) - } - - CoordinatorMessage::BatchReattempt { id } => { - let SubstrateSignableId::Batch(batch_id) = id.id else { - panic!("BatchReattempt passed non-Batch ID") - }; - self.attempt(txn, batch_id, id.attempt).map(Into::into) - } - } - } - - pub fn batch_signed(&mut self, txn: &mut D::Transaction<'_>, id: u32) { - // Stop trying to sign for this batch - CompletedDb::set(txn, id, &()); - - self.signable.remove(&id); - self.attempt.remove(&id); - self.preprocessing.remove(&id); - self.signing.remove(&id); - - // This doesn't emit SignedBatch because it doesn't have access to the SignedBatch - // This function is expected to only be called once Substrate acknowledges this block, - // which means its batch must have been signed - // While a successive batch's signing would also cause this block to be acknowledged, Substrate - // guarantees a batch's ordered inclusion - - // This also doesn't return any messages since all mutation from the Batch being signed happens - // on the substrate::CoordinatorMessage::SubstrateBlock message (which SignedBatch is meant to - // end up triggering) - } -} diff --git a/processor/src/cosigner.rs b/processor/src/cosigner.rs deleted file mode 100644 index a9fb6cccc..000000000 --- a/processor/src/cosigner.rs +++ /dev/null @@ -1,296 +0,0 @@ -use core::fmt; -use std::collections::HashMap; - -use rand_core::OsRng; - -use frost::{ - curve::Ristretto, - ThresholdKeys, FrostError, - algorithm::Algorithm, - sign::{ - Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, - AlgorithmSignMachine, AlgorithmSignatureMachine, - }, -}; -use frost_schnorrkel::Schnorrkel; - -use 
log::{info, warn}; - -use serai_client::validator_sets::primitives::Session; - -use messages::coordinator::*; -use crate::{Get, DbTxn, create_db}; - -create_db! { - CosignerDb { - Completed: (id: [u8; 32]) -> (), - Attempt: (id: [u8; 32], attempt: u32) -> (), - } -} - -type Preprocess = as PreprocessMachine>::Preprocess; -type SignatureShare = as SignMachine< - >::Signature, ->>::SignatureShare; - -pub struct Cosigner { - session: Session, - keys: Vec>, - - block_number: u64, - id: [u8; 32], - attempt: u32, - #[allow(clippy::type_complexity)] - preprocessing: Option<(Vec>, Vec)>, - #[allow(clippy::type_complexity)] - signing: Option<(AlgorithmSignatureMachine, Vec)>, -} - -impl fmt::Debug for Cosigner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("Cosigner") - .field("session", &self.session) - .field("block_number", &self.block_number) - .field("id", &self.id) - .field("attempt", &self.attempt) - .field("preprocessing", &self.preprocessing.is_some()) - .field("signing", &self.signing.is_some()) - .finish_non_exhaustive() - } -} - -impl Cosigner { - pub fn new( - txn: &mut impl DbTxn, - session: Session, - keys: Vec>, - block_number: u64, - id: [u8; 32], - attempt: u32, - ) -> Option<(Cosigner, ProcessorMessage)> { - assert!(!keys.is_empty()); - - if Completed::get(txn, id).is_some() { - return None; - } - - if Attempt::get(txn, id, attempt).is_some() { - warn!( - "already attempted cosigning {}, attempt #{}. this is an error if we didn't reboot", - hex::encode(id), - attempt, - ); - return None; - } - Attempt::set(txn, id, attempt, &()); - - info!("cosigning block {} with attempt #{}", hex::encode(id), attempt); - - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &keys { - // b"substrate" is a literal from sp-core - let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); - preprocesses.push(preprocess); - } - let preprocessing = Some((machines, preprocesses)); - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::CosigningSubstrateBlock(id), attempt }; - - Some(( - Cosigner { session, keys, block_number, id, attempt, preprocessing, signing: None }, - ProcessorMessage::CosignPreprocess { - id: substrate_sign_id, - preprocesses: serialized_preprocesses, - }, - )) - } - - #[must_use] - pub fn handle( - &mut self, - txn: &mut impl DbTxn, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::CosignSubstrateBlock { .. } => { - panic!("Cosigner passed CosignSubstrateBlock") - } - - CoordinatorMessage::SignSlashReport { .. } => { - panic!("Cosigner passed SignSlashReport") - } - - CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { - assert_eq!(id.session, self.session); - let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { - panic!("cosigner passed Batch") - }; - if block != self.id { - panic!("given preprocesses for a distinct block than cosigner is signing") - } - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than cosigner is signing") - } - - let (machines, our_preprocesses) = match self.preprocessing.take() { - // Either rebooted or RPC error, or some invariant - None => { - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(block), - ); - return None; - } - Some(preprocess) => preprocess, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !preprocess_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - let (machine, share) = - match machine.sign(preprocesses, &cosign_block_msg(self.block_number, self.id)) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - - let mut share_bytes = [0; 32]; - share_bytes.copy_from_slice(&share.serialize()); - serialized_shares.push(share_bytes); - - shares.push(share); - } - self.signing = Some((signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some(ProcessorMessage::SubstrateShare { id, shares: serialized_shares }) - } - - CoordinatorMessage::SubstrateShares { id, shares } => { - assert_eq!(id.session, self.session); - let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { - panic!("cosigner passed Batch") - }; - if block != self.id { - panic!("given preprocesses for a distinct block than cosigner is signing") - } - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than cosigner is signing") - } - - let (machine, our_shares) = match self.signing.take() { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.is_some() { - panic!("never preprocessed yet signing?"); - } - - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(block) - ); - return None; - } - Some(signing) => signing, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !share_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let sig = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - - info!("cosigned {} with attempt #{}", hex::encode(block), id.attempt); - - Completed::set(txn, block, &()); - - Some(ProcessorMessage::CosignedBlock { - block_number: self.block_number, - block, - signature: sig.to_bytes().to_vec(), - }) - } - CoordinatorMessage::BatchReattempt { .. } => panic!("BatchReattempt passed to Cosigner"), - } - } -} diff --git a/processor/src/signer.rs b/processor/src/signer.rs deleted file mode 100644 index cab0bceb1..000000000 --- a/processor/src/signer.rs +++ /dev/null @@ -1,654 +0,0 @@ -use core::{marker::PhantomData, fmt}; -use std::collections::HashMap; - -use rand_core::OsRng; -use frost::{ - ThresholdKeys, FrostError, - sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, -}; - -use log::{info, debug, warn, error}; - -use serai_client::validator_sets::primitives::Session; -use messages::sign::*; - -pub use serai_db::*; - -use crate::{ - Get, DbTxn, Db, - networks::{Eventuality, Network}, -}; - -create_db!( - SignerDb { - CompletionsDb: (id: [u8; 32]) -> Vec, - EventualityDb: (id: [u8; 32]) -> Vec, - AttemptDb: (id: &SignId) -> (), - CompletionDb: (claim: &[u8]) -> Vec, - ActiveSignsDb: () -> Vec<[u8; 32]>, - CompletedOnChainDb: (id: &[u8; 32]) -> (), - } -); - -impl ActiveSignsDb { - fn add_active_sign(txn: &mut impl DbTxn, id: &[u8; 32]) { - if CompletedOnChainDb::get(txn, id).is_some() { - return; - } - let mut active = ActiveSignsDb::get(txn).unwrap_or_default(); - active.push(*id); - ActiveSignsDb::set(txn, &active); - } -} - -impl CompletedOnChainDb { - fn complete_on_chain(txn: &mut impl DbTxn, id: &[u8; 32]) { - CompletedOnChainDb::set(txn, id, &()); - ActiveSignsDb::set( - txn, - &ActiveSignsDb::get(txn) - .unwrap_or_default() - .into_iter() - .filter(|active| active != id) - .collect::>(), - ); - } -} -impl CompletionsDb { - fn completions( - getter: &impl Get, - id: [u8; 32], - ) -> Vec<::Claim> { - let Some(completions) = Self::get(getter, id) else { return vec![] }; - - // If this was set yet is empty, it's because it's the encoding of a claim with a length of 0 - if completions.is_empty() { - let default = ::Claim::default(); - assert_eq!(default.as_ref().len(), 0); - return vec![default]; - } - - let mut completions_ref = completions.as_slice(); - let mut res = vec![]; - while 
!completions_ref.is_empty() { - let mut id = ::Claim::default(); - let id_len = id.as_ref().len(); - id.as_mut().copy_from_slice(&completions_ref[.. id_len]); - completions_ref = &completions_ref[id_len ..]; - res.push(id); - } - res - } - - fn complete( - txn: &mut impl DbTxn, - id: [u8; 32], - completion: &::Completion, - ) { - // Completions can be completed by multiple signatures - // Save every solution in order to be robust - CompletionDb::save_completion::(txn, completion); - - let claim = N::Eventuality::claim(completion); - let claim: &[u8] = claim.as_ref(); - - // If claim has a 0-byte encoding, the set key, even if empty, is the claim - if claim.is_empty() { - Self::set(txn, id, &vec![]); - return; - } - - let mut existing = Self::get(txn, id).unwrap_or_default(); - assert_eq!(existing.len() % claim.len(), 0); - - // Don't add this completion if it's already present - let mut i = 0; - while i < existing.len() { - if &existing[i .. (i + claim.len())] == claim { - return; - } - i += claim.len(); - } - - existing.extend(claim); - Self::set(txn, id, &existing); - } -} - -impl EventualityDb { - fn save_eventuality( - txn: &mut impl DbTxn, - id: [u8; 32], - eventuality: &N::Eventuality, - ) { - txn.put(Self::key(id), eventuality.serialize()); - } - - fn eventuality(getter: &impl Get, id: [u8; 32]) -> Option { - Some(N::Eventuality::read(&mut getter.get(Self::key(id))?.as_slice()).unwrap()) - } -} - -impl CompletionDb { - fn save_completion( - txn: &mut impl DbTxn, - completion: &::Completion, - ) { - let claim = N::Eventuality::claim(completion); - let claim: &[u8] = claim.as_ref(); - Self::set(txn, claim, &N::Eventuality::serialize_completion(completion)); - } - - fn completion( - getter: &impl Get, - claim: &::Claim, - ) -> Option<::Completion> { - Self::get(getter, claim.as_ref()) - .map(|completion| N::Eventuality::read_completion::<&[u8]>(&mut completion.as_ref()).unwrap()) - } -} - -type PreprocessFor = <::TransactionMachine as PreprocessMachine>::Preprocess; -type SignMachineFor = <::TransactionMachine as PreprocessMachine>::SignMachine; -type SignatureShareFor = as SignMachine< - <::Eventuality as Eventuality>::Completion, ->>::SignatureShare; -type SignatureMachineFor = as SignMachine< - <::Eventuality as Eventuality>::Completion, ->>::SignatureMachine; - -pub struct Signer { - db: PhantomData, - - network: N, - - session: Session, - keys: Vec>, - - signable: HashMap<[u8; 32], N::SignableTransaction>, - attempt: HashMap<[u8; 32], u32>, - #[allow(clippy::type_complexity)] - preprocessing: HashMap<[u8; 32], (Vec>, Vec>)>, - #[allow(clippy::type_complexity)] - signing: HashMap<[u8; 32], (SignatureMachineFor, Vec>)>, -} - -impl fmt::Debug for Signer { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("Signer") - .field("network", &self.network) - .field("signable", &self.signable) - .field("attempt", &self.attempt) - .finish_non_exhaustive() - } -} - -impl Signer { - /// Rebroadcast already signed TXs which haven't had their completions mined into a sufficiently - /// confirmed block. - pub async fn rebroadcast_task(db: D, network: N) { - log::info!("rebroadcasting transactions for plans whose completions yet to be confirmed..."); - loop { - for active in ActiveSignsDb::get(&db).unwrap_or_default() { - for claim in CompletionsDb::completions::(&db, active) { - log::info!("rebroadcasting completion with claim {}", hex::encode(claim.as_ref())); - // TODO: Don't drop the error entirely. 
Check for invariants - let _ = - network.publish_completion(&CompletionDb::completion::(&db, &claim).unwrap()).await; - } - } - // Only run every five minutes so we aren't frequently loading tens to hundreds of KB from - // the DB - tokio::time::sleep(core::time::Duration::from_secs(5 * 60)).await; - } - } - pub fn new(network: N, session: Session, keys: Vec>) -> Signer { - assert!(!keys.is_empty()); - Signer { - db: PhantomData, - - network, - - session, - keys, - - signable: HashMap::new(), - attempt: HashMap::new(), - preprocessing: HashMap::new(), - signing: HashMap::new(), - } - } - - fn verify_id(&self, id: &SignId) -> Result<(), ()> { - // Check the attempt lines up - match self.attempt.get(&id.id) { - // If we don't have an attempt logged, it's because the coordinator is faulty OR because we - // rebooted OR we detected the signed transaction on chain, so there's notable network - // latency/a malicious validator - None => { - warn!( - "not attempting {} #{}. this is an error if we didn't reboot", - hex::encode(id.id), - id.attempt - ); - Err(())?; - } - Some(attempt) => { - if attempt != &id.attempt { - warn!( - "sent signing data for {} #{} yet we have attempt #{}", - hex::encode(id.id), - id.attempt, - attempt - ); - Err(())?; - } - } - } - - Ok(()) - } - - #[must_use] - fn already_completed(txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool { - if !CompletionsDb::completions::(txn, id).is_empty() { - debug!( - "SignTransaction/Reattempt order for {}, which we've already completed signing", - hex::encode(id) - ); - - true - } else { - false - } - } - - #[must_use] - fn complete( - &mut self, - id: [u8; 32], - claim: &::Claim, - ) -> ProcessorMessage { - // Assert we're actively signing for this TX - assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for"); - assert!(self.attempt.remove(&id).is_some(), "attempt had an ID signable didn't have"); - // If we weren't selected to participate, we'll have a preprocess - self.preprocessing.remove(&id); - // If we were selected, the signature will only go through if we contributed a share - // Despite this, we then need to get everyone's shares, and we may get a completion before - // we get everyone's shares - // This would be if the coordinator fails and we find the eventuality completion on-chain - self.signing.remove(&id); - - // Emit the event for it - ProcessorMessage::Completed { session: self.session, id, tx: claim.as_ref().to_vec() } - } - - #[must_use] - pub fn completed( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - completion: &::Completion, - ) -> Option { - let first_completion = !Self::already_completed(txn, id); - - // Save this completion to the DB - CompletedOnChainDb::complete_on_chain(txn, &id); - CompletionsDb::complete::(txn, id, completion); - - if first_completion { - Some(self.complete(id, &N::Eventuality::claim(completion))) - } else { - None - } - } - - /// Returns Some if the first completion. 
- // Doesn't use any loops/retries since we'll eventually get this from the Scanner anyways - #[must_use] - async fn claimed_eventuality_completion( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - claim: &::Claim, - ) -> Option { - if let Some(eventuality) = EventualityDb::eventuality::(txn, id) { - match self.network.confirm_completion(&eventuality, claim).await { - Ok(Some(completion)) => { - info!( - "signer eventuality for {} resolved in {}", - hex::encode(id), - hex::encode(claim.as_ref()) - ); - - let first_completion = !Self::already_completed(txn, id); - - // Save this completion to the DB - CompletionsDb::complete::(txn, id, &completion); - - if first_completion { - return Some(self.complete(id, claim)); - } - } - Ok(None) => { - warn!( - "a validator claimed {} completed {} when it did not", - hex::encode(claim.as_ref()), - hex::encode(id), - ); - } - Err(_) => { - // Transaction hasn't hit our mempool/was dropped for a different signature - // The latter can happen given certain latency conditions/a single malicious signer - // In the case of a single malicious signer, they can drag multiple honest validators down - // with them, so we unfortunately can't slash on this case - warn!( - "a validator claimed {} completed {} yet we couldn't check that claim", - hex::encode(claim.as_ref()), - hex::encode(id), - ); - } - } - } else { - warn!( - "informed of completion {} for eventuality {}, when we didn't have that eventuality", - hex::encode(claim.as_ref()), - hex::encode(id), - ); - } - None - } - - #[must_use] - async fn attempt( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - attempt: u32, - ) -> Option { - if Self::already_completed(txn, id) { - return None; - } - - // Check if we're already working on this attempt - if let Some(curr_attempt) = self.attempt.get(&id) { - if curr_attempt >= &attempt { - warn!( - "told to attempt {} #{} yet we're already working on {}", - hex::encode(id), - attempt, - curr_attempt - ); - return None; - } - } - - // Start this attempt - // Clone the TX so we don't have an immutable borrow preventing the below mutable actions - // (also because we do need an owned tx anyways) - let Some(tx) = self.signable.get(&id).cloned() else { - warn!("told to attempt a TX we aren't currently signing for"); - return None; - }; - - // Delete any existing machines - self.preprocessing.remove(&id); - self.signing.remove(&id); - - // Update the attempt number - self.attempt.insert(id, attempt); - - let id = SignId { session: self.session, id, attempt }; - - info!("signing for {} #{}", hex::encode(id.id), id.attempt); - - // If we reboot mid-sign, the current design has us abort all signs and wait for latter - // attempts/new signing protocols - // This is distinct from the DKG which will continue DKG sessions, even on reboot - // This is because signing is tolerant of failures of up to 1/3rd of the group - // The DKG requires 100% participation - // While we could apply similar tricks as the DKG (a seeded RNG) to achieve support for - // reboots, it's not worth the complexity when messing up here leaks our secret share - // - // Despite this, on reboot, we'll get told of active signing items, and may be in this - // branch again for something we've already attempted - // - // Only run if this hasn't already been attempted - // TODO: This isn't complete as this txn may not be committed with the expected timing - if AttemptDb::get(txn, &id).is_some() { - warn!( - "already attempted {} #{}. 
this is an error if we didn't reboot", - hex::encode(id.id), - id.attempt - ); - return None; - } - AttemptDb::set(txn, &id, &()); - - // Attempt to create the TX - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &self.keys { - let machine = match self.network.attempt_sign(keys.clone(), tx.clone()).await { - Err(e) => { - error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e); - return None; - } - Ok(machine) => machine, - }; - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize()); - preprocesses.push(preprocess); - } - - self.preprocessing.insert(id.id, (machines, preprocesses)); - - // Broadcast our preprocess - Some(ProcessorMessage::Preprocess { id, preprocesses: serialized_preprocesses }) - } - - #[must_use] - pub async fn sign_transaction( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - tx: N::SignableTransaction, - eventuality: &N::Eventuality, - ) -> Option { - // The caller is expected to re-issue sign orders on reboot - // This is solely used by the rebroadcast task - ActiveSignsDb::add_active_sign(txn, &id); - - if Self::already_completed(txn, id) { - return None; - } - - EventualityDb::save_eventuality::(txn, id, eventuality); - - self.signable.insert(id, tx); - self.attempt(txn, id, 0).await - } - - #[must_use] - pub async fn handle( - &mut self, - txn: &mut D::Transaction<'_>, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::Preprocesses { id, preprocesses } => { - if self.verify_id(&id).is_err() { - return None; - } - - let (machines, our_preprocesses) = match self.preprocessing.remove(&id.id) { - // Either rebooted or RPC error, or some invariant - None => { - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(id.id) - ); - return None; - } - Some(machine) => machine, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !preprocess_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - // Use an empty message, as expected of TransactionMachines - let (machine, share) = match machine.sign(preprocesses, &[]) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - serialized_shares.push(share.serialize()); - shares.push(share); - } - self.signing.insert(id.id, (signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some(ProcessorMessage::Share { id, shares: serialized_shares }) - } - - CoordinatorMessage::Shares { id, shares } => { - if self.verify_id(&id).is_err() { - return None; - } - - let (machine, our_shares) = match self.signing.remove(&id.id) { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.contains_key(&id.id) { - panic!("never preprocessed yet signing?"); - } - - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(id.id) - ); - return None; - } - Some(machine) => machine, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !share_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let completion = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - - // Save the completion in case it's needed for recovery - CompletionsDb::complete::(txn, id.id, &completion); - - // Publish it - if let Err(e) = self.network.publish_completion(&completion).await { - error!("couldn't publish completion for plan {}: {:?}", hex::encode(id.id), e); - } else { - info!("published completion for plan {}", hex::encode(id.id)); - } - - // Stop trying to sign for this TX - Some(self.complete(id.id, &N::Eventuality::claim(&completion))) - } - - CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await, - - CoordinatorMessage::Completed { session: _, id, tx: mut claim_vec } => { - let mut claim = ::Claim::default(); - if claim.as_ref().len() != claim_vec.len() { - let true_len = claim_vec.len(); - claim_vec.truncate(2 * claim.as_ref().len()); - warn!( - "a validator claimed {}... (actual length {}) completed {} yet {}", - hex::encode(&claim_vec), - true_len, - hex::encode(id), - "that's not a valid Claim", - ); - return None; - } - claim.as_mut().copy_from_slice(&claim_vec); - - self.claimed_eventuality_completion(txn, id, &claim).await - } - } - } -} diff --git a/processor/src/slash_report_signer.rs b/processor/src/slash_report_signer.rs deleted file mode 100644 index b7b2d55ce..000000000 --- a/processor/src/slash_report_signer.rs +++ /dev/null @@ -1,293 +0,0 @@ -use core::fmt; -use std::collections::HashMap; - -use rand_core::OsRng; - -use frost::{ - curve::Ristretto, - ThresholdKeys, FrostError, - algorithm::Algorithm, - sign::{ - Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, - AlgorithmSignMachine, AlgorithmSignatureMachine, - }, -}; -use frost_schnorrkel::Schnorrkel; - -use log::{info, warn}; - -use serai_client::{ - Public, - primitives::NetworkId, - validator_sets::primitives::{Session, ValidatorSet, report_slashes_message}, -}; - -use messages::coordinator::*; -use crate::{Get, DbTxn, create_db}; - -create_db! 
{ - SlashReportSignerDb { - Completed: (session: Session) -> (), - Attempt: (session: Session, attempt: u32) -> (), - } -} - -type Preprocess = as PreprocessMachine>::Preprocess; -type SignatureShare = as SignMachine< - >::Signature, ->>::SignatureShare; - -pub struct SlashReportSigner { - network: NetworkId, - session: Session, - keys: Vec>, - report: Vec<([u8; 32], u32)>, - - attempt: u32, - #[allow(clippy::type_complexity)] - preprocessing: Option<(Vec>, Vec)>, - #[allow(clippy::type_complexity)] - signing: Option<(AlgorithmSignatureMachine, Vec)>, -} - -impl fmt::Debug for SlashReportSigner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("SlashReportSigner") - .field("session", &self.session) - .field("report", &self.report) - .field("attempt", &self.attempt) - .field("preprocessing", &self.preprocessing.is_some()) - .field("signing", &self.signing.is_some()) - .finish_non_exhaustive() - } -} - -impl SlashReportSigner { - pub fn new( - txn: &mut impl DbTxn, - network: NetworkId, - session: Session, - keys: Vec>, - report: Vec<([u8; 32], u32)>, - attempt: u32, - ) -> Option<(SlashReportSigner, ProcessorMessage)> { - assert!(!keys.is_empty()); - - if Completed::get(txn, session).is_some() { - return None; - } - - if Attempt::get(txn, session, attempt).is_some() { - warn!( - "already attempted signing slash report for session {:?}, attempt #{}. {}", - session, attempt, "this is an error if we didn't reboot", - ); - return None; - } - Attempt::set(txn, session, attempt, &()); - - info!("signing slash report for session {:?} with attempt #{}", session, attempt); - - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &keys { - // b"substrate" is a literal from sp-core - let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); - preprocesses.push(preprocess); - } - let preprocessing = Some((machines, preprocesses)); - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::SlashReport, attempt }; - - Some(( - SlashReportSigner { network, session, keys, report, attempt, preprocessing, signing: None }, - ProcessorMessage::SlashReportPreprocess { - id: substrate_sign_id, - preprocesses: serialized_preprocesses, - }, - )) - } - - #[must_use] - pub fn handle( - &mut self, - txn: &mut impl DbTxn, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::CosignSubstrateBlock { .. } => { - panic!("SlashReportSigner passed CosignSubstrateBlock") - } - - CoordinatorMessage::SignSlashReport { .. } => { - panic!("SlashReportSigner passed SignSlashReport") - } - - CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { - assert_eq!(id.session, self.session); - assert_eq!(id.id, SubstrateSignableId::SlashReport); - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than SlashReportSigner is signing") - } - - let (machines, our_preprocesses) = match self.preprocessing.take() { - // Either rebooted or RPC error, or some invariant - None => { - warn!("not preprocessing. 
this is an error if we didn't reboot"); - return None; - } - Some(preprocess) => preprocess, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !preprocess_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - let (machine, share) = match machine.sign( - preprocesses, - &report_slashes_message( - &ValidatorSet { network: self.network, session: self.session }, - &self - .report - .clone() - .into_iter() - .map(|(validator, points)| (Public(validator), points)) - .collect::>(), - ), - ) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - - let mut share_bytes = [0; 32]; - share_bytes.copy_from_slice(&share.serialize()); - serialized_shares.push(share_bytes); - - shares.push(share); - } - self.signing = Some((signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some(ProcessorMessage::SubstrateShare { id, shares: serialized_shares }) - } - - CoordinatorMessage::SubstrateShares { id, shares } => { - assert_eq!(id.session, self.session); - assert_eq!(id.id, SubstrateSignableId::SlashReport); - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than SlashReportSigner is signing") - } - - let (machine, our_shares) = match self.signing.take() { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.is_some() { - panic!("never preprocessed yet signing?"); - } - - warn!("not preprocessing. 
this is an error if we didn't reboot"); - return None; - } - Some(signing) => signing, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !share_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let sig = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - - info!("signed slash report for session {:?} with attempt #{}", self.session, id.attempt); - - Completed::set(txn, self.session, &()); - - Some(ProcessorMessage::SignedSlashReport { - session: self.session, - signature: sig.to_bytes().to_vec(), - }) - } - CoordinatorMessage::BatchReattempt { .. } => { - panic!("BatchReattempt passed to SlashReportSigner") - } - } - } -} From fe9ca4c1c4423df9a1cc69238990d47dad0179ae Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 10 Sep 2024 03:48:06 -0400 Subject: [PATCH 091/179] Bitcoin Output/Transaction definitions --- Cargo.lock | 12 +- processor/bitcoin/Cargo.toml | 14 +- processor/bitcoin/src/block.rs | 0 processor/bitcoin/src/lib.rs | 198 ++-------------------- processor/bitcoin/src/output.rs | 133 +++++++++++++++ processor/bitcoin/src/transaction.rs | 170 +++++++++++++++++++ processor/primitives/src/lib.rs | 19 ++- processor/primitives/src/output.rs | 17 +- processor/primitives/src/payment.rs | 4 +- processor/scanner/src/db.rs | 15 +- processor/scanner/src/lib.rs | 5 +- processor/scanner/src/report/db.rs | 7 +- processor/scheduler/primitives/src/lib.rs | 9 +- processor/signers/src/transaction/mod.rs | 2 + substrate/client/Cargo.toml | 1 + substrate/client/src/networks/bitcoin.rs | 191 ++++++++++++--------- substrate/primitives/src/lib.rs | 6 +- 17 files changed, 504 insertions(+), 299 deletions(-) create mode 100644 processor/bitcoin/src/block.rs create mode 100644 processor/bitcoin/src/output.rs create mode 100644 processor/bitcoin/src/transaction.rs diff --git a/Cargo.lock b/Cargo.lock index 81e3d1dec..ee8c8a998 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8127,18 +8127,21 @@ dependencies = [ "async-trait", "bitcoin-serai", "borsh", - "const-hex", + "ciphersuite", "env_logger", - "hex", - "k256", + "flexible-transcript", "log", + "modular-frost", "parity-scale-codec", + "rand_core", "secp256k1", + "serai-client", "serai-db", "serai-env", "serai-message-queue", "serai-processor-messages", - "serde_json", + "serai-processor-primitives", + "serai-processor-scheduler-primitives", "tokio", "zalloc", ] @@ -8151,6 +8154,7 @@ dependencies = [ "bitcoin", "bitvec", "blake2", + "borsh", "ciphersuite", "dockertest", "frame-system", diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml index a57495427..656c7c40c 
100644 --- a/processor/bitcoin/Cargo.toml +++ b/processor/bitcoin/Cargo.toml @@ -18,14 +18,15 @@ workspace = true [dependencies] async-trait = { version = "0.1", default-features = false } +rand_core = { version = "0.6", default-features = false } -const-hex = { version = "1", default-features = false } -hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -serde_json = { version = "1", default-features = false, features = ["std"] } -k256 = { version = "^0.13.1", default-features = false, features = ["std"] } +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } + secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"] } bitcoin-serai = { path = "../../networks/bitcoin", default-features = false, features = ["std"] } @@ -37,8 +38,13 @@ zalloc = { path = "../../common/zalloc" } serai-db = { path = "../../common/db" } serai-env = { path = "../../common/env" } +serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] } + messages = { package = "serai-processor-messages", path = "../messages" } +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } + message-queue = { package = "serai-message-queue", path = "../../message-queue" } [features] diff --git a/processor/bitcoin/src/block.rs b/processor/bitcoin/src/block.rs new file mode 100644 index 000000000..e69de29bb diff --git a/processor/bitcoin/src/lib.rs b/processor/bitcoin/src/lib.rs index bccdc2861..112d8fd35 100644 --- a/processor/bitcoin/src/lib.rs +++ b/processor/bitcoin/src/lib.rs @@ -2,7 +2,15 @@ #![doc = include_str!("../README.md")] #![deny(missing_docs)] -use std::{sync::OnceLock, time::Duration, io, collections::HashMap}; +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc = + zalloc::ZeroizingAlloc(std::alloc::System); + +mod output; +mod transaction; + +/* +use std::{sync::LazyLock, time::Duration, io, collections::HashMap}; use async_trait::async_trait; @@ -49,127 +57,9 @@ use serai_client::{ primitives::{MAX_DATA_LEN, Coin, NetworkId, Amount, Balance}, networks::bitcoin::Address, }; +*/ -use crate::{ - networks::{ - NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, - Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, - }, - Payment, - multisigs::scheduler::utxo::Scheduler, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct OutputId(pub [u8; 36]); -impl Default for OutputId { - fn default() -> Self { - Self([0; 36]) - } -} -impl AsRef<[u8]> for OutputId { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} -impl AsMut<[u8]> for OutputId { - fn as_mut(&mut self) -> &mut [u8] { - self.0.as_mut() - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Output { - kind: OutputType, - presumed_origin: Option
<Address>
, - output: ReceivedOutput, - data: Vec, -} - -impl OutputTrait for Output { - type Id = OutputId; - - fn kind(&self) -> OutputType { - self.kind - } - - fn id(&self) -> Self::Id { - let mut res = OutputId::default(); - self.output.outpoint().consensus_encode(&mut res.as_mut()).unwrap(); - debug_assert_eq!( - { - let mut outpoint = vec![]; - self.output.outpoint().consensus_encode(&mut outpoint).unwrap(); - outpoint - }, - res.as_ref().to_vec() - ); - res - } - - fn tx_id(&self) -> [u8; 32] { - let mut hash = *self.output.outpoint().txid.as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - - fn key(&self) -> ProjectivePoint { - let script = &self.output.output().script_pubkey; - assert!(script.is_p2tr()); - let Instruction::PushBytes(key) = script.instructions_minimal().last().unwrap().unwrap() else { - panic!("last item in v1 Taproot script wasn't bytes") - }; - let key = XOnlyPublicKey::from_slice(key.as_ref()) - .expect("last item in v1 Taproot script wasn't x-only public key"); - Secp256k1::read_G(&mut key.public_key(Parity::Even).serialize().as_slice()).unwrap() - - (ProjectivePoint::GENERATOR * self.output.offset()) - } - - fn presumed_origin(&self) -> Option
{ - self.presumed_origin.clone() - } - - fn balance(&self) -> Balance { - Balance { coin: Coin::Bitcoin, amount: Amount(self.output.value()) } - } - - fn data(&self) -> &[u8] { - &self.data - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - self.kind.write(writer)?; - let presumed_origin: Option> = self.presumed_origin.clone().map(Into::into); - writer.write_all(&presumed_origin.encode())?; - self.output.write(writer)?; - writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?; - writer.write_all(&self.data) - } - - fn read(mut reader: &mut R) -> io::Result { - Ok(Output { - kind: OutputType::read(reader)?, - presumed_origin: { - let mut io_reader = scale::IoReader(reader); - let res = Option::>::decode(&mut io_reader) - .unwrap() - .map(|address| Address::try_from(address).unwrap()); - reader = io_reader.0; - res - }, - output: ReceivedOutput::read(reader)?, - data: { - let mut data_len = [0; 2]; - reader.read_exact(&mut data_len)?; - - let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; - reader.read_exact(&mut data)?; - data - }, - }) - } -} - +/* #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct Fee(u64); @@ -201,71 +91,6 @@ impl TransactionTrait for Transaction { } } -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Eventuality([u8; 32]); - -#[derive(Clone, PartialEq, Eq, Default, Debug)] -pub struct EmptyClaim; -impl AsRef<[u8]> for EmptyClaim { - fn as_ref(&self) -> &[u8] { - &[] - } -} -impl AsMut<[u8]> for EmptyClaim { - fn as_mut(&mut self) -> &mut [u8] { - &mut [] - } -} - -impl EventualityTrait for Eventuality { - type Claim = EmptyClaim; - type Completion = Transaction; - - fn lookup(&self) -> Vec { - self.0.to_vec() - } - - fn read(reader: &mut R) -> io::Result { - let mut id = [0; 32]; - reader - .read_exact(&mut id) - .map_err(|_| io::Error::other("couldn't decode ID in eventuality"))?; - Ok(Eventuality(id)) - } - fn serialize(&self) -> Vec { - self.0.to_vec() - } - - fn claim(_: &Transaction) -> EmptyClaim { - EmptyClaim - } - fn serialize_completion(completion: &Transaction) -> Vec { - let mut buf = vec![]; - completion.consensus_encode(&mut buf).unwrap(); - buf - } - fn read_completion(reader: &mut R) -> io::Result { - Transaction::consensus_decode(&mut io::BufReader::with_capacity(0, reader)) - .map_err(|e| io::Error::other(format!("{e}"))) - } -} - -#[derive(Clone, Debug)] -pub struct SignableTransaction { - actual: BSignableTransaction, -} -impl PartialEq for SignableTransaction { - fn eq(&self, other: &SignableTransaction) -> bool { - self.actual == other.actual - } -} -impl Eq for SignableTransaction {} -impl SignableTransactionTrait for SignableTransaction { - fn fee(&self) -> u64 { - self.actual.fee() - } -} - #[async_trait] impl BlockTrait for Block { type Id = [u8; 32]; @@ -944,3 +769,4 @@ impl Network for Bitcoin { impl UtxoNetwork for Bitcoin { const MAX_INPUTS: usize = MAX_INPUTS; } +*/ diff --git a/processor/bitcoin/src/output.rs b/processor/bitcoin/src/output.rs new file mode 100644 index 000000000..cc6243192 --- /dev/null +++ b/processor/bitcoin/src/output.rs @@ -0,0 +1,133 @@ +use std::io; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::{ + bitcoin::{ + hashes::Hash as HashTrait, + key::{Parity, XOnlyPublicKey}, + consensus::Encodable, + script::Instruction, + }, + wallet::ReceivedOutput as WalletOutput, +}; + +use scale::{Encode, Decode, IoReader}; +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::{ + primitives::{Coin, Amount, Balance, ExternalAddress}, + 
networks::bitcoin::Address, +}; + +use primitives::{OutputType, ReceivedOutput}; + +#[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] +pub(crate) struct OutputId([u8; 36]); +impl Default for OutputId { + fn default() -> Self { + Self([0; 36]) + } +} +impl AsRef<[u8]> for OutputId { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} +impl AsMut<[u8]> for OutputId { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Output { + kind: OutputType, + presumed_origin: Option
, + output: WalletOutput, + data: Vec, +} + +impl ReceivedOutput<::G, Address> for Output { + type Id = OutputId; + type TransactionId = [u8; 32]; + + fn kind(&self) -> OutputType { + self.kind + } + + fn id(&self) -> Self::Id { + let mut id = OutputId::default(); + self.output.outpoint().consensus_encode(&mut id.as_mut()).unwrap(); + id + } + + fn transaction_id(&self) -> Self::TransactionId { + self.output.outpoint().txid.to_raw_hash().to_byte_array() + } + + fn key(&self) -> ::G { + // We read the key from the script pubkey so we don't have to independently store it + let script = &self.output.output().script_pubkey; + + // These assumptions are safe since it's an output we successfully scanned + assert!(script.is_p2tr()); + let Instruction::PushBytes(key) = script.instructions_minimal().last().unwrap().unwrap() else { + panic!("last item in v1 Taproot script wasn't bytes") + }; + let key = XOnlyPublicKey::from_slice(key.as_ref()) + .expect("last item in v1 Taproot script wasn't a valid x-only public key"); + + // Convert to a full key + let key = key.public_key(Parity::Even); + // Convert to a k256 key (from libsecp256k1) + let output_key = Secp256k1::read_G(&mut key.serialize().as_slice()).unwrap(); + // The output's key minus the output's offset is the root key + output_key - (::G::GENERATOR * self.output.offset()) + } + + fn presumed_origin(&self) -> Option
{ + self.presumed_origin.clone() + } + + fn balance(&self) -> Balance { + Balance { coin: Coin::Bitcoin, amount: Amount(self.output.value()) } + } + + fn data(&self) -> &[u8] { + &self.data + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + self.kind.write(writer)?; + let presumed_origin: Option = self.presumed_origin.clone().map(Into::into); + writer.write_all(&presumed_origin.encode())?; + self.output.write(writer)?; + writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?; + writer.write_all(&self.data) + } + + fn read(mut reader: &mut R) -> io::Result { + Ok(Output { + kind: OutputType::read(reader)?, + presumed_origin: { + Option::::decode(&mut IoReader(&mut reader)) + .map_err(|e| io::Error::other(format!("couldn't decode ExternalAddress: {e:?}")))? + .map(|address| { + Address::try_from(address) + .map_err(|()| io::Error::other("couldn't decode Address from ExternalAddress")) + }) + .transpose()? + }, + output: WalletOutput::read(reader)?, + data: { + let mut data_len = [0; 2]; + reader.read_exact(&mut data_len)?; + + let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; + reader.read_exact(&mut data)?; + data + }, + }) + } +} diff --git a/processor/bitcoin/src/transaction.rs b/processor/bitcoin/src/transaction.rs new file mode 100644 index 000000000..ef48d3f01 --- /dev/null +++ b/processor/bitcoin/src/transaction.rs @@ -0,0 +1,170 @@ +use std::io; + +use rand_core::{RngCore, CryptoRng}; + +use transcript::{Transcript, RecommendedTranscript}; +use ciphersuite::Secp256k1; +use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; + +use bitcoin_serai::{ + bitcoin::{ + consensus::{Encodable, Decodable}, + ScriptBuf, Transaction as BTransaction, + }, + wallet::{ + ReceivedOutput, TransactionError, SignableTransaction as BSignableTransaction, + TransactionMachine, + }, +}; + +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::networks::bitcoin::Address; + +use crate::output::OutputId; + +#[derive(Clone, Debug)] +pub(crate) struct Transaction(BTransaction); + +impl From for Transaction { + fn from(tx: BTransaction) -> Self { + Self(tx) + } +} + +impl scheduler::Transaction for Transaction { + fn read(reader: &mut impl io::Read) -> io::Result { + let tx = + BTransaction::consensus_decode(&mut io::BufReader::new(reader)).map_err(io::Error::other)?; + Ok(Self(tx)) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + let mut writer = io::BufWriter::new(writer); + self.0.consensus_encode(&mut writer)?; + writer.into_inner()?; + Ok(()) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct SignableTransaction { + inputs: Vec, + payments: Vec<(Address, u64)>, + change: Option
, + data: Option>, + fee_per_vbyte: u64, +} + +impl SignableTransaction { + fn signable(self) -> Result { + BSignableTransaction::new( + self.inputs, + &self + .payments + .iter() + .cloned() + .map(|(address, amount)| (ScriptBuf::from(address), amount)) + .collect::>(), + self.change.map(ScriptBuf::from), + self.data, + self.fee_per_vbyte, + ) + } +} + +#[derive(Clone)] +pub(crate) struct ClonableTransctionMachine(SignableTransaction, ThresholdKeys); +impl PreprocessMachine for ClonableTransctionMachine { + type Preprocess = ::Preprocess; + type Signature = ::Signature; + type SignMachine = ::SignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + self + .0 + .signable() + .expect("signing an invalid SignableTransaction") + .multisig(&self.1, RecommendedTranscript::new(b"Serai Processor Bitcoin Transaction")) + .expect("incorrect keys used for SignableTransaction") + .preprocess(rng) + } +} + +impl scheduler::SignableTransaction for SignableTransaction { + type Transaction = Transaction; + type Ciphersuite = Secp256k1; + type PreprocessMachine = ClonableTransctionMachine; + + fn read(reader: &mut impl io::Read) -> io::Result { + let inputs = { + let mut input_len = [0; 4]; + reader.read_exact(&mut input_len)?; + let mut inputs = vec![]; + for _ in 0 .. u32::from_le_bytes(input_len) { + inputs.push(ReceivedOutput::read(reader)?); + } + inputs + }; + + let payments = <_>::deserialize_reader(reader)?; + let change = <_>::deserialize_reader(reader)?; + let data = <_>::deserialize_reader(reader)?; + let fee_per_vbyte = <_>::deserialize_reader(reader)?; + + Ok(Self { inputs, payments, change, data, fee_per_vbyte }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?; + for input in &self.inputs { + input.write(writer)?; + } + + self.payments.serialize(writer)?; + self.change.serialize(writer)?; + self.data.serialize(writer)?; + self.fee_per_vbyte.serialize(writer)?; + + Ok(()) + } + + fn id(&self) -> [u8; 32] { + self.clone().signable().unwrap().txid() + } + + fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine { + ClonableTransctionMachine(self, keys) + } +} + +#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub(crate) struct Eventuality { + txid: [u8; 32], + singular_spent_output: Option, +} + +impl primitives::Eventuality for Eventuality { + type OutputId = OutputId; + + fn id(&self) -> [u8; 32] { + self.txid + } + + // We define the lookup as our ID since the resolving transaction only has a singular possible ID + fn lookup(&self) -> Vec { + self.txid.to_vec() + } + + fn singular_spent_output(&self) -> Option { + self.singular_spent_output.clone() + } + + fn read(reader: &mut impl io::Read) -> io::Result { + Self::deserialize_reader(reader) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.serialize(writer) + } +} diff --git a/processor/primitives/src/lib.rs b/processor/primitives/src/lib.rs index 4e45fa8f3..cc915ca2f 100644 --- a/processor/primitives/src/lib.rs +++ b/processor/primitives/src/lib.rs @@ -46,7 +46,24 @@ pub trait Id: + BorshDeserialize { } -impl Id for [u8; N] where [u8; N]: Default {} +impl< + I: Send + + Sync + + Clone + + Default + + PartialEq + + Eq + + Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Debug + + Encode + + Decode + + BorshSerialize + + BorshDeserialize, + > Id for I +{ +} /// A wrapper for a group element which implements the scale/borsh traits. 
#[derive(Clone, Copy, PartialEq, Eq, Debug)] diff --git a/processor/primitives/src/output.rs b/processor/primitives/src/output.rs index cbfe59f3f..76acde600 100644 --- a/processor/primitives/src/output.rs +++ b/processor/primitives/src/output.rs @@ -19,10 +19,19 @@ pub trait Address: + BorshSerialize + BorshDeserialize { - /// Write this address. - fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; - /// Read an address. - fn read(reader: &mut impl io::Read) -> io::Result; +} +// This casts a wide net, yet it only implements `Address` for things `Into` so +// it should only implement this for addresses +impl< + A: Send + + Sync + + Clone + + Into + + TryFrom + + BorshSerialize + + BorshDeserialize, + > Address for A +{ } /// The type of the output. diff --git a/processor/primitives/src/payment.rs b/processor/primitives/src/payment.rs index 67a5bbadd..4c1e04f47 100644 --- a/processor/primitives/src/payment.rs +++ b/processor/primitives/src/payment.rs @@ -48,7 +48,7 @@ impl Payment { /// Read a Payment. pub fn read(reader: &mut impl io::Read) -> io::Result { - let address = A::read(reader)?; + let address = A::deserialize_reader(reader)?; let reader = &mut IoReader(reader); let balance = Balance::decode(reader).map_err(io::Error::other)?; let data = Option::>::decode(reader).map_err(io::Error::other)?; @@ -56,7 +56,7 @@ impl Payment { } /// Write the Payment. pub fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { - self.address.write(writer).unwrap(); + self.address.serialize(writer)?; self.balance.encode_to(writer); self.data.encode_to(writer); Ok(()) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 52a364193..ef37ef383 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -10,7 +10,7 @@ use serai_db::{Get, DbTxn, create_db, db_channel}; use serai_in_instructions_primitives::{InInstructionWithBalance, Batch}; use serai_coins_primitives::OutInstructionWithBalance; -use primitives::{EncodableG, Address, ReceivedOutput}; +use primitives::{EncodableG, ReceivedOutput}; use crate::{ lifetime::{LifetimeStage, Lifetime}, @@ -49,7 +49,7 @@ impl OutputWithInInstruction { let mut opt = [0xff]; reader.read_exact(&mut opt)?; assert!((opt[0] == 0) || (opt[0] == 1)); - (opt[0] == 1).then(|| AddressFor::::read(reader)).transpose()? + (opt[0] == 1).then(|| AddressFor::::deserialize_reader(reader)).transpose()? 
}; let in_instruction = InInstructionWithBalance::decode(&mut IoReader(reader)).map_err(io::Error::other)?; @@ -59,7 +59,7 @@ impl OutputWithInInstruction { self.output.write(writer)?; if let Some(return_address) = &self.return_address { writer.write_all(&[1])?; - return_address.write(writer)?; + return_address.serialize(writer)?; } else { writer.write_all(&[0])?; } @@ -278,7 +278,7 @@ impl ScannerGlobalDb { buf.read_exact(&mut opt).unwrap(); assert!((opt[0] == 0) || (opt[0] == 1)); - let address = (opt[0] == 1).then(|| AddressFor::::read(&mut buf).unwrap()); + let address = (opt[0] == 1).then(|| AddressFor::::deserialize_reader(&mut buf).unwrap()); Some((address, InInstructionWithBalance::decode(&mut IoReader(buf)).unwrap())) } } @@ -338,7 +338,7 @@ impl ScanToEventualityDb { let mut buf = vec![]; if let Some(address) = &forward.return_address { buf.write_all(&[1]).unwrap(); - address.write(&mut buf).unwrap(); + address.serialize(&mut buf).unwrap(); } else { buf.write_all(&[0]).unwrap(); } @@ -435,7 +435,8 @@ impl Returnable { reader.read_exact(&mut opt).unwrap(); assert!((opt[0] == 0) || (opt[0] == 1)); - let return_address = (opt[0] == 1).then(|| AddressFor::::read(reader)).transpose()?; + let return_address = + (opt[0] == 1).then(|| AddressFor::::deserialize_reader(reader)).transpose()?; let in_instruction = InInstructionWithBalance::decode(&mut IoReader(reader)).map_err(io::Error::other)?; @@ -444,7 +445,7 @@ impl Returnable { fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { if let Some(return_address) = &self.return_address { writer.write_all(&[1])?; - return_address.write(writer)?; + return_address.serialize(writer)?; } else { writer.write_all(&[0])?; } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 5919ff7ea..9831d41a0 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -7,6 +7,7 @@ use std::{io, collections::HashMap}; use group::GroupEncoding; +use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, Db}; use serai_primitives::{NetworkId, Coin, Amount}; @@ -179,12 +180,12 @@ pub struct Return { impl Return { pub(crate) fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { - self.address.write(writer)?; + self.address.serialize(writer)?; self.output.write(writer) } pub(crate) fn read(reader: &mut impl io::Read) -> io::Result { - let address = AddressFor::::read(reader)?; + let address = AddressFor::::deserialize_reader(reader)?; let output = OutputFor::::read(reader)?; Ok(Return { address, output }) } diff --git a/processor/scanner/src/report/db.rs b/processor/scanner/src/report/db.rs index 052397791..10a3f6bb5 100644 --- a/processor/scanner/src/report/db.rs +++ b/processor/scanner/src/report/db.rs @@ -4,12 +4,11 @@ use std::io::{Read, Write}; use group::GroupEncoding; use scale::{Encode, Decode, IoReader}; +use borsh::{BorshSerialize, BorshDeserialize}; use serai_db::{Get, DbTxn, create_db}; use serai_primitives::Balance; -use primitives::Address; - use crate::{ScannerFeed, KeyFor, AddressFor}; create_db!( @@ -92,7 +91,7 @@ impl ReportDb { for return_information in return_information { if let Some(ReturnInformation { address, balance }) = return_information { buf.write_all(&[1]).unwrap(); - address.write(&mut buf).unwrap(); + address.serialize(&mut buf).unwrap(); balance.encode_to(&mut buf); } else { buf.write_all(&[0]).unwrap(); @@ -115,7 +114,7 @@ impl ReportDb { assert!((opt[0] == 0) || (opt[0] == 1)); res.push((opt[0] == 1).then(|| { - let address = AddressFor::::read(&mut 
buf).unwrap(); + let address = AddressFor::::deserialize_reader(&mut buf).unwrap(); let balance = Balance::decode(&mut IoReader(&mut buf)).unwrap(); ReturnInformation { address, balance } })); diff --git a/processor/scheduler/primitives/src/lib.rs b/processor/scheduler/primitives/src/lib.rs index f146027d5..3c214d159 100644 --- a/processor/scheduler/primitives/src/lib.rs +++ b/processor/scheduler/primitives/src/lib.rs @@ -11,7 +11,7 @@ use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; use serai_db::DbTxn; /// A transaction. -pub trait Transaction: Sized { +pub trait Transaction: Sized + Send { /// Read a `Transaction`. fn read(reader: &mut impl io::Read) -> io::Result; /// Write a `Transaction`. @@ -20,10 +20,12 @@ pub trait Transaction: Sized { /// A signable transaction. pub trait SignableTransaction: 'static + Sized + Send + Sync + Clone { + /// The underlying transaction type. + type Transaction: Transaction; /// The ciphersuite used to sign this transaction. type Ciphersuite: Ciphersuite; /// The preprocess machine for the signing protocol for this transaction. - type PreprocessMachine: Clone + PreprocessMachine; + type PreprocessMachine: Clone + PreprocessMachine>; /// Read a `SignableTransaction`. fn read(reader: &mut impl io::Read) -> io::Result; @@ -42,8 +44,7 @@ pub trait SignableTransaction: 'static + Sized + Send + Sync + Clone { } /// The transaction type for a SignableTransaction. -pub type TransactionFor = - <::PreprocessMachine as PreprocessMachine>::Signature; +pub type TransactionFor = ::Transaction; mod db { use serai_db::{Get, DbTxn, create_db, db_channel}; diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs index 9311eb32b..b9b62e753 100644 --- a/processor/signers/src/transaction/mod.rs +++ b/processor/signers/src/transaction/mod.rs @@ -185,6 +185,8 @@ impl> } } Response::Signature { id, signature: signed_tx } => { + let signed_tx: TransactionFor = signed_tx.into(); + // Save this transaction to the database { let mut buf = Vec::with_capacity(256); diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml index e653c9af6..5cba05f07 100644 --- a/substrate/client/Cargo.toml +++ b/substrate/client/Cargo.toml @@ -24,6 +24,7 @@ bitvec = { version = "1", default-features = false, features = ["alloc", "serde" hex = "0.4" scale = { package = "parity-scale-codec", version = "3" } +borsh = { version = "1" } serde = { version = "1", features = ["derive"], optional = true } serde_json = { version = "1", optional = true } diff --git a/substrate/client/src/networks/bitcoin.rs b/substrate/client/src/networks/bitcoin.rs index 502bfb440..28f660536 100644 --- a/substrate/client/src/networks/bitcoin.rs +++ b/substrate/client/src/networks/bitcoin.rs @@ -1,6 +1,7 @@ use core::{str::FromStr, fmt}; use scale::{Encode, Decode}; +use borsh::{BorshSerialize, BorshDeserialize}; use bitcoin::{ hashes::{Hash as HashTrait, hash160::Hash}, @@ -10,47 +11,10 @@ use bitcoin::{ address::{AddressType, NetworkChecked, Address as BAddress}, }; -#[derive(Clone, Eq, Debug)] -pub struct Address(ScriptBuf); - -impl PartialEq for Address { - fn eq(&self, other: &Self) -> bool { - // Since Serai defines the Bitcoin-address specification as a variant of the script alone, - // define equivalency as the script alone - self.0 == other.0 - } -} - -impl From
for ScriptBuf { - fn from(addr: Address) -> ScriptBuf { - addr.0 - } -} - -impl FromStr for Address { - type Err = (); - fn from_str(str: &str) -> Result { - Address::new( - BAddress::from_str(str) - .map_err(|_| ())? - .require_network(Network::Bitcoin) - .map_err(|_| ())? - .script_pubkey(), - ) - .ok_or(()) - } -} - -impl fmt::Display for Address { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - BAddress::::from_script(&self.0, Network::Bitcoin) - .map_err(|_| fmt::Error)? - .fmt(f) - } -} +use crate::primitives::ExternalAddress; -// SCALE-encoded variant of Monero addresses. -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +// SCALE-encodable representation of Bitcoin addresses, used internally. +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] enum EncodedAddress { P2PKH([u8; 20]), P2SH([u8; 20]), @@ -59,34 +23,13 @@ enum EncodedAddress { P2TR([u8; 32]), } -impl TryFrom> for Address { +impl TryFrom<&ScriptBuf> for EncodedAddress { type Error = (); - fn try_from(data: Vec) -> Result { - Ok(Address(match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? { - EncodedAddress::P2PKH(hash) => { - ScriptBuf::new_p2pkh(&PubkeyHash::from_raw_hash(Hash::from_byte_array(hash))) - } - EncodedAddress::P2SH(hash) => { - ScriptBuf::new_p2sh(&ScriptHash::from_raw_hash(Hash::from_byte_array(hash))) - } - EncodedAddress::P2WPKH(hash) => { - ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) - } - EncodedAddress::P2WSH(hash) => { - ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) - } - EncodedAddress::P2TR(key) => { - ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V1, &key).unwrap()) - } - })) - } -} - -fn try_to_vec(addr: &Address) -> Result, ()> { - let parsed_addr = - BAddress::::from_script(&addr.0, Network::Bitcoin).map_err(|_| ())?; - Ok( - (match parsed_addr.address_type() { + fn try_from(script_buf: &ScriptBuf) -> Result { + // This uses mainnet as our encodings don't specify a network. + let parsed_addr = + BAddress::::from_script(script_buf, Network::Bitcoin).map_err(|_| ())?; + Ok(match parsed_addr.address_type() { Some(AddressType::P2pkh) => { EncodedAddress::P2PKH(*parsed_addr.pubkey_hash().unwrap().as_raw_hash().as_byte_array()) } @@ -110,23 +53,119 @@ fn try_to_vec(addr: &Address) -> Result, ()> { } _ => Err(())?, }) - .encode(), - ) + } } -impl From
for Vec { - fn from(addr: Address) -> Vec { +impl From for ScriptBuf { + fn from(encoded: EncodedAddress) -> Self { + match encoded { + EncodedAddress::P2PKH(hash) => { + ScriptBuf::new_p2pkh(&PubkeyHash::from_raw_hash(Hash::from_byte_array(hash))) + } + EncodedAddress::P2SH(hash) => { + ScriptBuf::new_p2sh(&ScriptHash::from_raw_hash(Hash::from_byte_array(hash))) + } + EncodedAddress::P2WPKH(hash) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) + } + EncodedAddress::P2WSH(hash) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) + } + EncodedAddress::P2TR(key) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V1, &key).unwrap()) + } + } + } +} + +/// A Bitcoin address usable with Serai. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Address(ScriptBuf); + +// Support consuming into the underlying ScriptBuf. +impl From
for ScriptBuf { + fn from(addr: Address) -> ScriptBuf { + addr.0 + } +} + +impl From<&Address> for BAddress { + fn from(addr: &Address) -> BAddress { + // This fails if the script doesn't have an address representation, yet all our representable + // addresses' scripts do + BAddress::::from_script(&addr.0, Network::Bitcoin).unwrap() + } +} + +// Support converting a string into an address. +impl FromStr for Address { + type Err = (); + fn from_str(str: &str) -> Result { + Address::new( + BAddress::from_str(str) + .map_err(|_| ())? + .require_network(Network::Bitcoin) + .map_err(|_| ())? + .script_pubkey(), + ) + .ok_or(()) + } +} + +// Support converting an address into a string. +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + BAddress::from(self).fmt(f) + } +} + +impl TryFrom for Address { + type Error = (); + fn try_from(data: ExternalAddress) -> Result { + // Decode as an EncodedAddress, then map to a ScriptBuf + let mut data = data.as_ref(); + let encoded = EncodedAddress::decode(&mut data).map_err(|_| ())?; + if !data.is_empty() { + Err(())? + } + Ok(Address(ScriptBuf::from(encoded))) + } +} + +impl From
for EncodedAddress { + fn from(addr: Address) -> EncodedAddress { // Safe since only encodable addresses can be created - try_to_vec(&addr).unwrap() + EncodedAddress::try_from(&addr.0).unwrap() + } +} + +impl From
for ExternalAddress { + fn from(addr: Address) -> ExternalAddress { + // Safe since all variants are fixed-length and fit into MAX_ADDRESS_LEN + ExternalAddress::new(EncodedAddress::from(addr).encode()).unwrap() + } +} + +impl BorshSerialize for Address { + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + EncodedAddress::from(self.clone()).serialize(writer) + } +} + +impl BorshDeserialize for Address { + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + Ok(Self(ScriptBuf::from(EncodedAddress::deserialize_reader(reader)?))) } } impl Address { - pub fn new(address: ScriptBuf) -> Option { - let res = Self(address); - if try_to_vec(&res).is_ok() { - return Some(res); + /// Create a new Address from a ScriptBuf. + pub fn new(script_buf: ScriptBuf) -> Option { + // If we can represent this Script, it's an acceptable address + if EncodedAddress::try_from(&script_buf).is_ok() { + return Some(Self(script_buf)); } + // Else, it isn't acceptable None } } diff --git a/substrate/primitives/src/lib.rs b/substrate/primitives/src/lib.rs index d2c52219e..2cf37e009 100644 --- a/substrate/primitives/src/lib.rs +++ b/substrate/primitives/src/lib.rs @@ -62,7 +62,7 @@ pub fn borsh_deserialize_bounded_vec &[u8] { - self.0.as_ref() - } - #[cfg(feature = "std")] pub fn consume(self) -> Vec { self.0.into_inner() From 526574444619b2b604016eac1cca8009d3350b83 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 10 Sep 2024 06:25:21 -0400 Subject: [PATCH 092/179] Add bitcoin Block trait impl --- processor/bitcoin/src/block.rs | 70 ++++++++++++++++ processor/bitcoin/src/lib.rs | 64 +++------------ processor/bitcoin/src/output.rs | 21 ++++- processor/bitcoin/src/scanner.rs | 131 ++++++++++++++++++++++++++++++ processor/primitives/src/block.rs | 22 ++--- 5 files changed, 239 insertions(+), 69 deletions(-) create mode 100644 processor/bitcoin/src/scanner.rs diff --git a/processor/bitcoin/src/block.rs b/processor/bitcoin/src/block.rs index e69de29bb..304f19e31 100644 --- a/processor/bitcoin/src/block.rs +++ b/processor/bitcoin/src/block.rs @@ -0,0 +1,70 @@ +use std::collections::HashMap; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::bitcoin::block::{Header, Block as BBlock}; + +use serai_client::networks::bitcoin::Address; + +use primitives::{ReceivedOutput, EventualityTracker}; + +use crate::{hash_bytes, scanner::scanner, output::Output, transaction::Eventuality}; + +#[derive(Clone, Debug)] +pub(crate) struct BlockHeader(Header); +impl primitives::BlockHeader for BlockHeader { + fn id(&self) -> [u8; 32] { + hash_bytes(self.0.block_hash().to_raw_hash()) + } + fn parent(&self) -> [u8; 32] { + hash_bytes(self.0.prev_blockhash.to_raw_hash()) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct Block(BBlock); + +#[async_trait::async_trait] +impl primitives::Block for Block { + type Header = BlockHeader; + + type Key = ::G; + type Address = Address; + type Output = Output; + type Eventuality = Eventuality; + + fn id(&self) -> [u8; 32] { + primitives::BlockHeader::id(&BlockHeader(self.0.header)) + } + + fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec { + let scanner = scanner(key); + + let mut res = vec![]; + // We skip the coinbase transaction as its burdened by maturity + for tx in &self.0.txdata[1 ..] 
{ + for output in scanner.scan_transaction(tx) { + res.push(Output::new(key, tx, output)); + } + } + res + } + + #[allow(clippy::type_complexity)] + fn check_for_eventuality_resolutions( + &self, + eventualities: &mut EventualityTracker, + ) -> HashMap< + >::TransactionId, + Self::Eventuality, + > { + let mut res = HashMap::new(); + for tx in &self.0.txdata[1 ..] { + let id = hash_bytes(tx.compute_txid().to_raw_hash()); + if let Some(eventuality) = eventualities.active_eventualities.remove(id.as_slice()) { + res.insert(id, eventuality); + } + } + res + } +} diff --git a/processor/bitcoin/src/lib.rs b/processor/bitcoin/src/lib.rs index 112d8fd35..03c9e9031 100644 --- a/processor/bitcoin/src/lib.rs +++ b/processor/bitcoin/src/lib.rs @@ -6,8 +6,19 @@ static ALLOCATOR: zalloc::ZeroizingAlloc = zalloc::ZeroizingAlloc(std::alloc::System); +mod scanner; + mod output; mod transaction; +mod block; + +pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> [u8; 32] { + use bitcoin_serai::bitcoin::hashes::Hash; + + let mut res = hash.to_byte_array(); + res.reverse(); + res +} /* use std::{sync::LazyLock, time::Duration, io, collections::HashMap}; @@ -299,59 +310,6 @@ impl Bitcoin { } } - // Expected script has to start with SHA256 PUSH MSG_HASH OP_EQUALVERIFY .. - fn segwit_data_pattern(script: &ScriptBuf) -> Option { - let mut ins = script.instructions(); - - // first item should be SHA256 code - if ins.next()?.ok()?.opcode()? != OP_SHA256 { - return Some(false); - } - - // next should be a data push - ins.next()?.ok()?.push_bytes()?; - - // next should be a equality check - if ins.next()?.ok()?.opcode()? != OP_EQUALVERIFY { - return Some(false); - } - - Some(true) - } - - fn extract_serai_data(tx: &Transaction) -> Vec { - // check outputs - let mut data = (|| { - for output in &tx.output { - if output.script_pubkey.is_op_return() { - match output.script_pubkey.instructions_minimal().last() { - Some(Ok(Instruction::PushBytes(data))) => return data.as_bytes().to_vec(), - _ => continue, - } - } - } - vec![] - })(); - - // check inputs - if data.is_empty() { - for input in &tx.input { - let witness = input.witness.to_vec(); - // expected witness at least has to have 2 items, msg and the redeem script. 
- if witness.len() >= 2 { - let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone()); - if Self::segwit_data_pattern(&redeem_script) == Some(true) { - data.clone_from(&witness[witness.len() - 2]); // len() - 1 is the redeem_script - break; - } - } - } - } - - data.truncate(MAX_DATA_LEN.try_into().unwrap()); - data - } - #[cfg(test)] pub fn sign_btc_input_for_p2pkh( tx: &Transaction, diff --git a/processor/bitcoin/src/output.rs b/processor/bitcoin/src/output.rs index cc6243192..c7ed060fb 100644 --- a/processor/bitcoin/src/output.rs +++ b/processor/bitcoin/src/output.rs @@ -8,6 +8,7 @@ use bitcoin_serai::{ key::{Parity, XOnlyPublicKey}, consensus::Encodable, script::Instruction, + transaction::Transaction, }, wallet::ReceivedOutput as WalletOutput, }; @@ -22,6 +23,8 @@ use serai_client::{ use primitives::{OutputType, ReceivedOutput}; +use crate::scanner::{offsets_for_key, presumed_origin, extract_serai_data}; + #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] pub(crate) struct OutputId([u8; 36]); impl Default for OutputId { @@ -48,6 +51,20 @@ pub(crate) struct Output { data: Vec, } +impl Output { + pub fn new(key: ::G, tx: &Transaction, output: WalletOutput) -> Self { + Self { + kind: offsets_for_key(key) + .into_iter() + .find_map(|(kind, offset)| (offset == output.offset()).then_some(kind)) + .expect("scanned output for unknown offset"), + presumed_origin: presumed_origin(tx), + output, + data: extract_serai_data(tx), + } + } +} + impl ReceivedOutput<::G, Address> for Output { type Id = OutputId; type TransactionId = [u8; 32]; @@ -63,7 +80,9 @@ impl ReceivedOutput<::G, Address> for Output { } fn transaction_id(&self) -> Self::TransactionId { - self.output.outpoint().txid.to_raw_hash().to_byte_array() + let mut res = self.output.outpoint().txid.to_raw_hash().to_byte_array(); + res.reverse(); + res } fn key(&self) -> ::G { diff --git a/processor/bitcoin/src/scanner.rs b/processor/bitcoin/src/scanner.rs new file mode 100644 index 000000000..43518b57f --- /dev/null +++ b/processor/bitcoin/src/scanner.rs @@ -0,0 +1,131 @@ +use std::{sync::LazyLock, collections::HashMap}; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::{ + bitcoin::{ + blockdata::opcodes, + script::{Instruction, ScriptBuf}, + Transaction, + }, + wallet::Scanner, +}; + +use serai_client::networks::bitcoin::Address; + +use primitives::OutputType; + +const KEY_DST: &[u8] = b"Serai Bitcoin Processor Key Offset"; +static BRANCH_BASE_OFFSET: LazyLock<::F> = + LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"branch")); +static CHANGE_BASE_OFFSET: LazyLock<::F> = + LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"change")); +static FORWARD_BASE_OFFSET: LazyLock<::F> = + LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"forward")); + +// Unfortunately, we have per-key offsets as it's the root key plus the base offset may not be +// even. While we could tweak the key until all derivations are even, that'd require significantly +// more tweaking. This algorithmic complexity is preferred. 
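+//
+// Illustrative sketch of the derivation performed below (an assumption about usage, not
+// committed code): each non-external `OutputType` maps to `key + tweaked_offset * G`, where
+// the tweaked offset is whatever `Scanner::register_offset` yields for the base offset:
+//   let mut scanner = Scanner::new(key).unwrap();
+//   let tweaked = scanner.register_offset(*BRANCH_BASE_OFFSET).expect("offset collision");
+//   // `tweaked` may differ from `*BRANCH_BASE_OFFSET` if the direct derivation wasn't even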
+pub(crate) fn offsets_for_key( + key: ::G, +) -> HashMap::F> { + let mut offsets = HashMap::from([(OutputType::External, ::F::ZERO)]); + + // We create an actual Bitcoin scanner as upon adding an offset, it yields the tweaked offset + // actually used + let mut scanner = Scanner::new(key).unwrap(); + let mut register = |kind, offset| { + let tweaked_offset = scanner.register_offset(offset).expect("offset collision"); + offsets.insert(kind, tweaked_offset); + }; + + register(OutputType::Branch, *BRANCH_BASE_OFFSET); + register(OutputType::Change, *CHANGE_BASE_OFFSET); + register(OutputType::Forwarded, *FORWARD_BASE_OFFSET); + + offsets +} + +pub(crate) fn scanner(key: ::G) -> Scanner { + let mut scanner = Scanner::new(key).unwrap(); + for (_, offset) in offsets_for_key(key) { + let tweaked_offset = scanner.register_offset(offset).unwrap(); + assert_eq!(tweaked_offset, offset); + } + scanner +} + +pub(crate) fn presumed_origin(tx: &Transaction) -> Option
{ + todo!("TODO") + + /* + let spent_output = { + let input = &tx.input[0]; + let mut spent_tx = input.previous_output.txid.as_raw_hash().to_byte_array(); + spent_tx.reverse(); + let mut tx; + while { + tx = self.rpc.get_transaction(&spent_tx).await; + tx.is_err() + } { + log::error!("couldn't get transaction from bitcoin node: {tx:?}"); + sleep(Duration::from_secs(5)).await; + } + tx.unwrap().output.swap_remove(usize::try_from(input.previous_output.vout).unwrap()) + }; + Address::new(spent_output.script_pubkey) + */ +} + +// Checks if this script matches SHA256 PUSH MSG_HASH OP_EQUALVERIFY .. +fn matches_segwit_data(script: &ScriptBuf) -> Option { + let mut ins = script.instructions(); + + // first item should be SHA256 code + if ins.next()?.ok()?.opcode()? != opcodes::all::OP_SHA256 { + return Some(false); + } + + // next should be a data push + ins.next()?.ok()?.push_bytes()?; + + // next should be a equality check + if ins.next()?.ok()?.opcode()? != opcodes::all::OP_EQUALVERIFY { + return Some(false); + } + + Some(true) +} + +// Extract the data for Serai from a transaction +pub(crate) fn extract_serai_data(tx: &Transaction) -> Vec { + // Check for an OP_RETURN output + let mut data = (|| { + for output in &tx.output { + if output.script_pubkey.is_op_return() { + match output.script_pubkey.instructions_minimal().last() { + Some(Ok(Instruction::PushBytes(data))) => return Some(data.as_bytes().to_vec()), + _ => continue, + } + } + } + None + })(); + + // Check the inputs + if data.is_none() { + for input in &tx.input { + let witness = input.witness.to_vec(); + // The witness has to have at least 2 items, msg and the redeem script + if witness.len() >= 2 { + let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone()); + if matches_segwit_data(&redeem_script) == Some(true) { + data = Some(witness[witness.len() - 2].clone()); // len() - 1 is the redeem_script + break; + } + } + } + } + + data.unwrap_or(vec![]) +} diff --git a/processor/primitives/src/block.rs b/processor/primitives/src/block.rs index 89dff54ff..4f721d027 100644 --- a/processor/primitives/src/block.rs +++ b/processor/primitives/src/block.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use group::{Group, GroupEncoding}; -use crate::{Id, Address, ReceivedOutput, Eventuality, EventualityTracker}; +use crate::{Address, ReceivedOutput, Eventuality, EventualityTracker}; /// A block header from an external network. pub trait BlockHeader: Send + Sync + Sized + Clone + Debug { @@ -16,12 +16,6 @@ pub trait BlockHeader: Send + Sync + Sized + Clone + Debug { fn parent(&self) -> [u8; 32]; } -/// A transaction from an external network. -pub trait Transaction: Send + Sync + Sized { - /// The type used to identify transactions on this external network. - type Id: Id; -} - /// A block from an external network. /// /// A block is defined as a consensus event associated with a set of transactions. It is not @@ -37,14 +31,8 @@ pub trait Block: Send + Sync + Sized + Clone + Debug { type Key: Group + GroupEncoding; /// The type used to represent addresses on this external network. type Address: Address; - /// The type used to represent transactions on this external network. - type Transaction: Transaction; /// The type used to represent received outputs on this external network. - type Output: ReceivedOutput< - Self::Key, - Self::Address, - TransactionId = ::Id, - >; + type Output: ReceivedOutput; /// The type used to represent an Eventuality for a transaction on this external network. 
type Eventuality: Eventuality< OutputId = >::Id, @@ -64,8 +52,12 @@ pub trait Block: Send + Sync + Sized + Clone + Debug { /// /// Returns tbe resolved Eventualities, indexed by the ID of the transactions which resolved /// them. + #[allow(clippy::type_complexity)] fn check_for_eventuality_resolutions( &self, eventualities: &mut EventualityTracker, - ) -> HashMap<::Id, Self::Eventuality>; + ) -> HashMap< + >::TransactionId, + Self::Eventuality, + >; } From 3c331a3a4b0fed792389c7c7274c552ff712c8e0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 10 Sep 2024 06:40:41 -0400 Subject: [PATCH 093/179] Remove bound that WINDOW_LENGTH < CONFIRMATIONS It's unnecessary and not valuable. --- processor/scanner/src/db.rs | 2 +- processor/scanner/src/eventuality/mod.rs | 5 ++--- processor/scanner/src/lib.rs | 4 ++-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index ef37ef383..5fcdc160d 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -120,7 +120,7 @@ impl ScannerGlobalDb { /// A new key MUST NOT be queued to activate a block preceding the finishing of the key prior to /// its prior. There MUST only be two keys active at one time. /// - /// activation_block_number is inclusive, so the key will be scanned for starting at the + /// `activation_block_number` is inclusive, so the key will be scanned for starting at the /// specified block. pub(crate) fn queue_key(txn: &mut impl DbTxn, activation_block_number: u64, key: KeyFor) { // Set the block which has a key activate as notable diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index be5b45554..5d139c6de 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -123,8 +123,8 @@ impl> EventualityTask { block_number: u64, ) -> (Vec>>, Vec<(KeyFor, LifetimeStage)>) { /* - This is proper as the keys for the next-to-scan block (at most `WINDOW_LENGTH` ahead, - which is `<= CONFIRMATIONS`) will be the keys to use here, with only minor edge cases. + This is proper as the keys for the next-to-scan block (at most `WINDOW_LENGTH` ahead) will be + the keys to use here, with only minor edge cases. This may include a key which has yet to activate by our perception. We can simply drop those. @@ -136,7 +136,6 @@ impl> EventualityTask { This also may include a key we've retired which has yet to officially retire. That's fine as we'll do nothing with it, and the Scheduler traits document this behavior. */ - assert!(S::WINDOW_LENGTH <= S::CONFIRMATIONS); let mut keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db) .expect("scanning for a blockchain without any keys set"); // Since the next-to-scan block is ahead of us, drop keys which have yet to actually activate diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 9831d41a0..2c56db35a 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -71,8 +71,8 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { /// The amount of blocks to process in parallel. /// - /// This must be at least `1`. This must be less than or equal to `CONFIRMATIONS`. This value - /// should be the worst-case latency to handle a block divided by the expected block time. + /// This must be at least `1`. This value should be the worst-case latency to handle a block + /// divided by the expected block time. 
const WINDOW_LENGTH: u64; /// The amount of blocks which will occur in 10 minutes (approximate). From eadf2bb871ee0db59b7b6df934ce21487d3f85da Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 10 Sep 2024 07:07:09 -0400 Subject: [PATCH 094/179] Bitcoin ScannerFeed --- Cargo.lock | 1 + processor/bitcoin/Cargo.toml | 1 + processor/bitcoin/src/block.rs | 6 +- processor/bitcoin/src/lib.rs | 9 ++- processor/bitcoin/src/output.rs | 2 +- processor/bitcoin/src/{scanner.rs => scan.rs} | 0 processor/bitcoin/src/scanner_feed.rs | 62 +++++++++++++++++++ processor/scanner/src/lib.rs | 10 ++- 8 files changed, 84 insertions(+), 7 deletions(-) rename processor/bitcoin/src/{scanner.rs => scan.rs} (100%) create mode 100644 processor/bitcoin/src/scanner_feed.rs diff --git a/Cargo.lock b/Cargo.lock index ee8c8a998..b35cda503 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8141,6 +8141,7 @@ dependencies = [ "serai-message-queue", "serai-processor-messages", "serai-processor-primitives", + "serai-processor-scanner", "serai-processor-scheduler-primitives", "tokio", "zalloc", diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml index 656c7c40c..ff14890e2 100644 --- a/processor/bitcoin/Cargo.toml +++ b/processor/bitcoin/Cargo.toml @@ -44,6 +44,7 @@ messages = { package = "serai-processor-messages", path = "../messages" } primitives = { package = "serai-processor-primitives", path = "../primitives" } scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } message-queue = { package = "serai-message-queue", path = "../../message-queue" } diff --git a/processor/bitcoin/src/block.rs b/processor/bitcoin/src/block.rs index 304f19e31..24cccec9b 100644 --- a/processor/bitcoin/src/block.rs +++ b/processor/bitcoin/src/block.rs @@ -8,10 +8,10 @@ use serai_client::networks::bitcoin::Address; use primitives::{ReceivedOutput, EventualityTracker}; -use crate::{hash_bytes, scanner::scanner, output::Output, transaction::Eventuality}; +use crate::{hash_bytes, scan::scanner, output::Output, transaction::Eventuality}; #[derive(Clone, Debug)] -pub(crate) struct BlockHeader(Header); +pub(crate) struct BlockHeader(pub(crate) Header); impl primitives::BlockHeader for BlockHeader { fn id(&self) -> [u8; 32] { hash_bytes(self.0.block_hash().to_raw_hash()) @@ -22,7 +22,7 @@ impl primitives::BlockHeader for BlockHeader { } #[derive(Clone, Debug)] -pub(crate) struct Block(BBlock); +pub(crate) struct Block(pub(crate) BBlock); #[async_trait::async_trait] impl primitives::Block for Block { diff --git a/processor/bitcoin/src/lib.rs b/processor/bitcoin/src/lib.rs index 03c9e9031..bba8629e8 100644 --- a/processor/bitcoin/src/lib.rs +++ b/processor/bitcoin/src/lib.rs @@ -6,12 +6,19 @@ static ALLOCATOR: zalloc::ZeroizingAlloc = zalloc::ZeroizingAlloc(std::alloc::System); -mod scanner; +// Internal utilities for scanning transactions +mod scan; +// Output trait satisfaction mod output; +// Transaction/SignableTransaction/Eventuality trait satisfaction mod transaction; +// Block trait satisfaction mod block; +// ScannerFeed trait satisfaction +mod scanner_feed; + pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> [u8; 32] { use bitcoin_serai::bitcoin::hashes::Hash; diff --git a/processor/bitcoin/src/output.rs b/processor/bitcoin/src/output.rs index c7ed060fb..a783792dd 100644 --- a/processor/bitcoin/src/output.rs +++ b/processor/bitcoin/src/output.rs @@ -23,7 +23,7 @@ use serai_client::{ use 
primitives::{OutputType, ReceivedOutput}; -use crate::scanner::{offsets_for_key, presumed_origin, extract_serai_data}; +use crate::scan::{offsets_for_key, presumed_origin, extract_serai_data}; #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] pub(crate) struct OutputId([u8; 36]); diff --git a/processor/bitcoin/src/scanner.rs b/processor/bitcoin/src/scan.rs similarity index 100% rename from processor/bitcoin/src/scanner.rs rename to processor/bitcoin/src/scan.rs diff --git a/processor/bitcoin/src/scanner_feed.rs b/processor/bitcoin/src/scanner_feed.rs new file mode 100644 index 000000000..73265bfed --- /dev/null +++ b/processor/bitcoin/src/scanner_feed.rs @@ -0,0 +1,62 @@ +use bitcoin_serai::rpc::{RpcError, Rpc as BRpc}; + +use serai_client::primitives::{NetworkId, Coin, Amount}; + +use scanner::ScannerFeed; + +use crate::block::{BlockHeader, Block}; + +#[derive(Clone)] +pub(crate) struct Rpc(BRpc); + +#[async_trait::async_trait] +impl ScannerFeed for Rpc { + const NETWORK: NetworkId = NetworkId::Bitcoin; + const CONFIRMATIONS: u64 = 6; + const WINDOW_LENGTH: u64 = 6; + + const TEN_MINUTES: u64 = 1; + + type Block = Block; + + type EphemeralError = RpcError; + + async fn latest_finalized_block_number(&self) -> Result { + u64::try_from(self.0.get_latest_block_number().await?) + .unwrap() + .checked_sub(Self::CONFIRMATIONS) + .ok_or(RpcError::ConnectionError) + } + + async fn unchecked_block_header_by_number( + &self, + number: u64, + ) -> Result<::Header, Self::EphemeralError> { + Ok(BlockHeader( + self.0.get_block(&self.0.get_block_hash(number.try_into().unwrap()).await?).await?.header, + )) + } + + async fn unchecked_block_by_number( + &self, + number: u64, + ) -> Result { + Ok(Block(self.0.get_block(&self.0.get_block_hash(number.try_into().unwrap()).await?).await?)) + } + + fn dust(coin: Coin) -> Amount { + assert_eq!(coin, Coin::Bitcoin); + // 10,000 satoshis, or $5 if 1 BTC = 50,000 USD + Amount(10_000) + } + + async fn cost_to_aggregate( + &self, + coin: Coin, + _reference_block: &Self::Block, + ) -> Result { + assert_eq!(coin, Coin::Bitcoin); + // TODO + Ok(Amount(0)) + } +} diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 2c56db35a..4f30f5e71 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -71,8 +71,14 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { /// The amount of blocks to process in parallel. /// - /// This must be at least `1`. This value should be the worst-case latency to handle a block - /// divided by the expected block time. + /// This must be at least `1`. This value MUST be at least the worst-case latency to publish a + /// Batch for a block divided by the expected block time. Setting this value too low will risk a + /// backlog forming. Setting this value too high will only delay key rotation and forwarded + /// outputs. + // The latency to publish a Batch for a block is the latency of a provided transaction + // (1 minute), the latency of a signing protocol (1 minute), the latency of Serai to finalize a + // block (1 minute), and the latency to cosign such a block (5 minutes for the cosign distance + // plus 1 minute). Accordingly, this should be at least ~30 minutes, ideally 60 minutes. const WINDOW_LENGTH: u64; /// The amount of blocks which will occur in 10 minutes (approximate). 
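As a quick check of that bound against the Bitcoin feed added above: a minimal sketch, assuming
the ideal 60-minute worst-case Batch-publication latency named in the doc comment and Bitcoin's
10-minute target block time (WINDOW_LENGTH = 6 is the value the ScannerFeed impl for Rpc sets).

  // Hypothetical figure: 60 minutes is the "ideal" latency from the doc comment above
  const BATCH_PUBLICATION_LATENCY_MINUTES: u64 = 60;
  const EXPECTED_BLOCK_TIME_MINUTES: u64 = 10;
  // The value assigned for Bitcoin: six 10-minute blocks, covering the full 60 minutes
  const WINDOW_LENGTH: u64 = 6;

  fn main() {
    // WINDOW_LENGTH must be at least the worst-case latency divided by the block time
    assert!(WINDOW_LENGTH >= BATCH_PUBLICATION_LATENCY_MINUTES / EXPECTED_BLOCK_TIME_MINUTES);
  }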
From d3c4e0195c356d85b6520902d4086412eea1d815 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 00:01:40 -0400 Subject: [PATCH 095/179] Satisfy Scheduler for Bitcoin --- Cargo.lock | 3 +- networks/bitcoin/src/wallet/send.rs | 14 +- networks/bitcoin/tests/wallet.rs | 6 +- processor/bitcoin/Cargo.toml | 2 + processor/bitcoin/src/lib.rs | 334 +----------------- processor/bitcoin/src/output.rs | 2 +- processor/bitcoin/src/scanner_feed.rs | 34 +- processor/bitcoin/src/scheduler.rs | 177 ++++++++++ processor/bitcoin/src/transaction.rs | 19 +- .../scheduler/utxo/primitives/Cargo.toml | 2 - .../scheduler/utxo/primitives/src/lib.rs | 1 - processor/scheduler/utxo/standard/src/lib.rs | 3 + .../utxo/transaction-chaining/src/lib.rs | 5 +- 13 files changed, 245 insertions(+), 357 deletions(-) create mode 100644 processor/bitcoin/src/scheduler.rs diff --git a/Cargo.lock b/Cargo.lock index b35cda503..7ae3a0f25 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8143,6 +8143,8 @@ dependencies = [ "serai-processor-primitives", "serai-processor-scanner", "serai-processor-scheduler-primitives", + "serai-processor-transaction-chaining-scheduler", + "serai-processor-utxo-scheduler-primitives", "tokio", "zalloc", ] @@ -8809,7 +8811,6 @@ dependencies = [ name = "serai-processor-utxo-scheduler-primitives" version = "0.1.0" dependencies = [ - "async-trait", "borsh", "serai-primitives", "serai-processor-primitives", diff --git a/networks/bitcoin/src/wallet/send.rs b/networks/bitcoin/src/wallet/send.rs index ccb020b21..276f536ed 100644 --- a/networks/bitcoin/src/wallet/send.rs +++ b/networks/bitcoin/src/wallet/send.rs @@ -44,7 +44,7 @@ pub enum TransactionError { #[error("fee was too low to pass the default minimum fee rate")] TooLowFee, #[error("not enough funds for these payments")] - NotEnoughFunds, + NotEnoughFunds { inputs: u64, payments: u64, fee: u64 }, #[error("transaction was too large")] TooLargeTransaction, } @@ -213,7 +213,11 @@ impl SignableTransaction { } if input_sat < (payment_sat + needed_fee) { - Err(TransactionError::NotEnoughFunds)?; + Err(TransactionError::NotEnoughFunds { + inputs: input_sat, + payments: payment_sat, + fee: needed_fee, + })?; } // If there's a change address, check if there's change to give it @@ -258,9 +262,9 @@ impl SignableTransaction { res } - /// Returns the outputs this transaction will create. - pub fn outputs(&self) -> &[TxOut] { - &self.tx.output + /// Returns the transaction, sans witness, this will create if signed. + pub fn transaction(&self) -> &Transaction { + &self.tx } /// Create a multisig machine for this transaction. diff --git a/networks/bitcoin/tests/wallet.rs b/networks/bitcoin/tests/wallet.rs index a290122b4..45371414b 100644 --- a/networks/bitcoin/tests/wallet.rs +++ b/networks/bitcoin/tests/wallet.rs @@ -195,10 +195,10 @@ async_sequential! { Err(TransactionError::TooLowFee), ); - assert_eq!( + assert!(matches!( SignableTransaction::new(inputs.clone(), &[(addr(), inputs[0].value() * 2)], None, None, FEE), - Err(TransactionError::NotEnoughFunds), - ); + Err(TransactionError::NotEnoughFunds { .. 
}), + )); assert_eq!( SignableTransaction::new(inputs, &vec![(addr(), 1000); 10000], None, None, FEE), diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml index ff14890e2..91813bac9 100644 --- a/processor/bitcoin/Cargo.toml +++ b/processor/bitcoin/Cargo.toml @@ -45,6 +45,8 @@ messages = { package = "serai-processor-messages", path = "../messages" } primitives = { package = "serai-processor-primitives", path = "../primitives" } scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } scanner = { package = "serai-processor-scanner", path = "../scanner" } +utxo-scheduler = { package = "serai-processor-utxo-scheduler-primitives", path = "../scheduler/utxo/primitives" } +transaction-chaining-scheduler = { package = "serai-processor-transaction-chaining-scheduler", path = "../scheduler/utxo/transaction-chaining" } message-queue = { package = "serai-message-queue", path = "../../message-queue" } diff --git a/processor/bitcoin/src/lib.rs b/processor/bitcoin/src/lib.rs index bba8629e8..cbf650932 100644 --- a/processor/bitcoin/src/lib.rs +++ b/processor/bitcoin/src/lib.rs @@ -9,15 +9,14 @@ static ALLOCATOR: zalloc::ZeroizingAlloc = // Internal utilities for scanning transactions mod scan; -// Output trait satisfaction +// Primitive trait satisfactions mod output; -// Transaction/SignableTransaction/Eventuality trait satisfaction mod transaction; -// Block trait satisfaction mod block; -// ScannerFeed trait satisfaction +// App-logic trait satisfactions mod scanner_feed; +mod scheduler; pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> [u8; 32] { use bitcoin_serai::bitcoin::hashes::Hash; @@ -28,21 +27,6 @@ pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> } /* -use std::{sync::LazyLock, time::Duration, io, collections::HashMap}; - -use async_trait::async_trait; - -use scale::{Encode, Decode}; - -use ciphersuite::group::ff::PrimeField; -use k256::{ProjectivePoint, Scalar}; -use frost::{ - curve::{Curve, Secp256k1}, - ThresholdKeys, -}; - -use tokio::time::sleep; - use bitcoin_serai::{ bitcoin::{ hashes::Hash as HashTrait, @@ -111,19 +95,6 @@ impl TransactionTrait for Transaction { #[async_trait] impl BlockTrait for Block { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - let mut hash = *self.block_hash().as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - - fn parent(&self) -> Self::Id { - let mut hash = *self.header.prev_blockhash.as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - async fn time(&self, rpc: &Bitcoin) -> u64 { // Use the network median time defined in BIP-0113 since the in-block time isn't guaranteed to // be monotonic @@ -152,51 +123,6 @@ impl BlockTrait for Block { } } -const KEY_DST: &[u8] = b"Serai Bitcoin Output Offset"; -static BRANCH_OFFSET: OnceLock = OnceLock::new(); -static CHANGE_OFFSET: OnceLock = OnceLock::new(); -static FORWARD_OFFSET: OnceLock = OnceLock::new(); - -// Always construct the full scanner in order to ensure there's no collisions -fn scanner( - key: ProjectivePoint, -) -> (Scanner, HashMap, HashMap, OutputType>) { - let mut scanner = Scanner::new(key).unwrap(); - let mut offsets = HashMap::from([(OutputType::External, Scalar::ZERO)]); - - let zero = Scalar::ZERO.to_repr(); - let zero_ref: &[u8] = zero.as_ref(); - let mut kinds = HashMap::from([(zero_ref.to_vec(), OutputType::External)]); - - let mut register = |kind, offset| { - let offset = scanner.register_offset(offset).expect("offset collision"); - 
offsets.insert(kind, offset); - - let offset = offset.to_repr(); - let offset_ref: &[u8] = offset.as_ref(); - kinds.insert(offset_ref.to_vec(), kind); - }; - - register( - OutputType::Branch, - *BRANCH_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"branch")), - ); - register( - OutputType::Change, - *CHANGE_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"change")), - ); - register( - OutputType::Forwarded, - *FORWARD_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"forward")), - ); - - (scanner, offsets, kinds) -} - -#[derive(Clone, Debug)] -pub struct Bitcoin { - pub(crate) rpc: Rpc, -} // Shim required for testing/debugging purposes due to generic arguments also necessitating trait // bounds impl PartialEq for Bitcoin { @@ -355,20 +281,6 @@ impl Bitcoin { } } -// Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) -// A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes -// While our inputs are entirely SegWit, such fine tuning is not necessary and could create -// issues in the future (if the size decreases or we misevaluate it) -// It also offers a minimal amount of benefit when we are able to logarithmically accumulate -// inputs -// For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and -// 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 -// bytes -// 100,000 / 192 = 520 -// 520 * 192 leaves 160 bytes of overhead for the transaction structure itself -const MAX_INPUTS: usize = 520; -const MAX_OUTPUTS: usize = 520; - fn address_from_key(key: ProjectivePoint) -> Address { Address::new( p2tr_script_buf(key).expect("creating address from key which isn't properly tweaked"), @@ -378,59 +290,8 @@ fn address_from_key(key: ProjectivePoint) -> Address { #[async_trait] impl Network for Bitcoin { - type Curve = Secp256k1; - - type Transaction = Transaction; - type Block = Block; - - type Output = Output; - type SignableTransaction = SignableTransaction; - type Eventuality = Eventuality; - type TransactionMachine = TransactionMachine; - type Scheduler = Scheduler; - type Address = Address; - - const NETWORK: NetworkId = NetworkId::Bitcoin; - const ID: &'static str = "Bitcoin"; - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 600; - const CONFIRMATIONS: usize = 6; - - /* - A Taproot input is: - - 36 bytes for the OutPoint - - 0 bytes for the script (+1 byte for the length) - - 4 bytes for the sequence - Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format - - There's also: - - 1 byte for the witness length - - 1 byte for the signature length - - 64 bytes for the signature - which have the SegWit discount. - - (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units - 230 ceil div 4 = 57 vbytes - - Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. Currently, these are: - - 1000 sat/kilo-vbyte for a transaction to be relayed - - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte - The DUST constant needs to be determined by the latter. - Since these are solely relay rules, and may be raised, we require all outputs be spendable - under a 5000 sat/kilo-vbyte fee rate. - - 5000 sat/kilo-vbyte = 5 sat/vbyte - 5 * 57 = 285 sats/spent-output - - Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding - 100 vbytes, tripling the transaction size, then the sats/tx would be < 1000. 
- - Increase by an order of magnitude, in order to ensure this is actually worth our time, and we - get 10,000 satoshis. - */ - const DUST: u64 = 10_000; - // 2 inputs should be 2 * 230 = 460 weight units // The output should be ~36 bytes, or 144 weight units // The overhead should be ~20 bytes at most, or 80 weight units @@ -467,195 +328,6 @@ impl Network for Bitcoin { Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded]))) } - async fn get_latest_block_number(&self) -> Result { - self.rpc.get_latest_block_number().await.map_err(|_| NetworkError::ConnectionError) - } - - async fn get_block(&self, number: usize) -> Result { - let block_hash = - self.rpc.get_block_hash(number).await.map_err(|_| NetworkError::ConnectionError)?; - self.rpc.get_block(&block_hash).await.map_err(|_| NetworkError::ConnectionError) - } - - async fn get_outputs(&self, block: &Self::Block, key: ProjectivePoint) -> Vec { - let (scanner, _, kinds) = scanner(key); - - let mut outputs = vec![]; - // Skip the coinbase transaction which is burdened by maturity - for tx in &block.txdata[1 ..] { - for output in scanner.scan_transaction(tx) { - let offset_repr = output.offset().to_repr(); - let offset_repr_ref: &[u8] = offset_repr.as_ref(); - let kind = kinds[offset_repr_ref]; - - let output = Output { kind, presumed_origin: None, output, data: vec![] }; - assert_eq!(output.tx_id(), tx.id()); - outputs.push(output); - } - - if outputs.is_empty() { - continue; - } - - // populate the outputs with the origin and data - let presumed_origin = { - // This may identify the P2WSH output *embedding the InInstruction* as the origin, which - // would be a bit trickier to spend that a traditional output... - // There's no risk of the InInstruction going missing as it'd already be on-chain though - // We *could* parse out the script *without the InInstruction prefix* and declare that the - // origin - // TODO - let spent_output = { - let input = &tx.input[0]; - let mut spent_tx = input.previous_output.txid.as_raw_hash().to_byte_array(); - spent_tx.reverse(); - let mut tx; - while { - tx = self.rpc.get_transaction(&spent_tx).await; - tx.is_err() - } { - log::error!("couldn't get transaction from bitcoin node: {tx:?}"); - sleep(Duration::from_secs(5)).await; - } - tx.unwrap().output.swap_remove(usize::try_from(input.previous_output.vout).unwrap()) - }; - Address::new(spent_output.script_pubkey) - }; - let data = Self::extract_serai_data(tx); - for output in &mut outputs { - if output.kind == OutputType::External { - output.data.clone_from(&data); - } - output.presumed_origin.clone_from(&presumed_origin); - } - } - - outputs - } - - async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Self::Block, - ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { - let mut res = HashMap::new(); - if eventualities.map.is_empty() { - return res; - } - - fn check_block( - eventualities: &mut EventualitiesTracker, - block: &Block, - res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, - ) { - for tx in &block.txdata[1 ..] 
{ - if let Some((plan, _)) = eventualities.map.remove(tx.id().as_slice()) { - res.insert(plan, (eventualities.block_number, tx.id(), tx.clone())); - } - } - - eventualities.block_number += 1; - } - - let this_block_hash = block.id(); - let this_block_num = (async { - loop { - match self.rpc.get_block_number(&this_block_hash).await { - Ok(number) => return number, - Err(e) => { - log::error!("couldn't get the block number for {}: {}", hex::encode(this_block_hash), e) - } - } - sleep(Duration::from_secs(60)).await; - } - }) - .await; - - for block_num in (eventualities.block_number + 1) .. this_block_num { - let block = { - let mut block; - while { - block = self.get_block(block_num).await; - block.is_err() - } { - log::error!("couldn't get block {}: {}", block_num, block.err().unwrap()); - sleep(Duration::from_secs(60)).await; - } - block.unwrap() - }; - - check_block(eventualities, &block, &mut res); - } - - // Also check the current block - check_block(eventualities, block, &mut res); - assert_eq!(eventualities.block_number, this_block_num); - - res - } - - async fn needed_fee( - &self, - block_number: usize, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - ) -> Result, NetworkError> { - Ok( - self - .make_signable_transaction(block_number, inputs, payments, change, true) - .await? - .map(|signable| signable.needed_fee()), - ) - } - - async fn signable_transaction( - &self, - block_number: usize, - _plan_id: &[u8; 32], - _key: ProjectivePoint, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - (): &(), - ) -> Result, NetworkError> { - Ok(self.make_signable_transaction(block_number, inputs, payments, change, false).await?.map( - |signable| { - let eventuality = Eventuality(signable.txid()); - (SignableTransaction { actual: signable }, eventuality) - }, - )) - } - - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: Self::SignableTransaction, - ) -> Result { - Ok(transaction.actual.clone().multisig(&keys).expect("used the wrong keys")) - } - - async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { - match self.rpc.send_raw_transaction(tx).await { - Ok(_) => (), - Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?, - // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs - // invalid transaction - Err(e) => panic!("failed to publish TX {}: {e}", tx.compute_txid()), - } - Ok(()) - } - - async fn confirm_completion( - &self, - eventuality: &Self::Eventuality, - _: &EmptyClaim, - ) -> Result, NetworkError> { - Ok(Some( - self.rpc.get_transaction(&eventuality.0).await.map_err(|_| NetworkError::ConnectionError)?, - )) - } - #[cfg(test)] async fn get_block_number(&self, id: &[u8; 32]) -> usize { self.rpc.get_block_number(id).await.unwrap() diff --git a/processor/bitcoin/src/output.rs b/processor/bitcoin/src/output.rs index a783792dd..dc5413504 100644 --- a/processor/bitcoin/src/output.rs +++ b/processor/bitcoin/src/output.rs @@ -47,7 +47,7 @@ impl AsMut<[u8]> for OutputId { pub(crate) struct Output { kind: OutputType, presumed_origin: Option
, - output: WalletOutput, + pub(crate) output: WalletOutput, data: Vec, } diff --git a/processor/bitcoin/src/scanner_feed.rs b/processor/bitcoin/src/scanner_feed.rs index 73265bfed..5a3c491cc 100644 --- a/processor/bitcoin/src/scanner_feed.rs +++ b/processor/bitcoin/src/scanner_feed.rs @@ -46,7 +46,39 @@ impl ScannerFeed for Rpc { fn dust(coin: Coin) -> Amount { assert_eq!(coin, Coin::Bitcoin); - // 10,000 satoshis, or $5 if 1 BTC = 50,000 USD + + /* + A Taproot input is: + - 36 bytes for the OutPoint + - 0 bytes for the script (+1 byte for the length) + - 4 bytes for the sequence + Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format + + There's also: + - 1 byte for the witness length + - 1 byte for the signature length + - 64 bytes for the signature + which have the SegWit discount. + + (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units + 230 ceil div 4 = 57 vbytes + + Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. Currently, these are: + - 1000 sat/kilo-vbyte for a transaction to be relayed + - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte + The DUST constant needs to be determined by the latter. + Since these are solely relay rules, and may be raised, we require all outputs be spendable + under a 5000 sat/kilo-vbyte fee rate. + + 5000 sat/kilo-vbyte = 5 sat/vbyte + 5 * 57 = 285 sats/spent-output + + Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding + 100 vbytes, tripling the transaction size, then the sats/tx would be < 1000. + + Increase by an order of magnitude, in order to ensure this is actually worth our time, and we + get 10,000 satoshis. This is $5 if 1 BTC = 50,000 USD. + */ Amount(10_000) } diff --git a/processor/bitcoin/src/scheduler.rs b/processor/bitcoin/src/scheduler.rs new file mode 100644 index 000000000..0c1debdb7 --- /dev/null +++ b/processor/bitcoin/src/scheduler.rs @@ -0,0 +1,177 @@ +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::{ + bitcoin::ScriptBuf, + wallet::{TransactionError, SignableTransaction as BSignableTransaction, p2tr_script_buf}, +}; + +use serai_client::{ + primitives::{Coin, Amount}, + networks::bitcoin::Address, +}; + +use primitives::{OutputType, ReceivedOutput, Payment}; +use scanner::{KeyFor, AddressFor, OutputFor, BlockFor}; +use utxo_scheduler::{PlannedTransaction, TransactionPlanner}; +use transaction_chaining_scheduler::{EffectedReceivedOutputs, Scheduler as GenericScheduler}; + +use crate::{ + scan::{offsets_for_key, scanner}, + output::Output, + transaction::{SignableTransaction, Eventuality}, + scanner_feed::Rpc, +}; + +fn address_from_serai_key(key: ::G, kind: OutputType) -> Address { + let offset = ::G::GENERATOR * offsets_for_key(key)[&kind]; + Address::new( + p2tr_script_buf(key + offset) + .expect("creating address from Serai key which wasn't properly tweaked"), + ) + .expect("couldn't create Serai-representable address for P2TR script") +} + +fn signable_transaction( + fee_per_vbyte: u64, + inputs: Vec>, + payments: Vec>>, + change: Option>, +) -> Result<(SignableTransaction, BSignableTransaction), TransactionError> { + assert!(inputs.len() < Planner::MAX_INPUTS); + assert!((payments.len() + usize::from(u8::from(change.is_some()))) < Planner::MAX_OUTPUTS); + + let inputs = inputs.into_iter().map(|input| input.output).collect::>(); + let payments = payments + .into_iter() + .map(|payment| { + (payment.address().clone(), { + let balance = payment.balance(); + 
assert_eq!(balance.coin, Coin::Bitcoin); + balance.amount.0 + }) + }) + .collect::>(); + let change = change.map(Planner::change_address); + + // TODO: ACP output + BSignableTransaction::new( + inputs.clone(), + &payments + .iter() + .cloned() + .map(|(address, amount)| (ScriptBuf::from(address), amount)) + .collect::>(), + change.clone().map(ScriptBuf::from), + None, + fee_per_vbyte, + ) + .map(|bst| (SignableTransaction { inputs, payments, change, fee_per_vbyte }, bst)) +} + +pub(crate) struct Planner; +impl TransactionPlanner> for Planner { + type FeeRate = u64; + + type SignableTransaction = SignableTransaction; + + /* + Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT). + + A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes. While + our inputs are entirely SegWit, such fine tuning is not necessary and could create issues in + the future (if the size decreases or we misevaluate it). It also offers a minimal amount of + benefit when we are able to logarithmically accumulate inputs/fulfill payments. + + For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and + 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 + bytes. + + 100,000 / 192 = 520 + 520 * 192 leaves 160 bytes of overhead for the transaction structure itself. + */ + const MAX_INPUTS: usize = 520; + // We always reserve one output to create an anyone-can-spend output enabling anyone to use CPFP + // to unstick any transactions which had too low of a fee. + const MAX_OUTPUTS: usize = 519; + + fn fee_rate(block: &BlockFor, coin: Coin) -> Self::FeeRate { + assert_eq!(coin, Coin::Bitcoin); + // TODO + 1 + } + + fn branch_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Branch) + } + fn change_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Change) + } + fn forwarding_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Forwarded) + } + + fn calculate_fee( + fee_rate: Self::FeeRate, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> Amount { + match signable_transaction(fee_rate, inputs, payments, change) { + Ok(tx) => Amount(tx.1.needed_fee()), + Err( + TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, + ) => panic!("malformed arguments to calculate_fee"), + // No data, we have a minimum fee rate, we checked the amount of inputs/outputs + Err( + TransactionError::TooMuchData | + TransactionError::TooLowFee | + TransactionError::TooLargeTransaction, + ) => unreachable!(), + Err(TransactionError::NotEnoughFunds { fee, .. 
}) => Amount(fee), + } + } + + fn plan( + fee_rate: Self::FeeRate, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> PlannedTransaction> { + let key = inputs.first().unwrap().key(); + for input in &inputs { + assert_eq!(key, input.key()); + } + + let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id()); + match signable_transaction(fee_rate, inputs, payments, change) { + Ok(tx) => PlannedTransaction { + signable: tx.0, + eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output }, + auxilliary: EffectedReceivedOutputs({ + let tx = tx.1.transaction(); + let scanner = scanner(key); + + let mut res = vec![]; + for output in scanner.scan_transaction(tx) { + res.push(Output::new(key, tx, output)); + } + res + }), + }, + Err( + TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, + ) => panic!("malformed arguments to plan"), + // No data, we have a minimum fee rate, we checked the amount of inputs/outputs + Err( + TransactionError::TooMuchData | + TransactionError::TooLowFee | + TransactionError::TooLargeTransaction, + ) => unreachable!(), + Err(TransactionError::NotEnoughFunds { .. }) => { + panic!("plan called for a transaction without enough funds") + } + } + } +} + +pub(crate) type Scheduler = GenericScheduler; diff --git a/processor/bitcoin/src/transaction.rs b/processor/bitcoin/src/transaction.rs index ef48d3f01..f529b1782 100644 --- a/processor/bitcoin/src/transaction.rs +++ b/processor/bitcoin/src/transaction.rs @@ -48,11 +48,10 @@ impl scheduler::Transaction for Transaction { #[derive(Clone, Debug)] pub(crate) struct SignableTransaction { - inputs: Vec, - payments: Vec<(Address, u64)>, - change: Option
<Address>,
-  data: Option<Vec<u8>>,
-  fee_per_vbyte: u64,
+  pub(crate) inputs: Vec<ReceivedOutput>,
+  pub(crate) payments: Vec<(Address, u64)>,
+  pub(crate) change: Option<Address>
, + pub(crate) fee_per_vbyte: u64, } impl SignableTransaction { @@ -66,7 +65,7 @@ impl SignableTransaction { .map(|(address, amount)| (ScriptBuf::from(address), amount)) .collect::>(), self.change.map(ScriptBuf::from), - self.data, + None, self.fee_per_vbyte, ) } @@ -111,10 +110,9 @@ impl scheduler::SignableTransaction for SignableTransaction { let payments = <_>::deserialize_reader(reader)?; let change = <_>::deserialize_reader(reader)?; - let data = <_>::deserialize_reader(reader)?; let fee_per_vbyte = <_>::deserialize_reader(reader)?; - Ok(Self { inputs, payments, change, data, fee_per_vbyte }) + Ok(Self { inputs, payments, change, fee_per_vbyte }) } fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?; @@ -124,7 +122,6 @@ impl scheduler::SignableTransaction for SignableTransaction { self.payments.serialize(writer)?; self.change.serialize(writer)?; - self.data.serialize(writer)?; self.fee_per_vbyte.serialize(writer)?; Ok(()) @@ -141,8 +138,8 @@ impl scheduler::SignableTransaction for SignableTransaction { #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub(crate) struct Eventuality { - txid: [u8; 32], - singular_spent_output: Option, + pub(crate) txid: [u8; 32], + pub(crate) singular_spent_output: Option, } impl primitives::Eventuality for Eventuality { diff --git a/processor/scheduler/utxo/primitives/Cargo.toml b/processor/scheduler/utxo/primitives/Cargo.toml index 85935ae0f..80b1f22e5 100644 --- a/processor/scheduler/utxo/primitives/Cargo.toml +++ b/processor/scheduler/utxo/primitives/Cargo.toml @@ -17,8 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -async-trait = { version = "0.1", default-features = false } - borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs index 2f51e9e09..e48221a1c 100644 --- a/processor/scheduler/utxo/primitives/src/lib.rs +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -39,7 +39,6 @@ pub struct AmortizePlannedTransaction: 'static + Send + Sync { /// The type representing a fee rate to use for transactions. type FeeRate: Clone + Copy; diff --git a/processor/scheduler/utxo/standard/src/lib.rs b/processor/scheduler/utxo/standard/src/lib.rs index 10e40f150..3ae855e73 100644 --- a/processor/scheduler/utxo/standard/src/lib.rs +++ b/processor/scheduler/utxo/standard/src/lib.rs @@ -203,6 +203,9 @@ impl> Scheduler { // Fetch the operating costs/outputs let mut operating_costs = Db::::operating_costs(txn, coin).0; let outputs = Db::::outputs(txn, key, coin).unwrap(); + if outputs.is_empty() { + continue; + } // Fetch the fulfillable payments let payments = Self::fulfillable_payments( diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index d11e4ac26..e43f5fecb 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -23,7 +23,7 @@ mod db; use db::Db; /// The outputs which will be effected by a PlannedTransaction and received by Serai. 
-pub struct EffectedReceivedOutputs(Vec>); +pub struct EffectedReceivedOutputs(pub Vec>); /// A scheduler of transactions for networks premised on the UTXO model which support /// transaction chaining. @@ -179,6 +179,9 @@ impl>> Sched // Fetch the operating costs/outputs let mut operating_costs = Db::::operating_costs(txn, coin).0; let outputs = Db::::outputs(txn, key, coin).unwrap(); + if outputs.is_empty() { + continue; + } // Fetch the fulfillable payments let payments = Self::fulfillable_payments( From 87abac11651d801c6e68e9b04b5172b96bc305a6 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 00:48:52 -0400 Subject: [PATCH 096/179] Bitcoin TransactionPublisher --- Cargo.lock | 1 + processor/bitcoin/Cargo.toml | 1 + processor/bitcoin/src/lib.rs | 2 +- processor/bitcoin/src/{scanner_feed.rs => rpc.rs} | 15 ++++++++++++++- processor/bitcoin/src/scheduler.rs | 2 +- processor/bitcoin/src/transaction.rs | 2 +- 6 files changed, 19 insertions(+), 4 deletions(-) rename processor/bitcoin/src/{scanner_feed.rs => rpc.rs} (88%) diff --git a/Cargo.lock b/Cargo.lock index 7ae3a0f25..1839cc98b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8143,6 +8143,7 @@ dependencies = [ "serai-processor-primitives", "serai-processor-scanner", "serai-processor-scheduler-primitives", + "serai-processor-signers", "serai-processor-transaction-chaining-scheduler", "serai-processor-utxo-scheduler-primitives", "tokio", diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml index 91813bac9..54ace26f6 100644 --- a/processor/bitcoin/Cargo.toml +++ b/processor/bitcoin/Cargo.toml @@ -47,6 +47,7 @@ scheduler = { package = "serai-processor-scheduler-primitives", path = "../sched scanner = { package = "serai-processor-scanner", path = "../scanner" } utxo-scheduler = { package = "serai-processor-utxo-scheduler-primitives", path = "../scheduler/utxo/primitives" } transaction-chaining-scheduler = { package = "serai-processor-transaction-chaining-scheduler", path = "../scheduler/utxo/transaction-chaining" } +signers = { package = "serai-processor-signers", path = "../signers" } message-queue = { package = "serai-message-queue", path = "../../message-queue" } diff --git a/processor/bitcoin/src/lib.rs b/processor/bitcoin/src/lib.rs index cbf650932..281b73582 100644 --- a/processor/bitcoin/src/lib.rs +++ b/processor/bitcoin/src/lib.rs @@ -15,7 +15,7 @@ mod transaction; mod block; // App-logic trait satisfactions -mod scanner_feed; +mod rpc; mod scheduler; pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> [u8; 32] { diff --git a/processor/bitcoin/src/scanner_feed.rs b/processor/bitcoin/src/rpc.rs similarity index 88% rename from processor/bitcoin/src/scanner_feed.rs rename to processor/bitcoin/src/rpc.rs index 5a3c491cc..8af821215 100644 --- a/processor/bitcoin/src/scanner_feed.rs +++ b/processor/bitcoin/src/rpc.rs @@ -3,8 +3,12 @@ use bitcoin_serai::rpc::{RpcError, Rpc as BRpc}; use serai_client::primitives::{NetworkId, Coin, Amount}; use scanner::ScannerFeed; +use signers::TransactionPublisher; -use crate::block::{BlockHeader, Block}; +use crate::{ + transaction::Transaction, + block::{BlockHeader, Block}, +}; #[derive(Clone)] pub(crate) struct Rpc(BRpc); @@ -92,3 +96,12 @@ impl ScannerFeed for Rpc { Ok(Amount(0)) } } + +#[async_trait::async_trait] +impl TransactionPublisher for Rpc { + type EphemeralError = RpcError; + + async fn publish(&self, tx: Transaction) -> Result<(), Self::EphemeralError> { + self.0.send_raw_transaction(&tx.0).await.map(|_| ()) + } +} diff --git 
a/processor/bitcoin/src/scheduler.rs b/processor/bitcoin/src/scheduler.rs index 0c1debdb7..c48f9a690 100644 --- a/processor/bitcoin/src/scheduler.rs +++ b/processor/bitcoin/src/scheduler.rs @@ -19,7 +19,7 @@ use crate::{ scan::{offsets_for_key, scanner}, output::Output, transaction::{SignableTransaction, Eventuality}, - scanner_feed::Rpc, + rpc::Rpc, }; fn address_from_serai_key(key: ::G, kind: OutputType) -> Address { diff --git a/processor/bitcoin/src/transaction.rs b/processor/bitcoin/src/transaction.rs index f529b1782..5fca0b91b 100644 --- a/processor/bitcoin/src/transaction.rs +++ b/processor/bitcoin/src/transaction.rs @@ -24,7 +24,7 @@ use serai_client::networks::bitcoin::Address; use crate::output::OutputId; #[derive(Clone, Debug)] -pub(crate) struct Transaction(BTransaction); +pub(crate) struct Transaction(pub(crate) BTransaction); impl From for Transaction { fn from(tx: BTransaction) -> Self { From ad3b07c2956d5873affd5997a163368b6f2845e0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 00:52:01 -0400 Subject: [PATCH 097/179] Bitcoin processor lib.rs -> main.rs --- processor/bitcoin/src/{lib.rs => main.rs} | 3 +++ 1 file changed, 3 insertions(+) rename processor/bitcoin/src/{lib.rs => main.rs} (99%) diff --git a/processor/bitcoin/src/lib.rs b/processor/bitcoin/src/main.rs similarity index 99% rename from processor/bitcoin/src/lib.rs rename to processor/bitcoin/src/main.rs index 281b73582..653e8b5a0 100644 --- a/processor/bitcoin/src/lib.rs +++ b/processor/bitcoin/src/main.rs @@ -26,6 +26,9 @@ pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> res } +#[tokio::main] +async fn main() {} + /* use bitcoin_serai::{ bitcoin::{ From 876327d6b7648136a92af9bf3125fcf22a8e95bc Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 02:46:18 -0400 Subject: [PATCH 098/179] Implement presumed_origin Before we yield a block for scanning, we save all of the contained script public keys. Then, when we want the address credited for creating an output, we read the script public key of the spent output from the database. Fixes #559. 
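In sketch form, the round trip this implements (condensed from the new txindex.rs and the
updated scan.rs below; `block`, `txn`, `getter`, and `input` are free variables here):

  // While indexing a finalized block, save every non-coinbase output's script public key
  for tx in &block.txdata[1 ..] {
    let txid = hash_bytes(tx.compute_txid().to_raw_hash());
    for (o, output) in tx.output.iter().enumerate() {
      let o = u32::try_from(o).unwrap();
      db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes());
    }
  }

  // While scanning, read the spent output's script public key back out of the database to
  // presume the origin of a received output
  let txid = hash_bytes(input.previous_output.txid.to_raw_hash());
  let script_pub_key = db::ScriptPubKey::get(getter, txid, input.previous_output.vout)
    .expect("unknown output being spent by input");
  let presumed_origin = Address::new(ScriptBuf::from_bytes(script_pub_key));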
--- processor/bitcoin/src/block.rs | 21 +++++--- processor/bitcoin/src/db.rs | 8 +++ processor/bitcoin/src/main.rs | 4 ++ processor/bitcoin/src/output.rs | 27 +++++++++- processor/bitcoin/src/rpc.rs | 27 ++++++---- processor/bitcoin/src/scan.rs | 32 +++++------- processor/bitcoin/src/scheduler.rs | 64 +++++++++++++++--------- processor/bitcoin/src/txindex.rs | 80 ++++++++++++++++++++++++++++++ 8 files changed, 200 insertions(+), 63 deletions(-) create mode 100644 processor/bitcoin/src/db.rs create mode 100644 processor/bitcoin/src/txindex.rs diff --git a/processor/bitcoin/src/block.rs b/processor/bitcoin/src/block.rs index 24cccec9b..8221c8b56 100644 --- a/processor/bitcoin/src/block.rs +++ b/processor/bitcoin/src/block.rs @@ -1,3 +1,4 @@ +use core::fmt; use std::collections::HashMap; use ciphersuite::{Ciphersuite, Secp256k1}; @@ -6,6 +7,7 @@ use bitcoin_serai::bitcoin::block::{Header, Block as BBlock}; use serai_client::networks::bitcoin::Address; +use serai_db::Db; use primitives::{ReceivedOutput, EventualityTracker}; use crate::{hash_bytes, scan::scanner, output::Output, transaction::Eventuality}; @@ -21,11 +23,16 @@ impl primitives::BlockHeader for BlockHeader { } } -#[derive(Clone, Debug)] -pub(crate) struct Block(pub(crate) BBlock); +#[derive(Clone)] +pub(crate) struct Block(pub(crate) D, pub(crate) BBlock); +impl fmt::Debug for Block { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Block").field("1", &self.1).finish_non_exhaustive() + } +} #[async_trait::async_trait] -impl primitives::Block for Block { +impl primitives::Block for Block { type Header = BlockHeader; type Key = ::G; @@ -34,7 +41,7 @@ impl primitives::Block for Block { type Eventuality = Eventuality; fn id(&self) -> [u8; 32] { - primitives::BlockHeader::id(&BlockHeader(self.0.header)) + primitives::BlockHeader::id(&BlockHeader(self.1.header)) } fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec { @@ -42,9 +49,9 @@ impl primitives::Block for Block { let mut res = vec![]; // We skip the coinbase transaction as its burdened by maturity - for tx in &self.0.txdata[1 ..] { + for tx in &self.1.txdata[1 ..] { for output in scanner.scan_transaction(tx) { - res.push(Output::new(key, tx, output)); + res.push(Output::new(&self.0, key, tx, output)); } } res @@ -59,7 +66,7 @@ impl primitives::Block for Block { Self::Eventuality, > { let mut res = HashMap::new(); - for tx in &self.0.txdata[1 ..] { + for tx in &self.1.txdata[1 ..] { let id = hash_bytes(tx.compute_txid().to_raw_hash()); if let Some(eventuality) = eventualities.active_eventualities.remove(id.as_slice()) { res.insert(id, eventuality); diff --git a/processor/bitcoin/src/db.rs b/processor/bitcoin/src/db.rs new file mode 100644 index 000000000..1d73ebfee --- /dev/null +++ b/processor/bitcoin/src/db.rs @@ -0,0 +1,8 @@ +use serai_db::{Get, DbTxn, create_db}; + +create_db! 
{ + BitcoinProcessor { + LatestBlockToYieldAsFinalized: () -> u64, + ScriptPubKey: (tx: [u8; 32], vout: u32) -> Vec, + } +} diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index 653e8b5a0..941cc0dc9 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -18,6 +18,10 @@ mod block; mod rpc; mod scheduler; +// Our custom code for Bitcoin +mod db; +mod txindex; + pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> [u8; 32] { use bitcoin_serai::bitcoin::hashes::Hash; diff --git a/processor/bitcoin/src/output.rs b/processor/bitcoin/src/output.rs index dc5413504..2ed037057 100644 --- a/processor/bitcoin/src/output.rs +++ b/processor/bitcoin/src/output.rs @@ -15,6 +15,7 @@ use bitcoin_serai::{ use scale::{Encode, Decode, IoReader}; use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::Get; use serai_client::{ primitives::{Coin, Amount, Balance, ExternalAddress}, @@ -52,13 +53,35 @@ pub(crate) struct Output { } impl Output { - pub fn new(key: ::G, tx: &Transaction, output: WalletOutput) -> Self { + pub fn new( + getter: &impl Get, + key: ::G, + tx: &Transaction, + output: WalletOutput, + ) -> Self { Self { kind: offsets_for_key(key) .into_iter() .find_map(|(kind, offset)| (offset == output.offset()).then_some(kind)) .expect("scanned output for unknown offset"), - presumed_origin: presumed_origin(tx), + presumed_origin: presumed_origin(getter, tx), + output, + data: extract_serai_data(tx), + } + } + + pub fn new_with_presumed_origin( + key: ::G, + tx: &Transaction, + presumed_origin: Option
, + output: WalletOutput, + ) -> Self { + Self { + kind: offsets_for_key(key) + .into_iter() + .find_map(|(kind, offset)| (offset == output.offset()).then_some(kind)) + .expect("scanned output for unknown offset"), + presumed_origin, output, data: extract_serai_data(tx), } diff --git a/processor/bitcoin/src/rpc.rs b/processor/bitcoin/src/rpc.rs index 8af821215..cafb0ef3c 100644 --- a/processor/bitcoin/src/rpc.rs +++ b/processor/bitcoin/src/rpc.rs @@ -2,34 +2,36 @@ use bitcoin_serai::rpc::{RpcError, Rpc as BRpc}; use serai_client::primitives::{NetworkId, Coin, Amount}; +use serai_db::Db; use scanner::ScannerFeed; use signers::TransactionPublisher; use crate::{ + db, transaction::Transaction, block::{BlockHeader, Block}, }; #[derive(Clone)] -pub(crate) struct Rpc(BRpc); +pub(crate) struct Rpc { + pub(crate) db: D, + pub(crate) rpc: BRpc, +} #[async_trait::async_trait] -impl ScannerFeed for Rpc { +impl ScannerFeed for Rpc { const NETWORK: NetworkId = NetworkId::Bitcoin; const CONFIRMATIONS: u64 = 6; const WINDOW_LENGTH: u64 = 6; const TEN_MINUTES: u64 = 1; - type Block = Block; + type Block = Block; type EphemeralError = RpcError; async fn latest_finalized_block_number(&self) -> Result { - u64::try_from(self.0.get_latest_block_number().await?) - .unwrap() - .checked_sub(Self::CONFIRMATIONS) - .ok_or(RpcError::ConnectionError) + db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError) } async fn unchecked_block_header_by_number( @@ -37,7 +39,7 @@ impl ScannerFeed for Rpc { number: u64, ) -> Result<::Header, Self::EphemeralError> { Ok(BlockHeader( - self.0.get_block(&self.0.get_block_hash(number.try_into().unwrap()).await?).await?.header, + self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?.header, )) } @@ -45,7 +47,10 @@ impl ScannerFeed for Rpc { &self, number: u64, ) -> Result { - Ok(Block(self.0.get_block(&self.0.get_block_hash(number.try_into().unwrap()).await?).await?)) + Ok(Block( + self.db.clone(), + self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?, + )) } fn dust(coin: Coin) -> Amount { @@ -98,10 +103,10 @@ impl ScannerFeed for Rpc { } #[async_trait::async_trait] -impl TransactionPublisher for Rpc { +impl TransactionPublisher for Rpc { type EphemeralError = RpcError; async fn publish(&self, tx: Transaction) -> Result<(), Self::EphemeralError> { - self.0.send_raw_transaction(&tx.0).await.map(|_| ()) + self.rpc.send_raw_transaction(&tx.0).await.map(|_| ()) } } diff --git a/processor/bitcoin/src/scan.rs b/processor/bitcoin/src/scan.rs index 43518b57f..b3d3a6dcc 100644 --- a/processor/bitcoin/src/scan.rs +++ b/processor/bitcoin/src/scan.rs @@ -13,8 +13,11 @@ use bitcoin_serai::{ use serai_client::networks::bitcoin::Address; +use serai_db::Get; use primitives::OutputType; +use crate::{db, hash_bytes}; + const KEY_DST: &[u8] = b"Serai Bitcoin Processor Key Offset"; static BRANCH_BASE_OFFSET: LazyLock<::F> = LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"branch")); @@ -55,26 +58,17 @@ pub(crate) fn scanner(key: ::G) -> Scanner { scanner } -pub(crate) fn presumed_origin(tx: &Transaction) -> Option
{
-  todo!("TODO")
-
-  /*
-  let spent_output = {
-    let input = &tx.input[0];
-    let mut spent_tx = input.previous_output.txid.as_raw_hash().to_byte_array();
-    spent_tx.reverse();
-    let mut tx;
-    while {
-      tx = self.rpc.get_transaction(&spent_tx).await;
-      tx.is_err()
-    } {
-      log::error!("couldn't get transaction from bitcoin node: {tx:?}");
-      sleep(Duration::from_secs(5)).await;
-    }
-    tx.unwrap().output.swap_remove(usize::try_from(input.previous_output.vout).unwrap())
-  };
-  Address::new(spent_output.script_pubkey)
-  */
+pub(crate) fn presumed_origin(getter: &impl Get, tx: &Transaction) -> Option<Address>
{ + for input in &tx.input { + let txid = hash_bytes(input.previous_output.txid.to_raw_hash()); + let vout = input.previous_output.vout; + if let Some(address) = Address::new(ScriptBuf::from_bytes( + db::ScriptPubKey::get(getter, txid, vout).expect("unknown output being spent by input"), + )) { + return Some(address); } - tx.unwrap().output.swap_remove(usize::try_from(input.previous_output.vout).unwrap()) - }; - Address::new(spent_output.script_pubkey) - */ + } + None? } // Checks if this script matches SHA256 PUSH MSG_HASH OP_EQUALVERIFY .. diff --git a/processor/bitcoin/src/scheduler.rs b/processor/bitcoin/src/scheduler.rs index c48f9a690..e225613c5 100644 --- a/processor/bitcoin/src/scheduler.rs +++ b/processor/bitcoin/src/scheduler.rs @@ -10,6 +10,7 @@ use serai_client::{ networks::bitcoin::Address, }; +use serai_db::Db; use primitives::{OutputType, ReceivedOutput, Payment}; use scanner::{KeyFor, AddressFor, OutputFor, BlockFor}; use utxo_scheduler::{PlannedTransaction, TransactionPlanner}; @@ -31,17 +32,24 @@ fn address_from_serai_key(key: ::G, kind: OutputType) .expect("couldn't create Serai-representable address for P2TR script") } -fn signable_transaction( +fn signable_transaction( fee_per_vbyte: u64, - inputs: Vec>, - payments: Vec>>, - change: Option>, + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, ) -> Result<(SignableTransaction, BSignableTransaction), TransactionError> { - assert!(inputs.len() < Planner::MAX_INPUTS); - assert!((payments.len() + usize::from(u8::from(change.is_some()))) < Planner::MAX_OUTPUTS); + assert!( + inputs.len() < + , EffectedReceivedOutputs>>>::MAX_INPUTS + ); + assert!( + (payments.len() + usize::from(u8::from(change.is_some()))) < + , EffectedReceivedOutputs>>>::MAX_OUTPUTS + ); let inputs = inputs.into_iter().map(|input| input.output).collect::>(); - let payments = payments + + let mut payments = payments .into_iter() .map(|payment| { (payment.address().clone(), { @@ -51,7 +59,8 @@ fn signable_transaction( }) }) .collect::>(); - let change = change.map(Planner::change_address); + let change = change + .map(, EffectedReceivedOutputs>>>::change_address); // TODO: ACP output BSignableTransaction::new( @@ -69,7 +78,7 @@ fn signable_transaction( } pub(crate) struct Planner; -impl TransactionPlanner> for Planner { +impl TransactionPlanner, EffectedReceivedOutputs>> for Planner { type FeeRate = u64; type SignableTransaction = SignableTransaction; @@ -94,29 +103,29 @@ impl TransactionPlanner> for Planner { // to unstick any transactions which had too low of a fee. 
const MAX_OUTPUTS: usize = 519; - fn fee_rate(block: &BlockFor, coin: Coin) -> Self::FeeRate { + fn fee_rate(block: &BlockFor>, coin: Coin) -> Self::FeeRate { assert_eq!(coin, Coin::Bitcoin); // TODO 1 } - fn branch_address(key: KeyFor) -> AddressFor { + fn branch_address(key: KeyFor>) -> AddressFor> { address_from_serai_key(key, OutputType::Branch) } - fn change_address(key: KeyFor) -> AddressFor { + fn change_address(key: KeyFor>) -> AddressFor> { address_from_serai_key(key, OutputType::Change) } - fn forwarding_address(key: KeyFor) -> AddressFor { + fn forwarding_address(key: KeyFor>) -> AddressFor> { address_from_serai_key(key, OutputType::Forwarded) } fn calculate_fee( fee_rate: Self::FeeRate, - inputs: Vec>, - payments: Vec>>, - change: Option>, + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, ) -> Amount { - match signable_transaction(fee_rate, inputs, payments, change) { + match signable_transaction::(fee_rate, inputs, payments, change) { Ok(tx) => Amount(tx.1.needed_fee()), Err( TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, @@ -133,17 +142,17 @@ impl TransactionPlanner> for Planner { fn plan( fee_rate: Self::FeeRate, - inputs: Vec>, - payments: Vec>>, - change: Option>, - ) -> PlannedTransaction> { + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, + ) -> PlannedTransaction, Self::SignableTransaction, EffectedReceivedOutputs>> { let key = inputs.first().unwrap().key(); for input in &inputs { assert_eq!(key, input.key()); } let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id()); - match signable_transaction(fee_rate, inputs, payments, change) { + match signable_transaction::(fee_rate, inputs.clone(), payments, change) { Ok(tx) => PlannedTransaction { signable: tx.0, eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output }, @@ -153,7 +162,14 @@ impl TransactionPlanner> for Planner { let mut res = vec![]; for output in scanner.scan_transaction(tx) { - res.push(Output::new(key, tx, output)); + res.push(Output::new_with_presumed_origin( + key, + tx, + // It shouldn't matter if this is wrong as we should never try to return these + // We still provide an accurate value to ensure a lack of discrepancies + Some(Address::new(inputs[0].output.output().script_pubkey.clone()).unwrap()), + output, + )); } res }), @@ -174,4 +190,4 @@ impl TransactionPlanner> for Planner { } } -pub(crate) type Scheduler = GenericScheduler; +pub(crate) type Scheduler = GenericScheduler, Planner>; diff --git a/processor/bitcoin/src/txindex.rs b/processor/bitcoin/src/txindex.rs new file mode 100644 index 000000000..d9d52526d --- /dev/null +++ b/processor/bitcoin/src/txindex.rs @@ -0,0 +1,80 @@ +/* + We want to be able to return received outputs. We do that by iterating over the inputs to find an + address format we recognize, then setting that address as the address to return to. + + Since inputs only contain the script signatures, yet addresses are for script public keys, we + need to pull up the output spent by an input and read the script public key from that. While we + could use `txindex=1`, and an asynchronous call to the Bitcoin node, we: + + 1) Can maintain a much smaller index ourselves + 2) Don't want the asynchronous call (which would require the flow be async, allowed to + potentially error, and more latent) + 3) Don't want to risk Bitcoin's `txindex` corruptions (frequently observed on testnet) + + This task builds that index. 
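+
+  Concretely: for each finalized block (the latest block number minus (CONFIRMATIONS - 1)), we
+  save every non-coinbase output's script public key under its (txid, vout), then set
+  LatestBlockToYieldAsFinalized so the ScannerFeed only yields blocks this index has fully
+  covered.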
+*/ + +use serai_db::{DbTxn, Db}; + +use primitives::task::ContinuallyRan; +use scanner::ScannerFeed; + +use crate::{db, rpc::Rpc, hash_bytes}; + +pub(crate) struct TxIndexTask(Rpc); + +#[async_trait::async_trait] +impl ContinuallyRan for TxIndexTask { + async fn run_iteration(&mut self) -> Result { + let latest_block_number = self + .0 + .rpc + .get_latest_block_number() + .await + .map_err(|e| format!("couldn't fetch latest block number: {e:?}"))?; + let latest_block_number = u64::try_from(latest_block_number).unwrap(); + // `CONFIRMATIONS - 1` as any on-chain block inherently has one confirmation (itself) + let finalized_block_number = + latest_block_number.checked_sub(Rpc::::CONFIRMATIONS - 1).ok_or(format!( + "blockchain only just started and doesn't have {} blocks yet", + Rpc::::CONFIRMATIONS + ))?; + + let finalized_block_number_in_db = db::LatestBlockToYieldAsFinalized::get(&self.0.db); + let next_block = finalized_block_number_in_db.map_or(0, |block| block + 1); + + let mut iterated = false; + for b in next_block ..= finalized_block_number { + iterated = true; + + // Fetch the block + let block_hash = self + .0 + .rpc + .get_block_hash(b.try_into().unwrap()) + .await + .map_err(|e| format!("couldn't fetch block hash for block {b}: {e:?}"))?; + let block = self + .0 + .rpc + .get_block(&block_hash) + .await + .map_err(|e| format!("couldn't fetch block {b}: {e:?}"))?; + + let mut txn = self.0.db.txn(); + + for tx in &block.txdata[1 ..] { + let txid = hash_bytes(tx.compute_txid().to_raw_hash()); + for (o, output) in tx.output.iter().enumerate() { + let o = u32::try_from(o).unwrap(); + // Set the script pub key for this transaction + db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes()); + } + } + + db::LatestBlockToYieldAsFinalized::set(&mut txn, &b); + txn.commit(); + } + Ok(iterated) + } +} From b2f06cce709e2acbeb606d67b3f73f1ca04953d6 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 02:48:53 -0400 Subject: [PATCH 099/179] Add an anyone-can-pay output to every Bitcoin transaction Resolves #284. --- processor/bitcoin/src/scheduler.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/processor/bitcoin/src/scheduler.rs b/processor/bitcoin/src/scheduler.rs index e225613c5..7f365c565 100644 --- a/processor/bitcoin/src/scheduler.rs +++ b/processor/bitcoin/src/scheduler.rs @@ -59,10 +59,22 @@ fn signable_transaction( }) }) .collect::>(); + /* + Push a payment to a key with a known private key which anyone can spend. If this transaction + gets stuck, this lets anyone create a child transaction spending this output, raising the fee, + getting the transaction unstuck (via CPFP). 
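+
+    The key used below is the secp256k1 generator itself. Its discrete logarithm (1) is public
+    knowledge, so anyone is able to derive the key for this output and produce the signature
+    for such a child transaction.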
+ */ + payments.push(Payment::new( + // The generator is even so this is valid + Address::new(p2tr_script_buf(::G::GENERATOR)), + // This uses the minimum output value allowed, as defined as a constant in bitcoin-serai + Balance { coin: Coin::Bitcoin, amount: Amount(bitcoin_serai::wallet::DUST) }, + None, + )); + let change = change .map(, EffectedReceivedOutputs>>>::change_address); - // TODO: ACP output BSignableTransaction::new( inputs.clone(), &payments From 196fc3b5c1101b143f7beebdb3b22f10cfd400c4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 03:01:39 -0400 Subject: [PATCH 100/179] Misc changes in response to prior two commits --- .github/actions/bitcoin/action.yml | 2 +- orchestration/dev/networks/bitcoin/run.sh | 4 ++-- orchestration/dev/networks/ethereum-relayer/.folder | 11 ----------- orchestration/dev/networks/monero/run.sh | 2 +- orchestration/testnet/networks/bitcoin/run.sh | 2 +- .../testnet/networks/ethereum-relayer/.folder | 11 ----------- processor/bitcoin/src/scheduler.rs | 8 ++++---- 7 files changed, 9 insertions(+), 31 deletions(-) diff --git a/.github/actions/bitcoin/action.yml b/.github/actions/bitcoin/action.yml index 6f628172d..2765571f7 100644 --- a/.github/actions/bitcoin/action.yml +++ b/.github/actions/bitcoin/action.yml @@ -37,4 +37,4 @@ runs: - name: Bitcoin Regtest Daemon shell: bash - run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon + run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -txindex -daemon diff --git a/orchestration/dev/networks/bitcoin/run.sh b/orchestration/dev/networks/bitcoin/run.sh index da7c95a8b..bec89fa98 100755 --- a/orchestration/dev/networks/bitcoin/run.sh +++ b/orchestration/dev/networks/bitcoin/run.sh @@ -3,7 +3,7 @@ RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" -bitcoind -txindex -regtest --port=8333 \ +bitcoind -regtest --port=8333 \ -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \ -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \ - $1 + $@ diff --git a/orchestration/dev/networks/ethereum-relayer/.folder b/orchestration/dev/networks/ethereum-relayer/.folder index 675d44382..e69de29bb 100644 --- a/orchestration/dev/networks/ethereum-relayer/.folder +++ b/orchestration/dev/networks/ethereum-relayer/.folder @@ -1,11 +0,0 @@ -#!/bin/sh - -RPC_USER="${RPC_USER:=serai}" -RPC_PASS="${RPC_PASS:=seraidex}" - -# Run Monero -monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ - --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ - --rpc-access-control-origins "*" --disable-rpc-ban \ - --rpc-login=$RPC_USER:$RPC_PASS \ - $1 diff --git a/orchestration/dev/networks/monero/run.sh b/orchestration/dev/networks/monero/run.sh index 75a93e464..1186c4d17 100755 --- a/orchestration/dev/networks/monero/run.sh +++ b/orchestration/dev/networks/monero/run.sh @@ -8,4 +8,4 @@ monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ --rpc-access-control-origins "*" --disable-rpc-ban \ --rpc-login=$RPC_USER:$RPC_PASS --log-level 2 \ - $1 + $@ diff --git a/orchestration/testnet/networks/bitcoin/run.sh b/orchestration/testnet/networks/bitcoin/run.sh index dbec375ac..6544243b5 100755 --- a/orchestration/testnet/networks/bitcoin/run.sh +++ b/orchestration/testnet/networks/bitcoin/run.sh @@ -3,7 +3,7 @@ RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" -bitcoind -txindex -testnet -port=8333 \ +bitcoind -testnet -port=8333 \ 
-rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \ -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \ --datadir=/volume diff --git a/orchestration/testnet/networks/ethereum-relayer/.folder b/orchestration/testnet/networks/ethereum-relayer/.folder index 675d44382..e69de29bb 100644 --- a/orchestration/testnet/networks/ethereum-relayer/.folder +++ b/orchestration/testnet/networks/ethereum-relayer/.folder @@ -1,11 +0,0 @@ -#!/bin/sh - -RPC_USER="${RPC_USER:=serai}" -RPC_PASS="${RPC_PASS:=seraidex}" - -# Run Monero -monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ - --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ - --rpc-access-control-origins "*" --disable-rpc-ban \ - --rpc-login=$RPC_USER:$RPC_PASS \ - $1 diff --git a/processor/bitcoin/src/scheduler.rs b/processor/bitcoin/src/scheduler.rs index 7f365c565..6e49d23d9 100644 --- a/processor/bitcoin/src/scheduler.rs +++ b/processor/bitcoin/src/scheduler.rs @@ -64,12 +64,12 @@ fn signable_transaction( gets stuck, this lets anyone create a child transaction spending this output, raising the fee, getting the transaction unstuck (via CPFP). */ - payments.push(Payment::new( + payments.push(( // The generator is even so this is valid - Address::new(p2tr_script_buf(::G::GENERATOR)), + Address::new(p2tr_script_buf(::G::GENERATOR).unwrap()).unwrap(), // This uses the minimum output value allowed, as defined as a constant in bitcoin-serai - Balance { coin: Coin::Bitcoin, amount: Amount(bitcoin_serai::wallet::DUST) }, - None, + // TODO: Add a test for this comparing to bitcoin's `minimal_non_dust` + bitcoin_serai::wallet::DUST, )); let change = change From 942799335f7d713b9b98c15099416929b04527e4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 03:09:44 -0400 Subject: [PATCH 101/179] Adjust Bitcoin processor layout --- processor/bitcoin/src/main.rs | 123 +----------------- .../bitcoin/src/{ => primitives}/block.rs | 0 processor/bitcoin/src/primitives/mod.rs | 3 + .../bitcoin/src/{ => primitives}/output.rs | 4 +- .../src/{ => primitives}/transaction.rs | 0 processor/bitcoin/src/txindex.rs | 2 +- 6 files changed, 13 insertions(+), 119 deletions(-) rename processor/bitcoin/src/{ => primitives}/block.rs (100%) create mode 100644 processor/bitcoin/src/primitives/mod.rs rename processor/bitcoin/src/{ => primitives}/output.rs (98%) rename processor/bitcoin/src/{ => primitives}/transaction.rs (100%) diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index 941cc0dc9..2ff072b47 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -6,14 +6,12 @@ static ALLOCATOR: zalloc::ZeroizingAlloc = zalloc::ZeroizingAlloc(std::alloc::System); +mod primitives; +pub(crate) use primitives::*; + // Internal utilities for scanning transactions mod scan; -// Primitive trait satisfactions -mod output; -mod transaction; -mod block; - // App-logic trait satisfactions mod rpc; mod scheduler; @@ -70,17 +68,10 @@ use serai_client::{ /* #[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Fee(u64); +pub(crate) struct Fee(u64); #[async_trait] impl TransactionTrait for Transaction { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - let mut hash = *self.compute_txid().as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - #[cfg(test)] async fn fee(&self, network: &Bitcoin) -> u64 { let mut value = 0; @@ -130,17 +121,8 @@ impl BlockTrait for Block { } } -// Shim required for testing/debugging purposes due to generic arguments also necessitating trait -// bounds -impl 
PartialEq for Bitcoin { - fn eq(&self, _: &Self) -> bool { - true - } -} -impl Eq for Bitcoin {} - impl Bitcoin { - pub async fn new(url: String) -> Bitcoin { + pub(crate) async fn new(url: String) -> Bitcoin { let mut res = Rpc::new(url.clone()).await; while let Err(e) = res { log::error!("couldn't connect to Bitcoin node: {e:?}"); @@ -151,7 +133,7 @@ impl Bitcoin { } #[cfg(test)] - pub async fn fresh_chain(&self) { + pub(crate) async fn fresh_chain(&self) { if self.rpc.get_latest_block_number().await.unwrap() > 0 { self .rpc @@ -194,64 +176,8 @@ impl Bitcoin { Ok(Fee(fee.max(1))) } - async fn make_signable_transaction( - &self, - block_number: usize, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - calculating_fee: bool, - ) -> Result, NetworkError> { - for payment in payments { - assert_eq!(payment.balance.coin, Coin::Bitcoin); - } - - // TODO2: Use an fee representative of several blocks, cached inside Self - let block_for_fee = self.get_block(block_number).await?; - let fee = self.median_fee(&block_for_fee).await?; - - let payments = payments - .iter() - .map(|payment| { - ( - payment.address.clone().into(), - // If we're solely estimating the fee, don't specify the actual amount - // This won't affect the fee calculation yet will ensure we don't hit a not enough funds - // error - if calculating_fee { Self::DUST } else { payment.balance.amount.0 }, - ) - }) - .collect::>(); - - match BSignableTransaction::new( - inputs.iter().map(|input| input.output.clone()).collect(), - &payments, - change.clone().map(Into::into), - None, - fee.0, - ) { - Ok(signable) => Ok(Some(signable)), - Err(TransactionError::NoInputs) => { - panic!("trying to create a bitcoin transaction without inputs") - } - // No outputs left and the change isn't worth enough/not even enough funds to pay the fee - Err(TransactionError::NoOutputs | TransactionError::NotEnoughFunds) => Ok(None), - // amortize_fee removes payments which fall below the dust threshold - Err(TransactionError::DustPayment) => panic!("dust payment despite removing dust"), - Err(TransactionError::TooMuchData) => { - panic!("too much data despite not specifying data") - } - Err(TransactionError::TooLowFee) => { - panic!("created a transaction whose fee is below the minimum") - } - Err(TransactionError::TooLargeTransaction) => { - panic!("created a too large transaction despite limiting inputs/outputs") - } - } - } - #[cfg(test)] - pub fn sign_btc_input_for_p2pkh( + pub(crate) fn sign_btc_input_for_p2pkh( tx: &Transaction, input_index: usize, private_key: &PrivateKey, @@ -288,17 +214,8 @@ impl Bitcoin { } } -fn address_from_key(key: ProjectivePoint) -> Address { - Address::new( - p2tr_script_buf(key).expect("creating address from key which isn't properly tweaked"), - ) - .expect("couldn't create Serai-representable address for P2TR script") -} - #[async_trait] impl Network for Bitcoin { - type Scheduler = Scheduler; - // 2 inputs should be 2 * 230 = 460 weight units // The output should be ~36 bytes, or 144 weight units // The overhead should be ~20 bytes at most, or 80 weight units @@ -307,34 +224,12 @@ impl Network for Bitcoin { // aggregation TX const COST_TO_AGGREGATE: u64 = 800; - const MAX_OUTPUTS: usize = MAX_OUTPUTS; - fn tweak_keys(keys: &mut ThresholdKeys) { *keys = tweak_keys(keys); // Also create a scanner to assert these keys, and all expected paths, are usable scanner(keys.group_key()); } - #[cfg(test)] - async fn external_address(&self, key: ProjectivePoint) -> Address { - address_from_key(key) - } - - fn branch_address(key: ProjectivePoint) -> Option
<Address> {
-    let (_, offsets, _) = scanner(key);
-    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch])))
-  }
-
-  fn change_address(key: ProjectivePoint) -> Option<Address>
{
-    let (_, offsets, _) = scanner(key);
-    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change])))
-  }
-
-  fn forward_address(key: ProjectivePoint) -> Option<Address>
{ - let (_, offsets, _) = scanner(key); - Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded]))) - } - #[cfg(test)] async fn get_block_number(&self, id: &[u8; 32]) -> usize { self.rpc.get_block_number(id).await.unwrap() @@ -409,8 +304,4 @@ impl Network for Bitcoin { self.get_block(block).await.unwrap() } } - -impl UtxoNetwork for Bitcoin { - const MAX_INPUTS: usize = MAX_INPUTS; -} */ diff --git a/processor/bitcoin/src/block.rs b/processor/bitcoin/src/primitives/block.rs similarity index 100% rename from processor/bitcoin/src/block.rs rename to processor/bitcoin/src/primitives/block.rs diff --git a/processor/bitcoin/src/primitives/mod.rs b/processor/bitcoin/src/primitives/mod.rs new file mode 100644 index 000000000..fba52dd96 --- /dev/null +++ b/processor/bitcoin/src/primitives/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod output; +pub(crate) mod transaction; +pub(crate) mod block; diff --git a/processor/bitcoin/src/output.rs b/processor/bitcoin/src/primitives/output.rs similarity index 98% rename from processor/bitcoin/src/output.rs rename to processor/bitcoin/src/primitives/output.rs index 2ed037057..05ab6acf9 100644 --- a/processor/bitcoin/src/output.rs +++ b/processor/bitcoin/src/primitives/output.rs @@ -53,7 +53,7 @@ pub(crate) struct Output { } impl Output { - pub fn new( + pub(crate) fn new( getter: &impl Get, key: ::G, tx: &Transaction, @@ -70,7 +70,7 @@ impl Output { } } - pub fn new_with_presumed_origin( + pub(crate) fn new_with_presumed_origin( key: ::G, tx: &Transaction, presumed_origin: Option
, diff --git a/processor/bitcoin/src/transaction.rs b/processor/bitcoin/src/primitives/transaction.rs similarity index 100% rename from processor/bitcoin/src/transaction.rs rename to processor/bitcoin/src/primitives/transaction.rs diff --git a/processor/bitcoin/src/txindex.rs b/processor/bitcoin/src/txindex.rs index d9d52526d..63d5072c3 100644 --- a/processor/bitcoin/src/txindex.rs +++ b/processor/bitcoin/src/txindex.rs @@ -67,7 +67,7 @@ impl ContinuallyRan for TxIndexTask { let txid = hash_bytes(tx.compute_txid().to_raw_hash()); for (o, output) in tx.output.iter().enumerate() { let o = u32::try_from(o).unwrap(); - // Set the script pub key for this transaction + // Set the script public key for this transaction db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes()); } } From a0bdcf9d817258d89a117620d2e34fb320cfdcc5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 03:23:00 -0400 Subject: [PATCH 102/179] Bitcoin Key Gen --- Cargo.lock | 2 ++ processor/bitcoin/Cargo.toml | 2 ++ processor/bitcoin/src/key_gen.rs | 26 ++++++++++++++++++++++++ processor/bitcoin/src/main.rs | 7 +------ processor/key-gen/src/db.rs | 34 +++++++++++++++++++------------- 5 files changed, 51 insertions(+), 20 deletions(-) create mode 100644 processor/bitcoin/src/key_gen.rs diff --git a/Cargo.lock b/Cargo.lock index 1839cc98b..8c0c3dd5a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8128,6 +8128,7 @@ dependencies = [ "bitcoin-serai", "borsh", "ciphersuite", + "dkg", "env_logger", "flexible-transcript", "log", @@ -8139,6 +8140,7 @@ dependencies = [ "serai-db", "serai-env", "serai-message-queue", + "serai-processor-key-gen", "serai-processor-messages", "serai-processor-primitives", "serai-processor-scanner", diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml index 54ace26f6..c92e13849 100644 --- a/processor/bitcoin/Cargo.toml +++ b/processor/bitcoin/Cargo.toml @@ -25,6 +25,7 @@ borsh = { version = "1", default-features = false, features = ["std", "derive", transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"] } @@ -41,6 +42,7 @@ serai-env = { path = "../../common/env" } serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] } messages = { package = "serai-processor-messages", path = "../messages" } +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } primitives = { package = "serai-processor-primitives", path = "../primitives" } scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } diff --git a/processor/bitcoin/src/key_gen.rs b/processor/bitcoin/src/key_gen.rs new file mode 100644 index 000000000..161832313 --- /dev/null +++ b/processor/bitcoin/src/key_gen.rs @@ -0,0 +1,26 @@ +use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; +use frost::ThresholdKeys; + +use key_gen::KeyGenParams; + +use crate::scan::scanner; + +pub(crate) struct KeyGen; +impl KeyGenParams for KeyGen { + const ID: &'static str = "Bitcoin"; + + type ExternalNetworkCurve = 
Secp256k1; + + fn tweak_keys(keys: &mut ThresholdKeys) { + *keys = bitcoin_serai::wallet::tweak_keys(keys); + // Also create a scanner to assert these keys, and all expected paths, are usable + scanner(keys.group_key()); + } + + fn encode_key(key: ::G) -> Vec { + let key = key.to_bytes(); + let key: &[u8] = key.as_ref(); + // Skip the parity encoding as we know this key is even + key[1 ..].to_vec() + } +} diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index 2ff072b47..d86a4ba1a 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -13,6 +13,7 @@ pub(crate) use primitives::*; mod scan; // App-logic trait satisfactions +mod key_gen; mod rpc; mod scheduler; @@ -224,12 +225,6 @@ impl Network for Bitcoin { // aggregation TX const COST_TO_AGGREGATE: u64 = 800; - fn tweak_keys(keys: &mut ThresholdKeys) { - *keys = tweak_keys(keys); - // Also create a scanner to assert these keys, and all expected paths, are usable - scanner(keys.group_key()); - } - #[cfg(test)] async fn get_block_number(&self, id: &[u8; 32]) -> usize { self.rpc.get_block_number(id).await.unwrap() diff --git a/processor/key-gen/src/db.rs b/processor/key-gen/src/db.rs index e82b84a52..676fd2aa9 100644 --- a/processor/key-gen/src/db.rs +++ b/processor/key-gen/src/db.rs @@ -9,7 +9,7 @@ use dkg::{Participant, ThresholdCore, ThresholdKeys, evrf::EvrfCurve}; use serai_validator_sets_primitives::Session; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_db::{Get, DbTxn, create_db}; +use serai_db::{Get, DbTxn}; use crate::KeyGenParams; @@ -35,20 +35,26 @@ pub(crate) struct Participations { pub(crate) network_participations: HashMap>, } -create_db!( - KeyGen { - Params: (session: &Session) -> RawParams, - Participations: (session: &Session) -> Participations, - KeyShares: (session: &Session) -> Vec, - } -); +mod _db { + use serai_validator_sets_primitives::Session; + + use serai_db::{Get, DbTxn, create_db}; + + create_db!( + KeyGen { + Params: (session: &Session) -> super::RawParams, + Participations: (session: &Session) -> super::Participations, + KeyShares: (session: &Session) -> Vec, + } + ); +} pub(crate) struct KeyGenDb(PhantomData
<P>);
+impl<P: KeyGenParams> KeyGenDb<P>
{ pub(crate) fn set_params(txn: &mut impl DbTxn, session: Session, params: Params
<P::ExternalNetworkCurve>
) { assert_eq!(params.substrate_evrf_public_keys.len(), params.network_evrf_public_keys.len()); - Params::set( + _db::Params::set( txn, &session, &RawParams { @@ -68,7 +74,7 @@ impl KeyGenDb
<P>
{
   }
 
   pub(crate) fn params(getter: &impl Get, session: Session) -> Option<Params<P::ExternalNetworkCurve>> {
-    Params::get(getter, &session).map(|params| Params {
+    _db::Params::get(getter, &session).map(|params| Params {
       t: params.t,
       n: params
         .network_evrf_public_keys
@@ -101,10 +107,10 @@ impl KeyGenDb
<P>
{
     session: Session,
     participations: &Participations,
   ) {
-    Participations::set(txn, &session, participations)
+    _db::Participations::set(txn, &session, participations)
   }
   pub(crate) fn participations(getter: &impl Get, session: Session) -> Option<Participations> {
-    Participations::get(getter, &session)
+    _db::Participations::get(getter, &session)
   }
 
   // Set the key shares for a session.
@@ -121,7 +127,7 @@ impl KeyGenDb
<P>
{ keys.extend(substrate_keys.serialize().as_slice()); keys.extend(network_keys.serialize().as_slice()); } - KeyShares::set(txn, &session, &keys); + _db::KeyShares::set(txn, &session, &keys); } #[allow(clippy::type_complexity)] @@ -129,7 +135,7 @@ impl KeyGenDb
<P>
{ getter: &impl Get, session: Session, ) -> Option<(Vec>, Vec>)> { - let keys = KeyShares::get(getter, &session)?; + let keys = _db::KeyShares::get(getter, &session)?; let mut keys: &[u8] = keys.as_ref(); let mut substrate_keys = vec![]; From b5c040ff4672e854d164eedb9dad628f61ee9963 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 04:54:03 -0400 Subject: [PATCH 103/179] Start on the new processor main loop --- processor/bitcoin/src/db.rs | 8 + processor/bitcoin/src/key_gen.rs | 6 +- processor/bitcoin/src/main.rs | 83 ++++++++++ processor/messages/src/lib.rs | 11 +- processor/src/main.rs | 259 ------------------------------- 5 files changed, 99 insertions(+), 268 deletions(-) diff --git a/processor/bitcoin/src/db.rs b/processor/bitcoin/src/db.rs index 1d73ebfee..94a7c0ba8 100644 --- a/processor/bitcoin/src/db.rs +++ b/processor/bitcoin/src/db.rs @@ -1,5 +1,13 @@ +use serai_client::validator_sets::primitives::Session; + use serai_db::{Get, DbTxn, create_db}; +create_db! { + Processor { + ExternalKeyForSession: (session: Session) -> Vec, + } +} + create_db! { BitcoinProcessor { LatestBlockToYieldAsFinalized: () -> u64, diff --git a/processor/bitcoin/src/key_gen.rs b/processor/bitcoin/src/key_gen.rs index 161832313..416677e78 100644 --- a/processor/bitcoin/src/key_gen.rs +++ b/processor/bitcoin/src/key_gen.rs @@ -1,12 +1,10 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; use frost::ThresholdKeys; -use key_gen::KeyGenParams; - use crate::scan::scanner; -pub(crate) struct KeyGen; -impl KeyGenParams for KeyGen { +pub(crate) struct KeyGenParams; +impl key_gen::KeyGenParams for KeyGenParams { const ID: &'static str = "Bitcoin"; type ExternalNetworkCurve = Secp256k1; diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index d86a4ba1a..bb788d1e8 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -6,6 +6,10 @@ static ALLOCATOR: zalloc::ZeroizingAlloc = zalloc::ZeroizingAlloc(std::alloc::System); +use ciphersuite::Ciphersuite; + +use serai_db::{DbTxn, Db}; + mod primitives; pub(crate) use primitives::*; @@ -14,8 +18,11 @@ mod scan; // App-logic trait satisfactions mod key_gen; +use crate::key_gen::KeyGenParams; mod rpc; +use rpc::Rpc; mod scheduler; +use scheduler::Scheduler; // Our custom code for Bitcoin mod db; @@ -29,6 +36,82 @@ pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> res } +/// Fetch the next message from the Coordinator. +/// +/// This message is guaranteed to have never been handled before, where handling is defined as +/// this `txn` being committed. 
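+///
+/// A sketch of the intended call pattern (`handle` stands in for the message match in
+/// `coordinator_loop` below; it isn't an actual function in this crate):
+///
+/// ```ignore
+/// let mut txn = db.txn();
+/// let msg = next_message(&mut txn).await;
+/// handle(&mut txn, msg);
+/// // Only this commit marks the message as handled; crashing before it redelivers the message.
+/// txn.commit();
+/// ```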
+async fn next_message(_txn: &mut impl DbTxn) -> messages::CoordinatorMessage { + todo!("TODO") +} + +async fn send_message(_msg: messages::ProcessorMessage) { + todo!("TODO") +} + +async fn coordinator_loop( + mut db: D, + mut key_gen: ::key_gen::KeyGen, + mut signers: signers::Signers, Scheduler, Rpc>, + mut scanner: Option>>, +) { + loop { + let mut txn = Some(db.txn()); + let msg = next_message(txn.as_mut().unwrap()).await; + match msg { + messages::CoordinatorMessage::KeyGen(msg) => { + // This is a computationally expensive call yet it happens infrequently + for msg in key_gen.handle(txn.as_mut().unwrap(), msg) { + send_message(messages::ProcessorMessage::KeyGen(msg)).await; + } + } + // These are cheap calls which are fine to be here in this loop + messages::CoordinatorMessage::Sign(msg) => signers.queue_message(txn.as_mut().unwrap(), &msg), + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { + session, + block_number, + block, + }, + ) => signers.cosign_block(txn.take().unwrap(), session, block_number, block), + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::SignSlashReport { session, report }, + ) => signers.sign_slash_report(txn.take().unwrap(), session, &report), + messages::CoordinatorMessage::Substrate(msg) => match msg { + messages::substrate::CoordinatorMessage::SetKeys { serai_time, session, key_pair } => { + db::ExternalKeyForSession::set(txn.as_mut().unwrap(), session, &key_pair.1.into_inner()); + todo!("TODO: Register in signers"); + todo!("TODO: Scanner activation") + } + messages::substrate::CoordinatorMessage::SlashesReported { session } => { + let key_bytes = db::ExternalKeyForSession::get(txn.as_ref().unwrap(), session).unwrap(); + let mut key_bytes = key_bytes.as_slice(); + let key = + ::ExternalNetworkCurve::read_G(&mut key_bytes) + .unwrap(); + assert!(key_bytes.is_empty()); + + signers.retire_session(txn.as_mut().unwrap(), session, &key) + } + messages::substrate::CoordinatorMessage::BlockWithBatchAcknowledgement { + block, + batch_id, + in_instruction_succeededs, + burns, + key_to_activate, + } => todo!("TODO"), + messages::substrate::CoordinatorMessage::BlockWithoutBatchAcknowledgement { + block, + burns, + } => todo!("TODO"), + }, + }; + // If the txn wasn't already consumed and committed, commit it + if let Some(txn) = txn { + txn.commit(); + } + } +} + #[tokio::main] async fn main() {} diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index 998c7cea8..ae1ab6d58 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -113,8 +113,6 @@ pub mod sign { pub attempt: u32, } - // TODO: Make this generic to the ID once we introduce topics into the message-queue and remove - // the global ProcessorMessage/CoordinatorMessage #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { // Received preprocesses for the specified signing protocol. @@ -185,8 +183,10 @@ pub mod substrate { #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { - /// Keys set on the Serai network. + /// Keys set on the Serai blockchain. SetKeys { serai_time: u64, session: Session, key_pair: KeyPair }, + /// Slashes reported on the Serai blockchain OR the process timed out. + SlashesReported { session: Session }, /// The data from a block which acknowledged a Batch. 
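    ///
    /// `in_instruction_succeededs` is expected to hold one flag per `InInstruction` in the
    /// acknowledged Batch, in order, letting the processor return funds for any which failed.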
BlockWithBatchAcknowledgement { block: u64, @@ -305,11 +305,12 @@ impl CoordinatorMessage { CoordinatorMessage::Substrate(msg) => { let (sub, id) = match msg { substrate::CoordinatorMessage::SetKeys { session, .. } => (0, session.encode()), + substrate::CoordinatorMessage::SlashesReported { session } => (1, session.encode()), substrate::CoordinatorMessage::BlockWithBatchAcknowledgement { block, .. } => { - (1, block.encode()) + (2, block.encode()) } substrate::CoordinatorMessage::BlockWithoutBatchAcknowledgement { block, .. } => { - (2, block.encode()) + (3, block.encode()) } }; diff --git a/processor/src/main.rs b/processor/src/main.rs index 104067290..51123b925 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -1,21 +1,3 @@ -use std::{time::Duration, collections::HashMap}; - -use zeroize::{Zeroize, Zeroizing}; - -use ciphersuite::{ - group::{ff::PrimeField, GroupEncoding}, - Ciphersuite, Ristretto, -}; -use dkg::evrf::EvrfCurve; - -use log::{info, warn}; -use tokio::time::sleep; - -use serai_client::{ - primitives::{BlockHash, NetworkId}, - validator_sets::primitives::{Session, KeyPair}, -}; - use messages::{ coordinator::{ SubstrateSignableId, PlanMeta, CoordinatorMessage as CoordinatorCoordinatorMessage, @@ -27,112 +9,18 @@ use serai_env as env; use message_queue::{Service, client::MessageQueue}; -mod networks; -use networks::{Block, Network}; -#[cfg(feature = "bitcoin")] -use networks::Bitcoin; -#[cfg(feature = "ethereum")] -use networks::Ethereum; -#[cfg(feature = "monero")] -use networks::Monero; - mod db; pub use db::*; mod coordinator; pub use coordinator::*; -use serai_processor_key_gen as key_gen; -use key_gen::{SessionDb, KeyConfirmed, KeyGen}; - -mod signer; -use signer::Signer; - -mod cosigner; -use cosigner::Cosigner; - -mod batch_signer; -use batch_signer::BatchSigner; - -mod slash_report_signer; -use slash_report_signer::SlashReportSigner; - mod multisigs; use multisigs::{MultisigEvent, MultisigManager}; #[cfg(test)] mod tests; -#[global_allocator] -static ALLOCATOR: zalloc::ZeroizingAlloc = - zalloc::ZeroizingAlloc(std::alloc::System); - -// Items which are mutably borrowed by Tributary. -// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't -// violated. -struct TributaryMutable { - // The following are actually mutably borrowed by Substrate as well. - // - Substrate triggers key gens, and determines which to use. - // - SubstrateBlock events cause scheduling which causes signing. - // - // This is still considered Tributary-mutable as most mutation (preprocesses/shares) happens by - // the Tributary. - // - // Creation of tasks is by Substrate, yet this is safe since the mutable borrow is transferred to - // Tributary. - // - // Tributary stops mutating a key gen attempt before Substrate is made aware of it, ensuring - // Tributary drops its mutable borrow before Substrate acquires it. Tributary will maintain a - // mutable borrow on the *key gen task*, yet the finalization code can successfully run for any - // attempt. - // - // The only other note is how the scanner may cause a signer task to be dropped, effectively - // invalidating the Tributary's mutable borrow. The signer is coded to allow for attempted usage - // of a dropped task. - key_gen: KeyGen, - signers: HashMap>, - - // This is also mutably borrowed by the Scanner. - // The Scanner starts new sign tasks. - // The Tributary mutates already-created signed tasks, potentially completing them. 
- // Substrate may mark tasks as completed, invalidating any existing mutable borrows. - // The safety of this follows as written above. - - // There should only be one BatchSigner at a time (see #277) - batch_signer: Option>, - - // Solely mutated by the tributary. - cosigner: Option, - slash_report_signer: Option, -} - -// Items which are mutably borrowed by Substrate. -// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't -// violated. - -/* - The MultisigManager contains the Scanner and Schedulers. - - The scanner is expected to autonomously operate, scanning blocks as they appear. When a block is - sufficiently confirmed, the scanner causes the Substrate signer to sign a batch. It itself only - mutates its list of finalized blocks, to protect against re-orgs, and its in-memory state though. - - Disk mutations to the scan-state only happens once the relevant `Batch` is included on Substrate. - It can't be mutated as soon as the `Batch` is signed as we need to know the order of `Batch`s - relevant to `Burn`s. - - Schedulers take in new outputs, confirmed in `Batch`s, and outbound payments, triggered by - `Burn`s. - - Substrate also decides when to move to a new multisig, hence why this entire object is - Substrate-mutable. - - Since MultisigManager should always be verifiable, and the Tributary is temporal, MultisigManager - being entirely SubstrateMutable shows proper data pipe-lining. -*/ - -type SubstrateMutable = MultisigManager; - async fn handle_coordinator_msg( txn: &mut D::Transaction<'_>, network: &N, @@ -141,54 +29,6 @@ async fn handle_coordinator_msg( substrate_mutable: &mut SubstrateMutable, msg: &Message, ) { - // If this message expects a higher block number than we have, halt until synced - async fn wait( - txn: &D::Transaction<'_>, - substrate_mutable: &SubstrateMutable, - block_hash: &BlockHash, - ) { - let mut needed_hash = >::Id::default(); - needed_hash.as_mut().copy_from_slice(&block_hash.0); - - loop { - // Ensure our scanner has scanned this block, which means our daemon has this block at - // a sufficient depth - if substrate_mutable.block_number(txn, &needed_hash).await.is_none() { - warn!( - "node is desynced. we haven't scanned {} which should happen after {} confirms", - hex::encode(&needed_hash), - N::CONFIRMATIONS, - ); - sleep(Duration::from_secs(10)).await; - continue; - }; - break; - } - - // TODO2: Sanity check we got an AckBlock (or this is the AckBlock) for the block in question - - /* - let synced = |context: &SubstrateContext, key| -> Result<(), ()> { - // Check that we've synced this block and can actually operate on it ourselves - let latest = scanner.latest_scanned(key); - if usize::try_from(context.network_latest_finalized_block).unwrap() < latest { - log::warn!( - "external network node disconnected/desynced from rest of the network. 
\ - our block: {latest:?}, network's acknowledged: {}", - context.network_latest_finalized_block, - ); - Err(())?; - } - Ok(()) - }; - */ - } - - if let Some(required) = msg.msg.required_block() { - // wait only reads from, it doesn't mutate, substrate_mutable - wait(txn, substrate_mutable, &required).await; - } - async fn activate_key( network: &N, substrate_mutable: &mut SubstrateMutable, @@ -220,105 +60,6 @@ async fn handle_coordinator_msg( } match msg.msg.clone() { - CoordinatorMessage::KeyGen(msg) => { - for msg in tributary_mutable.key_gen.handle(txn, msg) { - coordinator.send(msg).await; - } - } - - CoordinatorMessage::Sign(msg) => { - if let Some(msg) = tributary_mutable - .signers - .get_mut(&msg.session()) - .expect("coordinator told us to sign with a signer we don't have") - .handle(txn, msg) - .await - { - coordinator.send(msg).await; - } - } - - CoordinatorMessage::Coordinator(msg) => match msg { - CoordinatorCoordinatorMessage::CosignSubstrateBlock { id, block_number } => { - let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { - panic!("CosignSubstrateBlock id didn't have a CosigningSubstrateBlock") - }; - let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else { - panic!("didn't have key shares for the key we were told to cosign with"); - }; - if let Some((cosigner, msg)) = - Cosigner::new(txn, id.session, keys, block_number, block, id.attempt) - { - tributary_mutable.cosigner = Some(cosigner); - coordinator.send(msg).await; - } else { - log::warn!("Cosigner::new returned None"); - } - } - CoordinatorCoordinatorMessage::SignSlashReport { id, report } => { - assert_eq!(id.id, SubstrateSignableId::SlashReport); - let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else { - panic!("didn't have key shares for the key we were told to perform a slash report with"); - }; - if let Some((slash_report_signer, msg)) = - SlashReportSigner::new(txn, N::NETWORK, id.session, keys, report, id.attempt) - { - tributary_mutable.slash_report_signer = Some(slash_report_signer); - coordinator.send(msg).await; - } else { - log::warn!("SlashReportSigner::new returned None"); - } - } - _ => { - let (is_cosign, is_batch, is_slash_report) = match msg { - CoordinatorCoordinatorMessage::CosignSubstrateBlock { .. } | - CoordinatorCoordinatorMessage::SignSlashReport { .. } => (false, false, false), - CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } | - CoordinatorCoordinatorMessage::SubstrateShares { ref id, .. } => ( - matches!(&id.id, SubstrateSignableId::CosigningSubstrateBlock(_)), - matches!(&id.id, SubstrateSignableId::Batch(_)), - matches!(&id.id, SubstrateSignableId::SlashReport), - ), - CoordinatorCoordinatorMessage::BatchReattempt { .. } => (false, true, false), - }; - - if is_cosign { - if let Some(cosigner) = tributary_mutable.cosigner.as_mut() { - if let Some(msg) = cosigner.handle(txn, msg) { - coordinator.send(msg).await; - } - } else { - log::warn!( - "received message for cosigner yet didn't have a cosigner. 
{}", - "this is an error if we didn't reboot", - ); - } - } else if is_batch { - if let Some(msg) = tributary_mutable - .batch_signer - .as_mut() - .expect( - "coordinator told us to sign a batch when we don't currently have a Substrate signer", - ) - .handle(txn, msg) - { - coordinator.send(msg).await; - } - } else if is_slash_report { - if let Some(slash_report_signer) = tributary_mutable.slash_report_signer.as_mut() { - if let Some(msg) = slash_report_signer.handle(txn, msg) { - coordinator.send(msg).await; - } - } else { - log::warn!( - "received message for slash report signer yet didn't have {}", - "a slash report signer. this is an error if we didn't reboot", - ); - } - } - } - }, - CoordinatorMessage::Substrate(msg) => { match msg { messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, session, key_pair } => { From a4e72c062e111c66fd9f6b0cd08a51b33d054581 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 06:39:44 -0400 Subject: [PATCH 104/179] Add note to signers on reducing disk IO --- processor/signers/src/db.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/processor/signers/src/db.rs b/processor/signers/src/db.rs index b4de78d9e..2c13ddba3 100644 --- a/processor/signers/src/db.rs +++ b/processor/signers/src/db.rs @@ -22,6 +22,19 @@ db_channel! { SlashReport: (session: Session) -> Vec, SlashReportSignature: (session: Session) -> Vec, + /* + TODO: Most of these are pointless? We drop all active signing sessions on reboot. It's + accordingly not valuable to use a DB-backed channel to communicate messages for signing + sessions (Preprocess/Shares). + + Transactions, Batches, Slash Reports, and Cosigns all have their own mechanisms/DB entries + and don't use the following channels. The only questions are: + + 1) If it's safe to drop Reattempt? Or if we need tweaks to enable that + 2) If we reboot with a pending Reattempt, we'll participate on reboot. If we drop that + Reattempt, we won't. Accordingly, we have degraded performance in that edge case in + exchange for less disk IO in the majority of cases. Is that work it? + */ CoordinatorToCosignerMessages: (session: Session) -> CoordinatorMessage, CosignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, From 69bbf09a3fa9e4434046711b041f690336b6d1d9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 08:57:57 -0400 Subject: [PATCH 105/179] Note better message structure in messages --- processor/messages/src/lib.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index ae1ab6d58..080864dc2 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -181,6 +181,24 @@ pub mod coordinator { pub mod substrate { use super::*; + /* TODO + #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + pub enum InInstructionResult { + Succeeded, + Failed, + } + #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + pub struct ExecutedBatch { + batch_id: u32, + in_instructions: Vec, + } + Block { + block: u64, + batches: Vec, + burns: Vec, + } + */ + #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { /// Keys set on the Serai blockchain. @@ -193,7 +211,6 @@ pub mod substrate { batch_id: u32, in_instruction_succeededs: Vec, burns: Vec, - key_to_activate: Option, }, /// The data from a block which didn't acknowledge a Batch. 
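    ///
    /// With no Batch in the block, there are no `InInstruction` results to report, so only the
    /// `burns` accompany it and are queued for fulfillment.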
BlockWithoutBatchAcknowledgement { block: u64, burns: Vec }, From 507a37d01d88ec415bc159bafead32c0286e807e Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 08:58:58 -0400 Subject: [PATCH 106/179] Continue filling out main loop Adds generics to the db_channel macro, fixes the bug where it needed at least one key. --- common/db/src/create_db.rs | 46 +++++++-- processor/bitcoin/src/db.rs | 13 ++- processor/bitcoin/src/key_gen.rs | 6 +- processor/bitcoin/src/main.rs | 114 ++++++++++++++++----- processor/bitcoin/src/primitives/mod.rs | 17 +++ processor/bitcoin/src/primitives/output.rs | 21 ++-- processor/key-gen/src/lib.rs | 35 ++++--- 7 files changed, 187 insertions(+), 65 deletions(-) diff --git a/common/db/src/create_db.rs b/common/db/src/create_db.rs index 7be1e1c86..1fb52b1b6 100644 --- a/common/db/src/create_db.rs +++ b/common/db/src/create_db.rs @@ -79,10 +79,22 @@ macro_rules! create_db { pub(crate) fn del$(<$($generic_name: $generic_type),+>)?( txn: &mut impl DbTxn $(, $arg: $arg_type)* - ) -> core::marker::PhantomData<($($($generic_name),+)?)> { + ) -> core::marker::PhantomData<($($($generic_name),+)?)> { txn.del(&$field_name::key$(::<$($generic_name),+>)?($($arg),*)); core::marker::PhantomData } + + pub(crate) fn take$(<$($generic_name: $generic_type),+>)?( + txn: &mut impl DbTxn + $(, $arg: $arg_type)* + ) -> Option<$field_type> { + let key = $field_name::key$(::<$($generic_name),+>)?($($arg),*); + let res = txn.get(&key).map(|data| borsh::from_slice(data.as_ref()).unwrap()); + if res.is_some() { + txn.del(key); + } + res + } } )* }; @@ -91,19 +103,30 @@ macro_rules! create_db { #[macro_export] macro_rules! db_channel { ($db_name: ident { - $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)* + $($field_name: ident: + $(<$($generic_name: tt: $generic_type: tt),+>)?( + $($arg: ident: $arg_type: ty),* + ) -> $field_type: ty$(,)? + )* }) => { $( create_db! { $db_name { - $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type, + $field_name: $(<$($generic_name: $generic_type),+>)?( + $($arg: $arg_type,)* + index: u32 + ) -> $field_type } } impl $field_name { - pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) { + pub(crate) fn send$(<$($generic_name: $generic_type),+>)?( + txn: &mut impl DbTxn + $(, $arg: $arg_type)* + , value: &$field_type + ) { // Use index 0 to store the amount of messages - let messages_sent_key = $field_name::key($($arg),*, 0); + let messages_sent_key = $field_name::key$(::<$($generic_name),+>)?($($arg,)* 0); let messages_sent = txn.get(&messages_sent_key).map(|counter| { u32::from_le_bytes(counter.try_into().unwrap()) }).unwrap_or(0); @@ -114,19 +137,22 @@ macro_rules! 
db_channel { // at the same time let index_to_use = messages_sent + 2; - $field_name::set(txn, $($arg),*, index_to_use, value); + $field_name::set$(::<$($generic_name),+>)?(txn, $($arg,)* index_to_use, value); } - pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> { - let messages_recvd_key = $field_name::key($($arg),*, 1); + pub(crate) fn try_recv$(<$($generic_name: $generic_type),+>)?( + txn: &mut impl DbTxn + $(, $arg: $arg_type)* + ) -> Option<$field_type> { + let messages_recvd_key = $field_name::key$(::<$($generic_name),+>)?($($arg,)* 1); let messages_recvd = txn.get(&messages_recvd_key).map(|counter| { u32::from_le_bytes(counter.try_into().unwrap()) }).unwrap_or(0); let index_to_read = messages_recvd + 2; - let res = $field_name::get(txn, $($arg),*, index_to_read); + let res = $field_name::get$(::<$($generic_name),+>)?(txn, $($arg,)* index_to_read); if res.is_some() { - $field_name::del(txn, $($arg),*, index_to_read); + $field_name::del$(::<$($generic_name),+>)?(txn, $($arg,)* index_to_read); txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes()); } res diff --git a/processor/bitcoin/src/db.rs b/processor/bitcoin/src/db.rs index 94a7c0ba8..b0acc427d 100644 --- a/processor/bitcoin/src/db.rs +++ b/processor/bitcoin/src/db.rs @@ -1,10 +1,19 @@ +use ciphersuite::group::GroupEncoding; + use serai_client::validator_sets::primitives::Session; -use serai_db::{Get, DbTxn, create_db}; +use serai_db::{Get, DbTxn, create_db, db_channel}; +use primitives::EncodableG; create_db! { Processor { - ExternalKeyForSession: (session: Session) -> Vec, + ExternalKeyForSessionForSigners: (session: Session) -> EncodableG, + } +} + +db_channel! { + Processor { + KeyToActivate: () -> EncodableG } } diff --git a/processor/bitcoin/src/key_gen.rs b/processor/bitcoin/src/key_gen.rs index 416677e78..759443643 100644 --- a/processor/bitcoin/src/key_gen.rs +++ b/processor/bitcoin/src/key_gen.rs @@ -1,7 +1,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; use frost::ThresholdKeys; -use crate::scan::scanner; +use crate::{primitives::x_coord_to_even_point, scan::scanner}; pub(crate) struct KeyGenParams; impl key_gen::KeyGenParams for KeyGenParams { @@ -21,4 +21,8 @@ impl key_gen::KeyGenParams for KeyGenParams { // Skip the parity encoding as we know this key is even key[1 ..].to_vec() } + + fn decode_key(key: &[u8]) -> Option<::G> { + x_coord_to_even_point(key) + } } diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index bb788d1e8..136b89cbf 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -9,9 +9,11 @@ static ALLOCATOR: zalloc::ZeroizingAlloc = use ciphersuite::Ciphersuite; use serai_db::{DbTxn, Db}; +use ::primitives::EncodableG; +use ::key_gen::KeyGenParams as KeyGenParamsTrait; mod primitives; -pub(crate) use primitives::*; +pub(crate) use crate::primitives::*; // Internal utilities for scanning transactions mod scan; @@ -50,59 +52,123 @@ async fn send_message(_msg: messages::ProcessorMessage) { async fn coordinator_loop( mut db: D, - mut key_gen: ::key_gen::KeyGen, + mut key_gen: ::key_gen::KeyGen, mut signers: signers::Signers, Scheduler, Rpc>, mut scanner: Option>>, ) { loop { - let mut txn = Some(db.txn()); - let msg = next_message(txn.as_mut().unwrap()).await; + let mut txn = db.txn(); + let msg = next_message(&mut txn).await; + let mut txn = Some(txn); match msg { messages::CoordinatorMessage::KeyGen(msg) => { + let txn = txn.as_mut().unwrap(); + let mut new_key = None; // This is a 
computationally expensive call yet it happens infrequently - for msg in key_gen.handle(txn.as_mut().unwrap(), msg) { + for msg in key_gen.handle(txn, msg) { + if let messages::key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } = &msg { + new_key = Some(*session) + } send_message(messages::ProcessorMessage::KeyGen(msg)).await; } + + // If we were yielded a key, register it in the signers + if let Some(session) = new_key { + let (substrate_keys, network_keys) = + ::key_gen::KeyGen::::key_shares(txn, session) + .expect("generated key pair yet couldn't get key shares"); + signers.register_keys(txn, session, substrate_keys, network_keys); + } } + // These are cheap calls which are fine to be here in this loop - messages::CoordinatorMessage::Sign(msg) => signers.queue_message(txn.as_mut().unwrap(), &msg), + messages::CoordinatorMessage::Sign(msg) => { + let txn = txn.as_mut().unwrap(); + signers.queue_message(txn, &msg) + } messages::CoordinatorMessage::Coordinator( messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { session, block_number, block, }, - ) => signers.cosign_block(txn.take().unwrap(), session, block_number, block), + ) => { + let txn = txn.take().unwrap(); + signers.cosign_block(txn, session, block_number, block) + } messages::CoordinatorMessage::Coordinator( messages::coordinator::CoordinatorMessage::SignSlashReport { session, report }, - ) => signers.sign_slash_report(txn.take().unwrap(), session, &report), + ) => { + let txn = txn.take().unwrap(); + signers.sign_slash_report(txn, session, &report) + } + messages::CoordinatorMessage::Substrate(msg) => match msg { messages::substrate::CoordinatorMessage::SetKeys { serai_time, session, key_pair } => { - db::ExternalKeyForSession::set(txn.as_mut().unwrap(), session, &key_pair.1.into_inner()); - todo!("TODO: Register in signers"); - todo!("TODO: Scanner activation") + let txn = txn.as_mut().unwrap(); + let key = EncodableG( + KeyGenParams::decode_key(key_pair.1.as_ref()).expect("invalid key set on serai"), + ); + + // Queue the key to be activated upon the next Batch + db::KeyToActivate::send::< + <::ExternalNetworkCurve as Ciphersuite>::G, + >(txn, &key); + + // Set the external key, as needed by the signers + db::ExternalKeyForSessionForSigners::set::< + <::ExternalNetworkCurve as Ciphersuite>::G, + >(txn, session, &key); + + // This isn't cheap yet only happens for the very first set of keys + if scanner.is_none() { + todo!("TODO") + } } messages::substrate::CoordinatorMessage::SlashesReported { session } => { - let key_bytes = db::ExternalKeyForSession::get(txn.as_ref().unwrap(), session).unwrap(); - let mut key_bytes = key_bytes.as_slice(); - let key = - ::ExternalNetworkCurve::read_G(&mut key_bytes) - .unwrap(); - assert!(key_bytes.is_empty()); - - signers.retire_session(txn.as_mut().unwrap(), session, &key) + let txn = txn.as_mut().unwrap(); + + // Since this session had its slashes reported, it has finished all its signature + // protocols and has been fully retired. 
We retire it from the signers accordingly + let key = db::ExternalKeyForSessionForSigners::take::< + <::ExternalNetworkCurve as Ciphersuite>::G, + >(txn, session) + .unwrap() + .0; + + // This is a cheap call + signers.retire_session(txn, session, &key) } messages::substrate::CoordinatorMessage::BlockWithBatchAcknowledgement { - block, + block: _, batch_id, in_instruction_succeededs, burns, - key_to_activate, - } => todo!("TODO"), + } => { + let mut txn = txn.take().unwrap(); + let scanner = scanner.as_mut().unwrap(); + let key_to_activate = db::KeyToActivate::try_recv::< + <::ExternalNetworkCurve as Ciphersuite>::G, + >(&mut txn) + .map(|key| key.0); + // This is a cheap call as it internally just queues this to be done later + scanner.acknowledge_batch( + txn, + batch_id, + in_instruction_succeededs, + burns, + key_to_activate, + ) + } messages::substrate::CoordinatorMessage::BlockWithoutBatchAcknowledgement { - block, + block: _, burns, - } => todo!("TODO"), + } => { + let txn = txn.take().unwrap(); + let scanner = scanner.as_mut().unwrap(); + // This is a cheap call as it internally just queues this to be done later + scanner.queue_burns(txn, burns) + } }, }; // If the txn wasn't already consumed and committed, commit it diff --git a/processor/bitcoin/src/primitives/mod.rs b/processor/bitcoin/src/primitives/mod.rs index fba52dd96..e089c623a 100644 --- a/processor/bitcoin/src/primitives/mod.rs +++ b/processor/bitcoin/src/primitives/mod.rs @@ -1,3 +1,20 @@ +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::bitcoin::key::{Parity, XOnlyPublicKey}; + pub(crate) mod output; pub(crate) mod transaction; pub(crate) mod block; + +pub(crate) fn x_coord_to_even_point(key: &[u8]) -> Option<::G> { + if key.len() != 32 { + None? + }; + + // Read the x-only public key + let key = XOnlyPublicKey::from_slice(key).ok()?; + // Convert to a full public key + let key = key.public_key(Parity::Even); + // Convert to k256 (from libsecp256k1) + Secp256k1::read_G(&mut key.serialize().as_slice()).ok() +} diff --git a/processor/bitcoin/src/primitives/output.rs b/processor/bitcoin/src/primitives/output.rs index 05ab6acf9..f1a1dc7a4 100644 --- a/processor/bitcoin/src/primitives/output.rs +++ b/processor/bitcoin/src/primitives/output.rs @@ -4,11 +4,7 @@ use ciphersuite::{Ciphersuite, Secp256k1}; use bitcoin_serai::{ bitcoin::{ - hashes::Hash as HashTrait, - key::{Parity, XOnlyPublicKey}, - consensus::Encodable, - script::Instruction, - transaction::Transaction, + hashes::Hash as HashTrait, consensus::Encodable, script::Instruction, transaction::Transaction, }, wallet::ReceivedOutput as WalletOutput, }; @@ -24,7 +20,10 @@ use serai_client::{ use primitives::{OutputType, ReceivedOutput}; -use crate::scan::{offsets_for_key, presumed_origin, extract_serai_data}; +use crate::{ + primitives::x_coord_to_even_point, + scan::{offsets_for_key, presumed_origin, extract_serai_data}, +}; #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] pub(crate) struct OutputId([u8; 36]); @@ -117,15 +116,11 @@ impl ReceivedOutput<::G, Address> for Output { let Instruction::PushBytes(key) = script.instructions_minimal().last().unwrap().unwrap() else { panic!("last item in v1 Taproot script wasn't bytes") }; - let key = XOnlyPublicKey::from_slice(key.as_ref()) - .expect("last item in v1 Taproot script wasn't a valid x-only public key"); + let key = x_coord_to_even_point(key.as_ref()) + .expect("last item in scanned v1 Taproot script wasn't a valid x-only public key"); - // Convert to a 
full key - let key = key.public_key(Parity::Even); - // Convert to a k256 key (from libsecp256k1) - let output_key = Secp256k1::read_G(&mut key.serialize().as_slice()).unwrap(); // The output's key minus the output's offset is the root key - output_key - (::G::GENERATOR * self.output.offset()) + key - (::G::GENERATOR * self.output.offset()) } fn presumed_origin(&self) -> Option
<Address>
{ diff --git a/processor/key-gen/src/lib.rs b/processor/key-gen/src/lib.rs index 607534126..cb23a740b 100644 --- a/processor/key-gen/src/lib.rs +++ b/processor/key-gen/src/lib.rs @@ -20,7 +20,7 @@ use dkg::{Participant, ThresholdKeys, evrf::*}; use serai_validator_sets_primitives::Session; use messages::key_gen::*; -use serai_db::{DbTxn, Db}; +use serai_db::{Get, DbTxn}; mod generators; use generators::generators; @@ -49,6 +49,17 @@ pub trait KeyGenParams { fn encode_key(key: ::G) -> Vec { key.to_bytes().as_ref().to_vec() } + + /// Decode keys from their optimal encoding. + /// + /// A default implementation is provided which calls the traditional `from_bytes`. + fn decode_key(mut key: &[u8]) -> Option<::G> { + let res = ::read_G(&mut key).ok()?; + if !key.is_empty() { + None?; + } + Some(res) + } } /* @@ -128,47 +139,41 @@ fn coerce_keys( /// An instance of the Serai key generation protocol. #[derive(Debug)] -pub struct KeyGen { - db: D, +pub struct KeyGen { substrate_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, network_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, } -impl KeyGen { +impl KeyGen
<P: KeyGenParams>
{ /// Create a new key generation instance. #[allow(clippy::new_ret_no_self)] pub fn new( - db: D, substrate_evrf_private_key: Zeroizing< <::EmbeddedCurve as Ciphersuite>::F, >, network_evrf_private_key: Zeroizing< <::EmbeddedCurve as Ciphersuite>::F, >, - ) -> KeyGen { - KeyGen { db, substrate_evrf_private_key, network_evrf_private_key } + ) -> KeyGen
<P>
{ + KeyGen { substrate_evrf_private_key, network_evrf_private_key } } /// Fetch the key shares for a specific session. #[allow(clippy::type_complexity)] pub fn key_shares( - &self, + getter: &impl Get, session: Session, ) -> Option<(Vec>, Vec>)> { // This is safe, despite not having a txn, since it's a static value // It doesn't change over time/in relation to other operations // It is solely set or unset - KeyGenDb::
<P>
::key_shares(&self.db, session) + KeyGenDb::
<P>
::key_shares(getter, session) } /// Handle a message from the coordinator. - pub fn handle( - &mut self, - txn: &mut D::Transaction<'_>, - msg: CoordinatorMessage, - ) -> Vec { + pub fn handle(&mut self, txn: &mut impl DbTxn, msg: CoordinatorMessage) -> Vec { const SUBSTRATE_KEY_CONTEXT: &[u8] = b"substrate"; const NETWORK_KEY_CONTEXT: &[u8] = b"network"; fn context(session: Session, key_context: &[u8]) -> [u8; 32] { @@ -292,7 +297,7 @@ impl KeyGen { // If we've already generated these keys, we don't actually need to save these // participations and continue. We solely have to verify them, as to identify malicious // participants and prevent DoSs, before returning - if self.key_shares(session).is_some() { + if Self::key_shares(txn, session).is_some() { log::debug!("already finished generating a key for {:?}", session); match EvrfDkg::::verify( From bd962eb908f0dfbcb5cd0cb2dbc09e844abe43ff Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 09:12:00 -0400 Subject: [PATCH 107/179] Misc tidying of serai-db calls --- common/db/src/create_db.rs | 42 ++++++++++--------- processor/bitcoin/src/main.rs | 16 +++---- processor/scanner/src/db.rs | 19 ++++----- processor/scanner/src/report/db.rs | 7 +--- processor/scanner/src/substrate/db.rs | 7 ++-- .../utxo/transaction-chaining/src/db.rs | 4 +- 6 files changed, 46 insertions(+), 49 deletions(-) diff --git a/common/db/src/create_db.rs b/common/db/src/create_db.rs index 1fb52b1b6..50fe51f7f 100644 --- a/common/db/src/create_db.rs +++ b/common/db/src/create_db.rs @@ -47,9 +47,13 @@ macro_rules! create_db { }) => { $( #[derive(Clone, Debug)] - pub(crate) struct $field_name; - impl $field_name { - pub(crate) fn key$(<$($generic_name: $generic_type),+>)?($($arg: $arg_type),*) -> Vec { + pub(crate) struct $field_name$( + <$($generic_name: $generic_type),+> + )?$( + (core::marker::PhantomData<($($generic_name),+)>) + )?; + impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? { + pub(crate) fn key($($arg: $arg_type),*) -> Vec { use scale::Encode; $crate::serai_db_key( stringify!($db_name).as_bytes(), @@ -57,38 +61,38 @@ macro_rules! 
create_db { ($($arg),*).encode() ) } - pub(crate) fn set$(<$($generic_name: $generic_type),+>)?( + pub(crate) fn set( txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type ) { - let key = $field_name::key$(::<$($generic_name),+>)?($($arg),*); + let key = Self::key($($arg),*); txn.put(&key, borsh::to_vec(data).unwrap()); } - pub(crate) fn get$(<$($generic_name: $generic_type),+>)?( + pub(crate) fn get( getter: &impl Get, $($arg: $arg_type),* ) -> Option<$field_type> { - getter.get($field_name::key$(::<$($generic_name),+>)?($($arg),*)).map(|data| { + getter.get(Self::key($($arg),*)).map(|data| { borsh::from_slice(data.as_ref()).unwrap() }) } // Returns a PhantomData of all generic types so if the generic was only used in the value, // not the keys, this doesn't have unused generic types #[allow(dead_code)] - pub(crate) fn del$(<$($generic_name: $generic_type),+>)?( + pub(crate) fn del( txn: &mut impl DbTxn $(, $arg: $arg_type)* ) -> core::marker::PhantomData<($($($generic_name),+)?)> { - txn.del(&$field_name::key$(::<$($generic_name),+>)?($($arg),*)); + txn.del(&Self::key($($arg),*)); core::marker::PhantomData } - pub(crate) fn take$(<$($generic_name: $generic_type),+>)?( + pub(crate) fn take( txn: &mut impl DbTxn $(, $arg: $arg_type)* ) -> Option<$field_type> { - let key = $field_name::key$(::<$($generic_name),+>)?($($arg),*); + let key = Self::key($($arg),*); let res = txn.get(&key).map(|data| borsh::from_slice(data.as_ref()).unwrap()); if res.is_some() { txn.del(key); @@ -119,14 +123,14 @@ macro_rules! db_channel { } } - impl $field_name { - pub(crate) fn send$(<$($generic_name: $generic_type),+>)?( + impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? { + pub(crate) fn send( txn: &mut impl DbTxn $(, $arg: $arg_type)* , value: &$field_type ) { // Use index 0 to store the amount of messages - let messages_sent_key = $field_name::key$(::<$($generic_name),+>)?($($arg,)* 0); + let messages_sent_key = Self::key($($arg,)* 0); let messages_sent = txn.get(&messages_sent_key).map(|counter| { u32::from_le_bytes(counter.try_into().unwrap()) }).unwrap_or(0); @@ -137,22 +141,22 @@ macro_rules! 
db_channel { // at the same time let index_to_use = messages_sent + 2; - $field_name::set$(::<$($generic_name),+>)?(txn, $($arg,)* index_to_use, value); + Self::set(txn, $($arg,)* index_to_use, value); } - pub(crate) fn try_recv$(<$($generic_name: $generic_type),+>)?( + pub(crate) fn try_recv( txn: &mut impl DbTxn $(, $arg: $arg_type)* ) -> Option<$field_type> { - let messages_recvd_key = $field_name::key$(::<$($generic_name),+>)?($($arg,)* 1); + let messages_recvd_key = Self::key($($arg,)* 1); let messages_recvd = txn.get(&messages_recvd_key).map(|counter| { u32::from_le_bytes(counter.try_into().unwrap()) }).unwrap_or(0); let index_to_read = messages_recvd + 2; - let res = $field_name::get$(::<$($generic_name),+>)?(txn, $($arg,)* index_to_read); + let res = Self::get(txn, $($arg,)* index_to_read); if res.is_some() { - $field_name::del$(::<$($generic_name),+>)?(txn, $($arg,)* index_to_read); + Self::del(txn, $($arg,)* index_to_read); txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes()); } res diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index 136b89cbf..f1f14082b 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -111,14 +111,14 @@ async fn coordinator_loop( ); // Queue the key to be activated upon the next Batch - db::KeyToActivate::send::< + db::KeyToActivate::< <::ExternalNetworkCurve as Ciphersuite>::G, - >(txn, &key); + >::send(txn, &key); // Set the external key, as needed by the signers - db::ExternalKeyForSessionForSigners::set::< + db::ExternalKeyForSessionForSigners::< <::ExternalNetworkCurve as Ciphersuite>::G, - >(txn, session, &key); + >::set(txn, session, &key); // This isn't cheap yet only happens for the very first set of keys if scanner.is_none() { @@ -130,9 +130,9 @@ async fn coordinator_loop( // Since this session had its slashes reported, it has finished all its signature // protocols and has been fully retired. We retire it from the signers accordingly - let key = db::ExternalKeyForSessionForSigners::take::< + let key = db::ExternalKeyForSessionForSigners::< <::ExternalNetworkCurve as Ciphersuite>::G, - >(txn, session) + >::take(txn, session) .unwrap() .0; @@ -147,9 +147,9 @@ async fn coordinator_loop( } => { let mut txn = txn.take().unwrap(); let scanner = scanner.as_mut().unwrap(); - let key_to_activate = db::KeyToActivate::try_recv::< + let key_to_activate = db::KeyToActivate::< <::ExternalNetworkCurve as Ciphersuite>::G, - >(&mut txn) + >::try_recv(&mut txn) .map(|key| key.0); // This is a cheap call as it internally just queues this to be done later scanner.acknowledge_batch( diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 5fcdc160d..107616cc8 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -107,7 +107,7 @@ create_db!( pub(crate) struct ScannerGlobalDb(PhantomData); impl ScannerGlobalDb { pub(crate) fn has_any_key_been_queued(getter: &impl Get) -> bool { - ActiveKeys::get::>>(getter).is_some() + ActiveKeys::>>::get(getter).is_some() } /// Queue a key. @@ -315,7 +315,7 @@ pub(crate) struct ReceiverScanData { db_channel! 
{ ScannerScanEventuality { - ScannedBlock: (empty_key: ()) -> Vec, + ScannedBlock: () -> Vec, } } @@ -364,14 +364,14 @@ impl ScanToEventualityDb { for output in &data.returns { output.write(&mut buf).unwrap(); } - ScannedBlock::send(txn, (), &buf); + ScannedBlock::send(txn, &buf); } pub(crate) fn recv_scan_data( txn: &mut impl DbTxn, expected_block_number: u64, ) -> ReceiverScanData { let data = - ScannedBlock::try_recv(txn, ()).expect("receiving data for a scanned block not yet sent"); + ScannedBlock::try_recv(txn).expect("receiving data for a scanned block not yet sent"); let mut data = data.as_slice(); let block_number = { @@ -462,7 +462,7 @@ struct BlockBoundInInstructions { db_channel! { ScannerScanReport { - InInstructions: (empty_key: ()) -> BlockBoundInInstructions, + InInstructions: () -> BlockBoundInInstructions, } } @@ -484,7 +484,6 @@ impl ScanToReportDb { } InInstructions::send( txn, - (), &BlockBoundInInstructions { block_number, returnable_in_instructions: buf }, ); } @@ -493,7 +492,7 @@ impl ScanToReportDb { txn: &mut impl DbTxn, block_number: u64, ) -> InInstructionData { - let data = InInstructions::try_recv(txn, ()) + let data = InInstructions::try_recv(txn) .expect("receiving InInstructions for a scanned block not yet sent"); assert_eq!( block_number, data.block_number, @@ -556,7 +555,7 @@ mod _public_db { db_channel! { ScannerPublic { - Batches: (empty_key: ()) -> Batch, + Batches: () -> Batch, BatchesToSign: (key: &[u8]) -> Batch, AcknowledgedBatches: (key: &[u8]) -> u32, CompletedEventualities: (key: &[u8]) -> [u8; 32], @@ -570,12 +569,12 @@ mod _public_db { pub struct Batches; impl Batches { pub(crate) fn send(txn: &mut impl DbTxn, batch: &Batch) { - _public_db::Batches::send(txn, (), batch); + _public_db::Batches::send(txn, batch); } /// Receive a batch to publish. 
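As an aside, the layout these db_channel!-generated channels rely on can be sketched independently of the macro: index 0 of a channel's key space holds the send counter, index 1 holds the receive counter, and the N-th message lives at index N + 2 (hence `index_to_use = messages_sent + 2` above). The following is a minimal sketch of that layout, using a HashMap in place of the actual database and illustrative names throughout; it is not the macro's expansion, just the scheme it encodes.

use std::collections::HashMap;

// Stand-in for the database: (channel key, index) -> value
type Db = HashMap<(Vec<u8>, u32), Vec<u8>>;

fn send(db: &mut Db, key: &[u8], value: &[u8]) {
  // Index 0 stores the amount of messages sent
  let sent_key = (key.to_vec(), 0);
  let sent = db
    .get(&sent_key)
    .map(|counter| u32::from_le_bytes(counter.as_slice().try_into().unwrap()))
    .unwrap_or(0);
  // Indexes 0 and 1 are reserved for the counters, so message N lives at N + 2
  db.insert((key.to_vec(), sent + 2), value.to_vec());
  db.insert(sent_key, (sent + 1).to_le_bytes().to_vec());
}

fn try_recv(db: &mut Db, key: &[u8]) -> Option<Vec<u8>> {
  // Index 1 stores the amount of messages received
  let recvd_key = (key.to_vec(), 1);
  let recvd = db
    .get(&recvd_key)
    .map(|counter| u32::from_le_bytes(counter.as_slice().try_into().unwrap()))
    .unwrap_or(0);
  let res = db.remove(&(key.to_vec(), recvd + 2));
  // Only advance the receive counter if a message was actually present
  if res.is_some() {
    db.insert(recvd_key, (recvd + 1).to_le_bytes().to_vec());
  }
  res
}

fn main() {
  let mut db = Db::new();
  send(&mut db, b"channel", b"a");
  send(&mut db, b"channel", b"b");
  assert_eq!(try_recv(&mut db, b"channel"), Some(b"a".to_vec()));
  assert_eq!(try_recv(&mut db, b"channel"), Some(b"b".to_vec()));
  assert_eq!(try_recv(&mut db, b"channel"), None);
}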
pub fn try_recv(txn: &mut impl DbTxn) -> Option { - _public_db::Batches::try_recv(txn, ()) + _public_db::Batches::try_recv(txn) } } diff --git a/processor/scanner/src/report/db.rs b/processor/scanner/src/report/db.rs index 10a3f6bb5..186accacd 100644 --- a/processor/scanner/src/report/db.rs +++ b/processor/scanner/src/report/db.rs @@ -54,9 +54,7 @@ impl ReportDb { } pub(crate) fn take_block_number_for_batch(txn: &mut impl DbTxn, id: u32) -> Option { - let block_number = BlockNumberForBatch::get(txn, id)?; - BlockNumberForBatch::del(txn, id); - Some(block_number) + BlockNumberForBatch::take(txn, id) } pub(crate) fn save_external_key_for_session_to_sign_batch( @@ -103,8 +101,7 @@ impl ReportDb { txn: &mut impl DbTxn, id: u32, ) -> Option>>> { - let buf = SerializedReturnAddresses::get(txn, id)?; - SerializedReturnAddresses::del(txn, id); + let buf = SerializedReturnAddresses::take(txn, id)?; let mut buf = buf.as_slice(); let mut res = Vec::with_capacity(buf.len() / (32 + 1 + 8)); diff --git a/processor/scanner/src/substrate/db.rs b/processor/scanner/src/substrate/db.rs index 697897c25..184358569 100644 --- a/processor/scanner/src/substrate/db.rs +++ b/processor/scanner/src/substrate/db.rs @@ -37,7 +37,7 @@ pub(crate) enum Action { db_channel!( ScannerSubstrate { - Actions: (empty_key: ()) -> ActionEncodable, + Actions: () -> ActionEncodable, } ); @@ -52,7 +52,6 @@ impl SubstrateDb { ) { Actions::send( txn, - (), &ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { batch_id, in_instruction_succeededs, @@ -62,11 +61,11 @@ impl SubstrateDb { ); } pub(crate) fn queue_queue_burns(txn: &mut impl DbTxn, burns: Vec) { - Actions::send(txn, (), &ActionEncodable::QueueBurns(burns)); + Actions::send(txn, &ActionEncodable::QueueBurns(burns)); } pub(crate) fn next_action(txn: &mut impl DbTxn) -> Option> { - let action_encodable = Actions::try_recv(txn, ())?; + let action_encodable = Actions::try_recv(txn)?; Some(match action_encodable { ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { batch_id, diff --git a/processor/scheduler/utxo/transaction-chaining/src/db.rs b/processor/scheduler/utxo/transaction-chaining/src/db.rs index 697f1009e..11bcd78db 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/db.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/db.rs @@ -69,9 +69,7 @@ impl Db { txn: &mut impl DbTxn, output: & as ReceivedOutput, AddressFor>>::Id, ) -> bool { - let res = AlreadyAccumulatedOutput::get(txn, output.as_ref()).is_some(); - AlreadyAccumulatedOutput::del(txn, output.as_ref()); - res + AlreadyAccumulatedOutput::take(txn, output.as_ref()).is_some() } pub(crate) fn queued_payments( From dadce1a26e77235eb32db1b13fa1d6d002158e17 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 11:58:27 -0400 Subject: [PATCH 108/179] Add section documenting the safety of txindex upon reorganizations --- processor/bitcoin/src/scan.rs | 8 ++++---- processor/bitcoin/src/txindex.rs | 30 ++++++++++++++++++++++++++++-- 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/processor/bitcoin/src/scan.rs b/processor/bitcoin/src/scan.rs index b3d3a6dcc..6d7fab887 100644 --- a/processor/bitcoin/src/scan.rs +++ b/processor/bitcoin/src/scan.rs @@ -16,7 +16,7 @@ use serai_client::networks::bitcoin::Address; use serai_db::Get; use primitives::OutputType; -use crate::{db, hash_bytes}; +use crate::hash_bytes; const KEY_DST: &[u8] = b"Serai Bitcoin Processor Key Offset"; static BRANCH_BASE_OFFSET: LazyLock<::F> = @@ -62,9 +62,9 @@ pub(crate) fn 
presumed_origin(getter: &impl Get, tx: &Transaction) -> Option ScriptBuf { + // We index every single output on the blockchain, so this shouldn't be possible + ScriptBuf::from_bytes( + db::ScriptPubKey::get(getter, txid, vout) + .expect("requested script public key for unknown output"), + ) +} + pub(crate) struct TxIndexTask(Rpc); #[async_trait::async_trait] @@ -40,6 +54,18 @@ impl ContinuallyRan for TxIndexTask { Rpc::::CONFIRMATIONS ))?; + /* + `finalized_block_number` is the latest block number minus confirmations. The blockchain may + undetectably re-organize though, as while the scanner will maintain an index of finalized + blocks and panics on reorganization, this runs prior to the scanner and that index. + + A reorganization of `CONFIRMATIONS` blocks is still an invariant. Even if that occurs, this + saves the script public keys *by the transaction hash an output index*. Accordingly, it isn't + invalidated on reorganization. The only risk would be if the new chain reorganized to + include a transaction to Serai which we didn't index the parents of. If that happens, we'll + panic when we scan the transaction, causing the invariant to be detected. + */ + let finalized_block_number_in_db = db::LatestBlockToYieldAsFinalized::get(&self.0.db); let next_block = finalized_block_number_in_db.map_or(0, |block| block + 1); @@ -63,7 +89,7 @@ impl ContinuallyRan for TxIndexTask { let mut txn = self.0.db.txn(); - for tx in &block.txdata[1 ..] { + for tx in &block.txdata { let txid = hash_bytes(tx.compute_txid().to_raw_hash()); for (o, output) in tx.output.iter().enumerate() { let o = u32::try_from(o).unwrap(); From dff2ef5ce76d1c51fc1af689fd7d42670df5ae8e Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 11:59:15 -0400 Subject: [PATCH 109/179] Add binary search to find the block to start scanning from --- processor/bitcoin/src/main.rs | 97 ++++++++++++++------- processor/bitcoin/src/rpc.rs | 44 ++++++++++ processor/scanner/src/lib.rs | 5 ++ processor/src/main.rs | 160 ---------------------------------- 4 files changed, 113 insertions(+), 193 deletions(-) diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index f1f14082b..1c07b6cd7 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -6,11 +6,16 @@ static ALLOCATOR: zalloc::ZeroizingAlloc = zalloc::ZeroizingAlloc(std::alloc::System); +use core::cmp::Ordering; + use ciphersuite::Ciphersuite; +use serai_client::validator_sets::primitives::Session; + use serai_db::{DbTxn, Db}; use ::primitives::EncodableG; use ::key_gen::KeyGenParams as KeyGenParamsTrait; +use scanner::{ScannerFeed, Scanner}; mod primitives; pub(crate) use crate::primitives::*; @@ -38,6 +43,56 @@ pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> res } +async fn first_block_after_time(feed: &S, serai_time: u64) -> u64 { + async fn first_block_after_time_iteration( + feed: &S, + serai_time: u64, + ) -> Result, S::EphemeralError> { + let latest = feed.latest_finalized_block_number().await?; + let latest_time = feed.time_of_block(latest).await?; + if latest_time < serai_time { + tokio::time::sleep(core::time::Duration::from_secs(serai_time - latest_time)).await; + return Ok(None); + } + + // A finalized block has a time greater than or equal to the time we want to start at + // Find the first such block with a binary search + // start_search and end_search are inclusive + let mut start_search = 0; + let mut end_search = latest; + while start_search != end_search { + // This on purposely 
chooses the earlier block in the case two blocks are both in the middle + let to_check = start_search + ((end_search - start_search) / 2); + let block_time = feed.time_of_block(to_check).await?; + match block_time.cmp(&serai_time) { + Ordering::Less => { + start_search = to_check + 1; + assert!(start_search <= end_search); + } + Ordering::Equal | Ordering::Greater => { + // This holds true since we pick the earlier block upon an even search distance + // If it didn't, this would cause an infinite loop + assert!(to_check < end_search); + end_search = to_check; + } + } + } + Ok(Some(start_search)) + } + loop { + match first_block_after_time_iteration(feed, serai_time).await { + Ok(Some(block)) => return block, + Ok(None) => { + log::info!("waiting for block to activate at (a block with timestamp >= {serai_time})"); + } + Err(e) => { + log::error!("couldn't find the first block Serai should scan due to an RPC error: {e:?}"); + } + } + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } +} + /// Fetch the next message from the Coordinator. /// /// This message is guaranteed to have never been handled before, where handling is defined as @@ -52,11 +107,13 @@ async fn send_message(_msg: messages::ProcessorMessage) { async fn coordinator_loop( mut db: D, + feed: Rpc, mut key_gen: ::key_gen::KeyGen, mut signers: signers::Signers, Scheduler, Rpc>, mut scanner: Option>>, ) { loop { + let db_clone = db.clone(); let mut txn = db.txn(); let msg = next_message(&mut txn).await; let mut txn = Some(txn); @@ -120,9 +177,13 @@ async fn coordinator_loop( <::ExternalNetworkCurve as Ciphersuite>::G, >::set(txn, session, &key); - // This isn't cheap yet only happens for the very first set of keys - if scanner.is_none() { - todo!("TODO") + // This is presumed extremely expensive, potentially blocking for several minutes, yet + // only happens for the very first set of keys + if session == Session(0) { + assert!(scanner.is_none()); + let start_block = first_block_after_time(&feed, serai_time).await; + scanner = + Some(Scanner::new::>(db_clone, feed.clone(), start_block, key.0).await); } } messages::substrate::CoordinatorMessage::SlashesReported { session } => { @@ -241,36 +302,6 @@ impl TransactionTrait for Transaction { } } -#[async_trait] -impl BlockTrait for Block { - async fn time(&self, rpc: &Bitcoin) -> u64 { - // Use the network median time defined in BIP-0113 since the in-block time isn't guaranteed to - // be monotonic - let mut timestamps = vec![u64::from(self.header.time)]; - let mut parent = self.parent(); - // BIP-0113 uses a median of the prior 11 blocks - while timestamps.len() < 11 { - let mut parent_block; - while { - parent_block = rpc.rpc.get_block(&parent).await; - parent_block.is_err() - } { - log::error!("couldn't get parent block when trying to get block time: {parent_block:?}"); - sleep(Duration::from_secs(5)).await; - } - let parent_block = parent_block.unwrap(); - timestamps.push(u64::from(parent_block.header.time)); - parent = parent_block.parent(); - - if parent == [0; 32] { - break; - } - } - timestamps.sort(); - timestamps[timestamps.len() / 2] - } -} - impl Bitcoin { pub(crate) async fn new(url: String) -> Bitcoin { let mut res = Rpc::new(url.clone()).await; diff --git a/processor/bitcoin/src/rpc.rs b/processor/bitcoin/src/rpc.rs index cafb0ef3c..a6f6e5fd8 100644 --- a/processor/bitcoin/src/rpc.rs +++ b/processor/bitcoin/src/rpc.rs @@ -34,6 +34,50 @@ impl ScannerFeed for Rpc { db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError) } + async fn 
time_of_block(&self, number: u64) -> Result { + let number = usize::try_from(number).unwrap(); + + /* + The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the + median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve + CLTV). This creates a monotonic median time which we use as the block time. + */ + // This implements `GetMedianTimePast` + let median = { + const MEDIAN_TIMESPAN: usize = 11; + let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN); + for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number { + timestamps.push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time); + } + timestamps.sort(); + timestamps[timestamps.len() / 2] + }; + + /* + This block's timestamp is guaranteed to be greater than this median: + https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9 + /src/validation.cpp#L4182-L4184 + + This does not guarantee the median always increases however. Take the following trivial + example, as the window is initially built: + + 0 block has time 0 // Prior blocks: [] + 1 block has time 1 // Prior blocks: [0] + 2 block has time 2 // Prior blocks: [0, 1] + 3 block has time 2 // Prior blocks: [0, 1, 2] + + These two blocks have the same time (both greater than the median of their prior blocks) and + the same median. + + The median will never decrease however. The values pushed onto the window will always be + greater than the median. If a value greater than the median is popped, the median will remain + the same (due to the counterbalance of the pushed value). If a value less than the median is + popped, the median will increase (either to another instance of the same value, yet one + closer to the end of the repeating sequence, or to a higher value). + */ + Ok(median.into()) + } + async fn unchecked_block_header_by_number( &self, number: u64, diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 4f30f5e71..6ed16d745 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -106,6 +106,11 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { /// consensus. The genesis block accordingly has block number 0. async fn latest_finalized_block_number(&self) -> Result; + /// Fetch the timestamp of a block (represented in seconds since the epoch). + /// + /// This must be monotonically incrementing. Two blocks may share a timestamp. + async fn time_of_block(&self, number: u64) -> Result; + /// Fetch a block header by its number. /// /// This does not check the returned BlockHeader is the header for the block we indexed. 
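The monotonicity claim in the comment above is easy to sanity-check in isolation. The following is a minimal sketch of the GetMedianTimePast computation over a plain slice of header times, mirroring the `time_of_block` loop above (the example values are illustrative, and the loop starts at block 1 since the genesis block has no prior blocks to take a median over).

const MEDIAN_TIMESPAN: usize = 11;

// The "time" of block `number` is the median of the prior MEDIAN_TIMESPAN
// header timestamps
fn median_time_past(header_times: &[u64], number: usize) -> u64 {
  let mut timestamps: Vec<u64> =
    header_times[number.saturating_sub(MEDIAN_TIMESPAN) .. number].to_vec();
  timestamps.sort();
  timestamps[timestamps.len() / 2]
}

fn main() {
  // Raw header times may decrease (the block with time 15 follows the block
  // with time 20), yet each exceeds the median of its predecessors
  let header_times = vec![0u64, 10, 20, 15, 25, 30];
  let mut last = 0;
  for number in 1 .. header_times.len() {
    let median = median_time_past(&header_times, number);
    // The median itself never decreases, yielding the monotonic clock the
    // scanner requires
    assert!(median >= last);
    last = median;
  }
}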
diff --git a/processor/src/main.rs b/processor/src/main.rs index 51123b925..65e74f550 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -29,158 +29,15 @@ async fn handle_coordinator_msg( substrate_mutable: &mut SubstrateMutable, msg: &Message, ) { - async fn activate_key( - network: &N, - substrate_mutable: &mut SubstrateMutable, - tributary_mutable: &mut TributaryMutable, - txn: &mut D::Transaction<'_>, - session: Session, - key_pair: KeyPair, - activation_number: usize, - ) { - info!("activating {session:?}'s keys at {activation_number}"); - - let network_key = ::Curve::read_G::<&[u8]>(&mut key_pair.1.as_ref()) - .expect("Substrate finalized invalid point as a network's key"); - - if tributary_mutable.key_gen.in_set(&session) { - // See TributaryMutable's struct definition for why this block is safe - let KeyConfirmed { substrate_keys, network_keys } = - tributary_mutable.key_gen.confirm(txn, session, &key_pair); - if session.0 == 0 { - tributary_mutable.batch_signer = - Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); - } - tributary_mutable - .signers - .insert(session, Signer::new(network.clone(), session, network_keys)); - } - - substrate_mutable.add_key(txn, activation_number, network_key).await; - } - match msg.msg.clone() { CoordinatorMessage::Substrate(msg) => { match msg { - messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, session, key_pair } => { - // This is the first key pair for this network so no block has been finalized yet - // TODO: Write documentation for this in docs/ - // TODO: Use an Option instead of a magic? - if context.network_latest_finalized_block.0 == [0; 32] { - assert!(tributary_mutable.signers.is_empty()); - assert!(tributary_mutable.batch_signer.is_none()); - assert!(tributary_mutable.cosigner.is_none()); - // We can't check this as existing is no longer pub - // assert!(substrate_mutable.existing.as_ref().is_none()); - - // Wait until a network's block's time exceeds Serai's time - // These time calls are extremely expensive for what they do, yet they only run when - // confirming the first key pair, before any network activity has occurred, so they - // should be fine - - // If the latest block number is 10, then the block indexed by 1 has 10 confirms - // 10 + 1 - 10 = 1 - let mut block_i; - while { - block_i = (network.get_latest_block_number_with_retries().await + 1) - .saturating_sub(N::CONFIRMATIONS); - network.get_block_with_retries(block_i).await.time(network).await < context.serai_time - } { - info!( - "serai confirmed the first key pair for a set. {} {}", - "we're waiting for a network's finalized block's time to exceed unix time ", - context.serai_time, - ); - sleep(Duration::from_secs(5)).await; - } - - // Find the first block to do so - let mut earliest = block_i; - // earliest > 0 prevents a panic if Serai creates keys before the genesis block - // which... 
should be impossible - // Yet a prevented panic is a prevented panic - while (earliest > 0) && - (network.get_block_with_retries(earliest - 1).await.time(network).await >= - context.serai_time) - { - earliest -= 1; - } - - // Use this as the activation block - let activation_number = earliest; - - activate_key( - network, - substrate_mutable, - tributary_mutable, - txn, - session, - key_pair, - activation_number, - ) - .await; - } else { - let mut block_before_queue_block = >::Id::default(); - block_before_queue_block - .as_mut() - .copy_from_slice(&context.network_latest_finalized_block.0); - // We can't set these keys for activation until we know their queue block, which we - // won't until the next Batch is confirmed - // Set this variable so when we get the next Batch event, we can handle it - PendingActivationsDb::set_pending_activation::( - txn, - &block_before_queue_block, - session, - key_pair, - ); - } - } - messages::substrate::CoordinatorMessage::SubstrateBlock { context, block: substrate_block, burns, batches, } => { - if let Some((block, session, key_pair)) = - PendingActivationsDb::pending_activation::(txn) - { - // Only run if this is a Batch belonging to a distinct block - if context.network_latest_finalized_block.as_ref() != block.as_ref() { - let mut queue_block = >::Id::default(); - queue_block.as_mut().copy_from_slice(context.network_latest_finalized_block.as_ref()); - - let activation_number = substrate_mutable - .block_number(txn, &queue_block) - .await - .expect("KeyConfirmed from context we haven't synced") + - N::CONFIRMATIONS; - - activate_key( - network, - substrate_mutable, - tributary_mutable, - txn, - session, - key_pair, - activation_number, - ) - .await; - //clear pending activation - txn.del(PendingActivationsDb::key()); - } - } - - // Since this block was acknowledged, we no longer have to sign the batches within it - if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() { - for batch_id in batches { - batch_signer.batch_signed(txn, batch_id); - } - } - - let (acquired_lock, to_sign) = - substrate_mutable.substrate_block(txn, network, context, burns).await; - // Send SubstrateBlockAck, with relevant plan IDs, before we trigger the signing of these // plans if !tributary_mutable.signers.is_empty() { @@ -197,23 +54,6 @@ async fn handle_coordinator_msg( }) .await; } - - // See commentary in TributaryMutable for why this is safe - let signers = &mut tributary_mutable.signers; - for (key, id, tx, eventuality) in to_sign { - if let Some(session) = SessionDb::get(txn, key.to_bytes().as_ref()) { - let signer = signers.get_mut(&session).unwrap(); - if let Some(msg) = signer.sign_transaction(txn, id, tx, &eventuality).await { - coordinator.send(msg).await; - } - } - } - - // This is not premature, even if this block had multiple `Batch`s created, as the first - // `Batch` alone will trigger all Plans/Eventualities/Signs - if acquired_lock { - substrate_mutable.release_scanner_lock().await; - } } } } From 57dc197053a5b372582bbf5cd835affaa34e19b2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 18:56:23 -0400 Subject: [PATCH 110/179] serai-processor-bin Moves the coordinator loop out of serai-bitcoin-processor, completing it. Fixes a potential race condition in the message-queue regarding multiple sockets sending messages at once. 
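The message-queue race referenced in this commit message is a check-then-act hazard: the intent-deduplication check and the queueing of the message were not performed under a single lock, as the queue's lock was previously only taken at the point of queueing. The following is a minimal sketch of the hazard and of the fix (taking the queue's lock before the deduplication check), with a Mutex'd struct standing in for the real queue and illustrative names throughout.

use std::{collections::HashSet, sync::{LazyLock, Mutex}};

struct Queue {
  seen_intents: HashSet<Vec<u8>>,
  messages: Vec<Vec<u8>>,
}

static QUEUE: LazyLock<Mutex<Queue>> =
  LazyLock::new(|| Mutex::new(Queue { seen_intents: HashSet::new(), messages: vec![] }));

fn queue_message(intent: Vec<u8>, msg: Vec<u8>) {
  // Taking the lock *before* the deduplication check makes check-then-insert
  // atomic. If the lock were only taken when pushing the message, two sockets
  // could both pass the check with the same intent, queueing the message twice
  let mut queue = QUEUE.lock().unwrap();
  if queue.seen_intents.contains(&intent) {
    return;
  }
  queue.seen_intents.insert(intent);
  queue.messages.push(msg);
}

fn main() {
  let handles: Vec<_> = (0 .. 2)
    .map(|_| std::thread::spawn(|| queue_message(b"intent".to_vec(), b"msg".to_vec())))
    .collect();
  for handle in handles {
    handle.join().unwrap();
  }
  // Despite two concurrent sends with the same intent, only one message queued
  assert_eq!(QUEUE.lock().unwrap().messages.len(), 1);
}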
--- .github/workflows/tests.yml | 1 + Cargo.lock | 37 +++ Cargo.toml | 1 + message-queue/src/main.rs | 5 +- processor/bin/Cargo.toml | 60 +++++ processor/bin/LICENSE | 15 ++ processor/bin/README.md | 3 + processor/bin/src/coordinator.rs | 196 +++++++++++++++ processor/bin/src/lib.rs | 293 +++++++++++++++++++++++ processor/bitcoin/Cargo.toml | 8 +- processor/bitcoin/src/key_gen.rs | 8 +- processor/bitcoin/src/main.rs | 226 ++--------------- processor/bitcoin/src/txindex.rs | 2 +- processor/key-gen/src/db.rs | 13 +- processor/key-gen/src/lib.rs | 31 +-- processor/scanner/src/db.rs | 9 +- processor/scanner/src/lib.rs | 38 ++- processor/signers/src/coordinator/mod.rs | 1 + processor/signers/src/lib.rs | 1 + processor/src/coordinator.rs | 43 ---- processor/src/db.rs | 43 ---- processor/src/main.rs | 257 -------------------- 22 files changed, 701 insertions(+), 590 deletions(-) create mode 100644 processor/bin/Cargo.toml create mode 100644 processor/bin/LICENSE create mode 100644 processor/bin/README.md create mode 100644 processor/bin/src/coordinator.rs create mode 100644 processor/bin/src/lib.rs delete mode 100644 processor/src/coordinator.rs delete mode 100644 processor/src/db.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index edd219f97..8bf4084da 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -49,6 +49,7 @@ jobs: -p serai-processor-utxo-scheduler \ -p serai-processor-transaction-chaining-scheduler \ -p serai-processor-signers \ + -p serai-processor-bin \ -p serai-bitcoin-processor \ -p serai-ethereum-processor \ -p serai-monero-processor \ diff --git a/Cargo.lock b/Cargo.lock index 8c0c3dd5a..7e7d78a31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8131,6 +8131,7 @@ dependencies = [ "dkg", "env_logger", "flexible-transcript", + "hex", "log", "modular-frost", "parity-scale-codec", @@ -8140,6 +8141,7 @@ dependencies = [ "serai-db", "serai-env", "serai-message-queue", + "serai-processor-bin", "serai-processor-key-gen", "serai-processor-messages", "serai-processor-primitives", @@ -8150,6 +8152,7 @@ dependencies = [ "serai-processor-utxo-scheduler-primitives", "tokio", "zalloc", + "zeroize", ] [[package]] @@ -8635,6 +8638,40 @@ dependencies = [ "zeroize", ] +[[package]] +name = "serai-processor-bin" +version = "0.1.0" +dependencies = [ + "async-trait", + "bitcoin-serai", + "borsh", + "ciphersuite", + "dkg", + "env_logger", + "flexible-transcript", + "hex", + "log", + "modular-frost", + "parity-scale-codec", + "rand_core", + "secp256k1", + "serai-client", + "serai-db", + "serai-env", + "serai-message-queue", + "serai-processor-key-gen", + "serai-processor-messages", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-signers", + "serai-processor-transaction-chaining-scheduler", + "serai-processor-utxo-scheduler-primitives", + "tokio", + "zalloc", + "zeroize", +] + [[package]] name = "serai-processor-frost-attempt-manager" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 25e6c25d8..b35b3318f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,6 +83,7 @@ members = [ "processor/scheduler/utxo/transaction-chaining", "processor/signers", + "processor/bin", "processor/bitcoin", "processor/ethereum", "processor/monero", diff --git a/message-queue/src/main.rs b/message-queue/src/main.rs index c43cc3c84..03c580ce4 100644 --- a/message-queue/src/main.rs +++ b/message-queue/src/main.rs @@ -72,6 +72,9 @@ pub(crate) fn queue_message( // Assert one, and only one of these, is 
the coordinator assert!(matches!(meta.from, Service::Coordinator) ^ matches!(meta.to, Service::Coordinator)); + // Lock the queue + let queue_lock = QUEUES.read().unwrap()[&(meta.from, meta.to)].write().unwrap(); + // Verify (from, to, intent) hasn't been prior seen fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec { [&[u8::try_from(domain.len()).unwrap()], domain, key.as_ref()].concat() @@ -93,7 +96,7 @@ pub(crate) fn queue_message( DbTxn::put(&mut txn, intent_key, []); // Queue it - let id = QUEUES.read().unwrap()[&(meta.from, meta.to)].write().unwrap().queue_message( + let id = queue_lock.queue_message( &mut txn, QueuedMessage { from: meta.from, diff --git a/processor/bin/Cargo.toml b/processor/bin/Cargo.toml new file mode 100644 index 000000000..f3f3b7536 --- /dev/null +++ b/processor/bin/Cargo.toml @@ -0,0 +1,60 @@ +[package] +name = "serai-processor-bin" +version = "0.1.0" +description = "Framework for Serai processor binaries" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/bin" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +async-trait = { version = "0.1", default-features = false } +zeroize = { version = "1", default-features = false, features = ["std"] } +rand_core = { version = "0.6", default-features = false } + +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } + +secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"] } +bitcoin-serai = { path = "../../networks/bitcoin", default-features = false, features = ["std"] } + +log = { version = "0.4", default-features = false, features = ["std"] } +env_logger = { version = "0.10", default-features = false, features = ["humantime"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +zalloc = { path = "../../common/zalloc" } +serai-db = { path = "../../common/db" } +serai-env = { path = "../../common/env" } + +serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] } + +messages = { package = "serai-processor-messages", path = "../messages" } +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +utxo-scheduler = { package = "serai-processor-utxo-scheduler-primitives", path = "../scheduler/utxo/primitives" } +transaction-chaining-scheduler = { package = "serai-processor-transaction-chaining-scheduler", path = 
"../scheduler/utxo/transaction-chaining" } +signers = { package = "serai-processor-signers", path = "../signers" } + +message-queue = { package = "serai-message-queue", path = "../../message-queue" } + +[features] +parity-db = ["serai-db/parity-db"] +rocksdb = ["serai-db/rocksdb"] diff --git a/processor/bin/LICENSE b/processor/bin/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/bin/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/bin/README.md b/processor/bin/README.md new file mode 100644 index 000000000..858a29252 --- /dev/null +++ b/processor/bin/README.md @@ -0,0 +1,3 @@ +# Serai Processor Bin + +The framework for Serai processor binaries, common to the Serai processors. diff --git a/processor/bin/src/coordinator.rs b/processor/bin/src/coordinator.rs new file mode 100644 index 000000000..d9d8d1129 --- /dev/null +++ b/processor/bin/src/coordinator.rs @@ -0,0 +1,196 @@ +use std::sync::Arc; + +use tokio::sync::mpsc; + +use scale::Encode; +use serai_client::{ + primitives::{NetworkId, Signature}, + validator_sets::primitives::Session, + in_instructions::primitives::{Batch, SignedBatch}, +}; + +use serai_env as env; +use serai_db::{Get, DbTxn, Db, create_db, db_channel}; +use message_queue::{Service, Metadata, client::MessageQueue}; + +create_db! { + ProcessorBinCoordinator { + SavedMessages: () -> u64, + } +} + +db_channel! { + ProcessorBinCoordinator { + CoordinatorMessages: () -> Vec + } +} + +async fn send(service: Service, queue: &MessageQueue, msg: messages::ProcessorMessage) { + let metadata = Metadata { from: service, to: Service::Coordinator, intent: msg.intent() }; + let msg = borsh::to_vec(&msg).unwrap(); + queue.queue(metadata, msg).await; +} + +pub(crate) struct Coordinator { + new_message: mpsc::UnboundedReceiver<()>, + service: Service, + message_queue: Arc, +} + +pub(crate) struct CoordinatorSend(Service, Arc); + +impl Coordinator { + pub(crate) fn new(mut db: crate::Db) -> Self { + let (new_message_send, new_message_recv) = mpsc::unbounded_channel(); + + let network_id = match env::var("NETWORK").expect("network wasn't specified").as_str() { + "bitcoin" => NetworkId::Bitcoin, + "ethereum" => NetworkId::Ethereum, + "monero" => NetworkId::Monero, + _ => panic!("unrecognized network"), + }; + let service = Service::Processor(network_id); + let message_queue = Arc::new(MessageQueue::from_env(service)); + + // Spawn a task to move messages from the message-queue to our database so we can achieve + // atomicity. 
This is the only place we read/ack messages from + tokio::spawn({ + let message_queue = message_queue.clone(); + async move { + loop { + let msg = message_queue.next(Service::Coordinator).await; + + let prior_msg = msg.id.checked_sub(1); + let saved_messages = SavedMessages::get(&db); + /* + This should either be: + A) The message after the message we just saved (as normal) + B) The message we just saved (if we rebooted and failed to ack it) + */ + assert!((saved_messages == prior_msg) || (saved_messages == Some(msg.id))); + if saved_messages < Some(msg.id) { + let mut txn = db.txn(); + CoordinatorMessages::send(&mut txn, &msg.msg); + SavedMessages::set(&mut txn, &msg.id); + txn.commit(); + } + // Acknowledge this message + message_queue.ack(Service::Coordinator, msg.id).await; + + // Fire that there's a new message + new_message_send.send(()).expect("failed to tell the Coordinator there's a new message"); + } + } + }); + + Coordinator { new_message: new_message_recv, service, message_queue } + } + + pub(crate) fn coordinator_send(&self) -> CoordinatorSend { + CoordinatorSend(self.service, self.message_queue.clone()) + } + + /// Fetch the next message from the Coordinator. + /// + /// This message is guaranteed to have never been handled before, where handling is defined as + /// this `txn` being committed. + pub(crate) async fn next_message( + &mut self, + txn: &mut impl DbTxn, + ) -> messages::CoordinatorMessage { + loop { + match CoordinatorMessages::try_recv(txn) { + Some(msg) => { + return borsh::from_slice(&msg) + .expect("message wasn't a borsh-encoded CoordinatorMessage") + } + None => { + let _ = + tokio::time::timeout(core::time::Duration::from_secs(60), self.new_message.recv()) + .await; + } + } + } + } + + #[allow(clippy::unused_async)] + pub(crate) async fn send_message(&mut self, msg: messages::ProcessorMessage) { + send(self.service, &self.message_queue, msg).await + } +} + +#[async_trait::async_trait] +impl signers::Coordinator for CoordinatorSend { + type EphemeralError = (); + + async fn send( + &mut self, + msg: messages::sign::ProcessorMessage, + ) -> Result<(), Self::EphemeralError> { + // TODO: Use a fallible send for these methods + send(self.0, &self.1, messages::ProcessorMessage::Sign(msg)).await; + Ok(()) + } + + async fn publish_cosign( + &mut self, + block_number: u64, + block: [u8; 32], + signature: Signature, + ) -> Result<(), Self::EphemeralError> { + send( + self.0, + &self.1, + messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::CosignedBlock { + block_number, + block, + signature: signature.encode(), + }, + ), + ) + .await; + Ok(()) + } + + async fn publish_batch(&mut self, batch: Batch) -> Result<(), Self::EphemeralError> { + send( + self.0, + &self.1, + messages::ProcessorMessage::Substrate(messages::substrate::ProcessorMessage::Batch { batch }), + ) + .await; + Ok(()) + } + + async fn publish_signed_batch(&mut self, batch: SignedBatch) -> Result<(), Self::EphemeralError> { + send( + self.0, + &self.1, + messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::SignedBatch { batch }, + ), + ) + .await; + Ok(()) + } + + async fn publish_slash_report_signature( + &mut self, + session: Session, + signature: Signature, + ) -> Result<(), Self::EphemeralError> { + send( + self.0, + &self.1, + messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::SignedSlashReport { + session, + signature: signature.encode(), + }, + ), + ) + .await; + Ok(()) + } +} diff --git 
a/processor/bin/src/lib.rs b/processor/bin/src/lib.rs new file mode 100644 index 000000000..15873873b --- /dev/null +++ b/processor/bin/src/lib.rs @@ -0,0 +1,293 @@ +use core::cmp::Ordering; + +use zeroize::{Zeroize, Zeroizing}; + +use ciphersuite::{ + group::{ff::PrimeField, GroupEncoding}, + Ciphersuite, Ristretto, +}; +use dkg::evrf::EvrfCurve; + +use serai_client::validator_sets::primitives::Session; + +use serai_env as env; +use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel}; + +use primitives::EncodableG; +use ::key_gen::{KeyGenParams, KeyGen}; +use scheduler::SignableTransaction; +use scanner::{ScannerFeed, Scanner, KeyFor, Scheduler}; +use signers::{TransactionPublisher, Signers}; + +mod coordinator; +use coordinator::Coordinator; + +create_db! { + ProcessorBin { + ExternalKeyForSessionForSigners: (session: Session) -> EncodableG, + } +} + +db_channel! { + ProcessorBin { + KeyToActivate: () -> EncodableG + } +} + +/// The type used for the database. +#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] +pub type Db = serai_db::ParityDb; +/// The type used for the database. +#[cfg(feature = "rocksdb")] +pub type Db = serai_db::RocksDB; + +/// Initialize the processor. +/// +/// Yields the database. +#[allow(unused_variables, unreachable_code)] +pub fn init() -> Db { + // Override the panic handler with one which will panic if any tokio task panics + { + let existing = std::panic::take_hook(); + std::panic::set_hook(Box::new(move |panic| { + existing(panic); + const MSG: &str = "exiting the process due to a task panicking"; + println!("{MSG}"); + log::error!("{MSG}"); + std::process::exit(1); + })); + } + + if std::env::var("RUST_LOG").is_err() { + std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string())); + } + env_logger::init(); + + #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] + let db = + serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); + #[cfg(feature = "rocksdb")] + let db = serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); + db +} + +/// THe URL for the external network's node. 
+pub fn url() -> String { + let login = env::var("NETWORK_RPC_LOGIN").expect("network RPC login wasn't specified"); + let hostname = env::var("NETWORK_RPC_HOSTNAME").expect("network RPC hostname wasn't specified"); + let port = env::var("NETWORK_RPC_PORT").expect("network port domain wasn't specified"); + "http://".to_string() + &login + "@" + &hostname + ":" + &port +} + +fn key_gen() -> KeyGen { + fn read_key_from_env(label: &'static str) -> Zeroizing { + let key_hex = + Zeroizing::new(env::var(label).unwrap_or_else(|| panic!("{label} wasn't provided"))); + let bytes = Zeroizing::new( + hex::decode(key_hex).unwrap_or_else(|_| panic!("{label} wasn't a valid hex string")), + ); + + let mut repr = ::Repr::default(); + if repr.as_ref().len() != bytes.len() { + panic!("{label} wasn't the correct length"); + } + repr.as_mut().copy_from_slice(bytes.as_slice()); + let res = Zeroizing::new( + Option::from(::from_repr(repr)) + .unwrap_or_else(|| panic!("{label} wasn't a valid scalar")), + ); + repr.as_mut().zeroize(); + res + } + KeyGen::new( + read_key_from_env::<::EmbeddedCurve>("SUBSTRATE_EVRF_KEY"), + read_key_from_env::<::EmbeddedCurve>( + "NETWORK_EVRF_KEY", + ), + ) +} + +async fn first_block_after_time(feed: &S, serai_time: u64) -> u64 { + async fn first_block_after_time_iteration( + feed: &S, + serai_time: u64, + ) -> Result, S::EphemeralError> { + let latest = feed.latest_finalized_block_number().await?; + let latest_time = feed.time_of_block(latest).await?; + if latest_time < serai_time { + tokio::time::sleep(core::time::Duration::from_secs(serai_time - latest_time)).await; + return Ok(None); + } + + // A finalized block has a time greater than or equal to the time we want to start at + // Find the first such block with a binary search + // start_search and end_search are inclusive + let mut start_search = 0; + let mut end_search = latest; + while start_search != end_search { + // This on purposely chooses the earlier block in the case two blocks are both in the middle + let to_check = start_search + ((end_search - start_search) / 2); + let block_time = feed.time_of_block(to_check).await?; + match block_time.cmp(&serai_time) { + Ordering::Less => { + start_search = to_check + 1; + assert!(start_search <= end_search); + } + Ordering::Equal | Ordering::Greater => { + // This holds true since we pick the earlier block upon an even search distance + // If it didn't, this would cause an infinite loop + assert!(to_check < end_search); + end_search = to_check; + } + } + } + Ok(Some(start_search)) + } + loop { + match first_block_after_time_iteration(feed, serai_time).await { + Ok(Some(block)) => return block, + Ok(None) => { + log::info!("waiting for block to activate at (a block with timestamp >= {serai_time})"); + } + Err(e) => { + log::error!("couldn't find the first block Serai should scan due to an RPC error: {e:?}"); + } + } + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } +} + +/// The main loop of a Processor, interacting with the Coordinator. 
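Two invariants make the above search sound: the midpoint rounds down, so `to_check < end_search` whenever the range is non-empty, and each branch strictly shrinks the range, so the loop terminates at the first block whose time is at least `serai_time`. The following is a standalone sketch of the same search over an in-memory list of monotone block times (illustrative values, assuming such a block exists, with the RPC calls and retry loop stripped out).

// Returns the number of the first block whose time is >= serai_time
fn first_block_after_time(block_times: &[u64], serai_time: u64) -> u64 {
  // start_search and end_search are inclusive
  let mut start_search = 0;
  let mut end_search = u64::try_from(block_times.len() - 1).unwrap();
  while start_search != end_search {
    // Integer division rounds down, so this picks the earlier of two middle
    // blocks, guaranteeing to_check < end_search and therefore progress in
    // both branches
    let to_check = start_search + ((end_search - start_search) / 2);
    if block_times[usize::try_from(to_check).unwrap()] < serai_time {
      start_search = to_check + 1;
    } else {
      end_search = to_check;
    }
  }
  start_search
}

fn main() {
  let times = [0u64, 5, 5, 10, 10, 20];
  assert_eq!(first_block_after_time(&times, 10), 3);
  assert_eq!(first_block_after_time(&times, 11), 5);
  assert_eq!(first_block_after_time(&times, 0), 0);
}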
+pub async fn coordinator_loop< + S: ScannerFeed, + K: KeyGenParams>>, + Sch: Scheduler< + S, + SignableTransaction: SignableTransaction, + >, + P: TransactionPublisher<::Transaction>, +>( + mut db: Db, + feed: S, + publisher: P, +) { + let mut coordinator = Coordinator::new(db.clone()); + + let mut key_gen = key_gen::(); + let mut scanner = Scanner::new::(db.clone(), feed.clone()).await; + let mut signers = + Signers::::new(db.clone(), coordinator.coordinator_send(), publisher); + + loop { + let db_clone = db.clone(); + let mut txn = db.txn(); + let msg = coordinator.next_message(&mut txn).await; + let mut txn = Some(txn); + match msg { + messages::CoordinatorMessage::KeyGen(msg) => { + let txn = txn.as_mut().unwrap(); + let mut new_key = None; + // This is a computationally expensive call yet it happens infrequently + for msg in key_gen.handle(txn, msg) { + if let messages::key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } = &msg { + new_key = Some(*session) + } + coordinator.send_message(messages::ProcessorMessage::KeyGen(msg)).await; + } + + // If we were yielded a key, register it in the signers + if let Some(session) = new_key { + let (substrate_keys, network_keys) = KeyGen::::key_shares(txn, session) + .expect("generated key pair yet couldn't get key shares"); + signers.register_keys(txn, session, substrate_keys, network_keys); + } + } + + // These are cheap calls which are fine to be here in this loop + messages::CoordinatorMessage::Sign(msg) => { + let txn = txn.as_mut().unwrap(); + signers.queue_message(txn, &msg) + } + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { + session, + block_number, + block, + }, + ) => { + let txn = txn.take().unwrap(); + signers.cosign_block(txn, session, block_number, block) + } + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::SignSlashReport { session, report }, + ) => { + let txn = txn.take().unwrap(); + signers.sign_slash_report(txn, session, &report) + } + + messages::CoordinatorMessage::Substrate(msg) => match msg { + messages::substrate::CoordinatorMessage::SetKeys { serai_time, session, key_pair } => { + let txn = txn.as_mut().unwrap(); + let key = + EncodableG(K::decode_key(key_pair.1.as_ref()).expect("invalid key set on serai")); + + // Queue the key to be activated upon the next Batch + KeyToActivate::>::send(txn, &key); + + // Set the external key, as needed by the signers + ExternalKeyForSessionForSigners::>::set(txn, session, &key); + + // This is presumed extremely expensive, potentially blocking for several minutes, yet + // only happens for the very first set of keys + if session == Session(0) { + assert!(scanner.is_none()); + let start_block = first_block_after_time(&feed, serai_time).await; + scanner = + Some(Scanner::initialize::(db_clone, feed.clone(), start_block, key.0).await); + } + } + messages::substrate::CoordinatorMessage::SlashesReported { session } => { + let txn = txn.as_mut().unwrap(); + + // Since this session had its slashes reported, it has finished all its signature + // protocols and has been fully retired. 
We retire it from the signers accordingly + let key = ExternalKeyForSessionForSigners::>::take(txn, session).unwrap().0; + + // This is a cheap call + signers.retire_session(txn, session, &key) + } + messages::substrate::CoordinatorMessage::BlockWithBatchAcknowledgement { + block: _, + batch_id, + in_instruction_succeededs, + burns, + } => { + let mut txn = txn.take().unwrap(); + let scanner = scanner.as_mut().unwrap(); + let key_to_activate = KeyToActivate::>::try_recv(&mut txn).map(|key| key.0); + // This is a cheap call as it internally just queues this to be done later + scanner.acknowledge_batch( + txn, + batch_id, + in_instruction_succeededs, + burns, + key_to_activate, + ) + } + messages::substrate::CoordinatorMessage::BlockWithoutBatchAcknowledgement { + block: _, + burns, + } => { + let txn = txn.take().unwrap(); + let scanner = scanner.as_mut().unwrap(); + // This is a cheap call as it internally just queues this to be done later + scanner.queue_burns(txn, burns) + } + }, + }; + // If the txn wasn't already consumed and committed, commit it + if let Some(txn) = txn { + txn.commit(); + } + } +} diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml index c92e13849..c968e36b9 100644 --- a/processor/bitcoin/Cargo.toml +++ b/processor/bitcoin/Cargo.toml @@ -18,8 +18,10 @@ workspace = true [dependencies] async-trait = { version = "0.1", default-features = false } +zeroize = { version = "1", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false } +hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } @@ -51,8 +53,10 @@ utxo-scheduler = { package = "serai-processor-utxo-scheduler-primitives", path = transaction-chaining-scheduler = { package = "serai-processor-transaction-chaining-scheduler", path = "../scheduler/utxo/transaction-chaining" } signers = { package = "serai-processor-signers", path = "../signers" } +bin = { package = "serai-processor-bin", path = "../bin" } + message-queue = { package = "serai-message-queue", path = "../../message-queue" } [features] -parity-db = ["serai-db/parity-db"] -rocksdb = ["serai-db/rocksdb"] +parity-db = ["bin/parity-db"] +rocksdb = ["bin/rocksdb"] diff --git a/processor/bitcoin/src/key_gen.rs b/processor/bitcoin/src/key_gen.rs index 759443643..415441348 100644 --- a/processor/bitcoin/src/key_gen.rs +++ b/processor/bitcoin/src/key_gen.rs @@ -7,22 +7,22 @@ pub(crate) struct KeyGenParams; impl key_gen::KeyGenParams for KeyGenParams { const ID: &'static str = "Bitcoin"; - type ExternalNetworkCurve = Secp256k1; + type ExternalNetworkCiphersuite = Secp256k1; - fn tweak_keys(keys: &mut ThresholdKeys) { + fn tweak_keys(keys: &mut ThresholdKeys) { *keys = bitcoin_serai::wallet::tweak_keys(keys); // Also create a scanner to assert these keys, and all expected paths, are usable scanner(keys.group_key()); } - fn encode_key(key: ::G) -> Vec { + fn encode_key(key: ::G) -> Vec { let key = key.to_bytes(); let key: &[u8] = key.as_ref(); // Skip the parity encoding as we know this key is even key[1 ..].to_vec() } - fn decode_key(key: &[u8]) -> Option<::G> { + fn decode_key(key: &[u8]) -> Option<::G> { x_coord_to_even_point(key) } } diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index 1c07b6cd7..09228d44f 100644 --- a/processor/bitcoin/src/main.rs +++ 
b/processor/bitcoin/src/main.rs @@ -6,16 +6,9 @@ static ALLOCATOR: zalloc::ZeroizingAlloc = zalloc::ZeroizingAlloc(std::alloc::System); -use core::cmp::Ordering; +use bitcoin_serai::rpc::Rpc as BRpc; -use ciphersuite::Ciphersuite; - -use serai_client::validator_sets::primitives::Session; - -use serai_db::{DbTxn, Db}; -use ::primitives::EncodableG; -use ::key_gen::KeyGenParams as KeyGenParamsTrait; -use scanner::{ScannerFeed, Scanner}; +use ::primitives::task::{Task, ContinuallyRan}; mod primitives; pub(crate) use crate::primitives::*; @@ -34,6 +27,7 @@ use scheduler::Scheduler; // Our custom code for Bitcoin mod db; mod txindex; +use txindex::TxIndexTask; pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> [u8; 32] { use bitcoin_serai::bitcoin::hashes::Hash; @@ -43,205 +37,30 @@ pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> res } -async fn first_block_after_time(feed: &S, serai_time: u64) -> u64 { - async fn first_block_after_time_iteration( - feed: &S, - serai_time: u64, - ) -> Result, S::EphemeralError> { - let latest = feed.latest_finalized_block_number().await?; - let latest_time = feed.time_of_block(latest).await?; - if latest_time < serai_time { - tokio::time::sleep(core::time::Duration::from_secs(serai_time - latest_time)).await; - return Ok(None); - } - - // A finalized block has a time greater than or equal to the time we want to start at - // Find the first such block with a binary search - // start_search and end_search are inclusive - let mut start_search = 0; - let mut end_search = latest; - while start_search != end_search { - // This on purposely chooses the earlier block in the case two blocks are both in the middle - let to_check = start_search + ((end_search - start_search) / 2); - let block_time = feed.time_of_block(to_check).await?; - match block_time.cmp(&serai_time) { - Ordering::Less => { - start_search = to_check + 1; - assert!(start_search <= end_search); - } - Ordering::Equal | Ordering::Greater => { - // This holds true since we pick the earlier block upon an even search distance - // If it didn't, this would cause an infinite loop - assert!(to_check < end_search); - end_search = to_check; - } - } - } - Ok(Some(start_search)) - } - loop { - match first_block_after_time_iteration(feed, serai_time).await { - Ok(Some(block)) => return block, - Ok(None) => { - log::info!("waiting for block to activate at (a block with timestamp >= {serai_time})"); - } - Err(e) => { - log::error!("couldn't find the first block Serai should scan due to an RPC error: {e:?}"); - } - } - tokio::time::sleep(core::time::Duration::from_secs(5)).await; - } -} - -/// Fetch the next message from the Coordinator. -/// -/// This message is guaranteed to have never been handled before, where handling is defined as -/// this `txn` being committed. 
-async fn next_message(_txn: &mut impl DbTxn) -> messages::CoordinatorMessage { - todo!("TODO") -} - -async fn send_message(_msg: messages::ProcessorMessage) { - todo!("TODO") -} - -async fn coordinator_loop( - mut db: D, - feed: Rpc, - mut key_gen: ::key_gen::KeyGen, - mut signers: signers::Signers, Scheduler, Rpc>, - mut scanner: Option>>, -) { - loop { - let db_clone = db.clone(); - let mut txn = db.txn(); - let msg = next_message(&mut txn).await; - let mut txn = Some(txn); - match msg { - messages::CoordinatorMessage::KeyGen(msg) => { - let txn = txn.as_mut().unwrap(); - let mut new_key = None; - // This is a computationally expensive call yet it happens infrequently - for msg in key_gen.handle(txn, msg) { - if let messages::key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } = &msg { - new_key = Some(*session) - } - send_message(messages::ProcessorMessage::KeyGen(msg)).await; - } - - // If we were yielded a key, register it in the signers - if let Some(session) = new_key { - let (substrate_keys, network_keys) = - ::key_gen::KeyGen::::key_shares(txn, session) - .expect("generated key pair yet couldn't get key shares"); - signers.register_keys(txn, session, substrate_keys, network_keys); +#[tokio::main] +async fn main() { + let db = bin::init(); + let feed = Rpc { + db: db.clone(), + rpc: loop { + match BRpc::new(bin::url()).await { + Ok(rpc) => break rpc, + Err(e) => { + log::error!("couldn't connect to the Bitcoin node: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; } } + }, + }; - // These are cheap calls which are fine to be here in this loop - messages::CoordinatorMessage::Sign(msg) => { - let txn = txn.as_mut().unwrap(); - signers.queue_message(txn, &msg) - } - messages::CoordinatorMessage::Coordinator( - messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { - session, - block_number, - block, - }, - ) => { - let txn = txn.take().unwrap(); - signers.cosign_block(txn, session, block_number, block) - } - messages::CoordinatorMessage::Coordinator( - messages::coordinator::CoordinatorMessage::SignSlashReport { session, report }, - ) => { - let txn = txn.take().unwrap(); - signers.sign_slash_report(txn, session, &report) - } + let (index_task, index_handle) = Task::new(); + tokio::spawn(TxIndexTask(feed.clone()).continually_run(index_task, vec![])); + core::mem::forget(index_handle); - messages::CoordinatorMessage::Substrate(msg) => match msg { - messages::substrate::CoordinatorMessage::SetKeys { serai_time, session, key_pair } => { - let txn = txn.as_mut().unwrap(); - let key = EncodableG( - KeyGenParams::decode_key(key_pair.1.as_ref()).expect("invalid key set on serai"), - ); - - // Queue the key to be activated upon the next Batch - db::KeyToActivate::< - <::ExternalNetworkCurve as Ciphersuite>::G, - >::send(txn, &key); - - // Set the external key, as needed by the signers - db::ExternalKeyForSessionForSigners::< - <::ExternalNetworkCurve as Ciphersuite>::G, - >::set(txn, session, &key); - - // This is presumed extremely expensive, potentially blocking for several minutes, yet - // only happens for the very first set of keys - if session == Session(0) { - assert!(scanner.is_none()); - let start_block = first_block_after_time(&feed, serai_time).await; - scanner = - Some(Scanner::new::>(db_clone, feed.clone(), start_block, key.0).await); - } - } - messages::substrate::CoordinatorMessage::SlashesReported { session } => { - let txn = txn.as_mut().unwrap(); - - // Since this session had its slashes reported, it has finished all its signature 
- // protocols and has been fully retired. We retire it from the signers accordingly - let key = db::ExternalKeyForSessionForSigners::< - <::ExternalNetworkCurve as Ciphersuite>::G, - >::take(txn, session) - .unwrap() - .0; - - // This is a cheap call - signers.retire_session(txn, session, &key) - } - messages::substrate::CoordinatorMessage::BlockWithBatchAcknowledgement { - block: _, - batch_id, - in_instruction_succeededs, - burns, - } => { - let mut txn = txn.take().unwrap(); - let scanner = scanner.as_mut().unwrap(); - let key_to_activate = db::KeyToActivate::< - <::ExternalNetworkCurve as Ciphersuite>::G, - >::try_recv(&mut txn) - .map(|key| key.0); - // This is a cheap call as it internally just queues this to be done later - scanner.acknowledge_batch( - txn, - batch_id, - in_instruction_succeededs, - burns, - key_to_activate, - ) - } - messages::substrate::CoordinatorMessage::BlockWithoutBatchAcknowledgement { - block: _, - burns, - } => { - let txn = txn.take().unwrap(); - let scanner = scanner.as_mut().unwrap(); - // This is a cheap call as it internally just queues this to be done later - scanner.queue_burns(txn, burns) - } - }, - }; - // If the txn wasn't already consumed and committed, commit it - if let Some(txn) = txn { - txn.commit(); - } - } + bin::coordinator_loop::<_, KeyGenParams, Scheduler<_>, Rpc>(db, feed.clone(), feed) + .await; } -#[tokio::main] -async fn main() {} - /* use bitcoin_serai::{ bitcoin::{ @@ -278,9 +97,6 @@ use serai_client::{ */ /* -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub(crate) struct Fee(u64); - #[async_trait] impl TransactionTrait for Transaction { #[cfg(test)] diff --git a/processor/bitcoin/src/txindex.rs b/processor/bitcoin/src/txindex.rs index 2d3f1cd64..4ed389732 100644 --- a/processor/bitcoin/src/txindex.rs +++ b/processor/bitcoin/src/txindex.rs @@ -35,7 +35,7 @@ pub(crate) fn script_pubkey_for_on_chain_output( ) } -pub(crate) struct TxIndexTask(Rpc); +pub(crate) struct TxIndexTask(pub(crate) Rpc); #[async_trait::async_trait] impl ContinuallyRan for TxIndexTask { diff --git a/processor/key-gen/src/db.rs b/processor/key-gen/src/db.rs index 676fd2aa9..149fe1a2f 100644 --- a/processor/key-gen/src/db.rs +++ b/processor/key-gen/src/db.rs @@ -19,7 +19,7 @@ pub(crate) struct Params { pub(crate) substrate_evrf_public_keys: Vec<<::EmbeddedCurve as Ciphersuite>::G>, pub(crate) network_evrf_public_keys: - Vec<<::EmbeddedCurve as Ciphersuite>::G>, + Vec<<::EmbeddedCurve as Ciphersuite>::G>, } #[derive(BorshSerialize, BorshDeserialize)] @@ -93,9 +93,9 @@ impl KeyGenDb
<P>
{ .network_evrf_public_keys .into_iter() .map(|key| { - <::EmbeddedCurve as Ciphersuite>::read_G::<&[u8]>( - &mut key.as_ref(), - ) + <::EmbeddedCurve as Ciphersuite>::read_G::< + &[u8], + >(&mut key.as_ref()) .unwrap() }) .collect(), @@ -118,7 +118,7 @@ impl KeyGenDb
<P>
{ txn: &mut impl DbTxn, session: Session, substrate_keys: &[ThresholdKeys], - network_keys: &[ThresholdKeys], + network_keys: &[ThresholdKeys], ) { assert_eq!(substrate_keys.len(), network_keys.len()); @@ -134,7 +134,8 @@ impl KeyGenDb
<P>
{ pub(crate) fn key_shares( getter: &impl Get, session: Session, - ) -> Option<(Vec>, Vec>)> { + ) -> Option<(Vec>, Vec>)> + { let keys = _db::KeyShares::get(getter, &session)?; let mut keys: &[u8] = keys.as_ref(); diff --git a/processor/key-gen/src/lib.rs b/processor/key-gen/src/lib.rs index cb23a740b..fd847cc55 100644 --- a/processor/key-gen/src/lib.rs +++ b/processor/key-gen/src/lib.rs @@ -34,27 +34,29 @@ pub trait KeyGenParams { const ID: &'static str; /// The curve used for the external network. - type ExternalNetworkCurve: EvrfCurve< + type ExternalNetworkCiphersuite: EvrfCurve< EmbeddedCurve: Ciphersuite< - G: ec_divisors::DivisorCurve::F>, + G: ec_divisors::DivisorCurve< + FieldElement = ::F, + >, >, >; /// Tweaks keys as necessary/beneficial. - fn tweak_keys(keys: &mut ThresholdKeys); + fn tweak_keys(keys: &mut ThresholdKeys); /// Encode keys as optimal. /// /// A default implementation is provided which calls the traditional `to_bytes`. - fn encode_key(key: ::G) -> Vec { + fn encode_key(key: ::G) -> Vec { key.to_bytes().as_ref().to_vec() } /// Decode keys from their optimal encoding. /// /// A default implementation is provided which calls the traditional `from_bytes`. - fn decode_key(mut key: &[u8]) -> Option<::G> { - let res = ::read_G(&mut key).ok()?; + fn decode_key(mut key: &[u8]) -> Option<::G> { + let res = ::read_G(&mut key).ok()?; if !key.is_empty() { None?; } @@ -143,7 +145,7 @@ pub struct KeyGen { substrate_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, network_evrf_private_key: - Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, + Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, } impl KeyGen

@@ -154,7 +156,7 @@ impl<P: KeyGenParams> KeyGen<P>

{ <<Ristretto as EvrfCurve>::EmbeddedCurve as Ciphersuite>::F, >, network_evrf_private_key: Zeroizing< - <<P::ExternalNetworkCurve as EvrfCurve>::EmbeddedCurve as Ciphersuite>::F, + <<P::ExternalNetworkCiphersuite as EvrfCurve>::EmbeddedCurve as Ciphersuite>::F, >, ) -> KeyGen<P>

{ KeyGen { substrate_evrf_private_key, network_evrf_private_key } @@ -165,7 +167,8 @@ impl<P: KeyGenParams> KeyGen<P>

{ pub fn key_shares( getter: &impl Get, session: Session, - ) -> Option<(Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<P::ExternalNetworkCurve>>)> { + ) -> Option<(Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<P::ExternalNetworkCiphersuite>>)> + { // This is safe, despite not having a txn, since it's a static value // It doesn't change over time/in relation to other operations // It is solely set or unset @@ -198,7 +201,7 @@ impl<P: KeyGenParams> KeyGen<P>

{ let network_evrf_public_keys = evrf_public_keys.into_iter().map(|(_, key)| key).collect::<Vec<_>>(); let (network_evrf_public_keys, additional_faulty) = - coerce_keys::<P::ExternalNetworkCurve>(&network_evrf_public_keys); + coerce_keys::<P::ExternalNetworkCiphersuite>(&network_evrf_public_keys); faulty.extend(additional_faulty); // Participate for both Substrate and the network @@ -228,7 +231,7 @@ impl<P: KeyGenParams> KeyGen<P>

{ &self.substrate_evrf_private_key, &mut participation, ); - participate::<P::ExternalNetworkCurve>( + participate::<P::ExternalNetworkCiphersuite>( context::<P>

(session, NETWORK_KEY_CONTEXT), threshold, &network_evrf_public_keys, @@ -283,7 +286,7 @@ impl<P: KeyGenParams> KeyGen<P>

{ }; let len_at_network_participation_start_pos = participation.len(); let Ok(network_participation) = - Participation::<P::ExternalNetworkCurve>::read(&mut participation, n) + Participation::<P::ExternalNetworkCiphersuite>::read(&mut participation, n) else { return blame; }; @@ -317,7 +320,7 @@ impl<P: KeyGenParams> KeyGen<P>

{ } } - match EvrfDkg::<P::ExternalNetworkCurve>::verify( + match EvrfDkg::<P::ExternalNetworkCiphersuite>::verify( &mut OsRng, generators(), context::<P>

(session, NETWORK_KEY_CONTEXT), @@ -490,7 +493,7 @@ impl<P: KeyGenParams> KeyGen<P>

{ Err(blames) => return blames, }; - let network_dkg = match verify_dkg::( + let network_dkg = match verify_dkg::( txn, session, false, diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 107616cc8..49ab17859 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -70,6 +70,8 @@ impl OutputWithInInstruction { create_db!( ScannerGlobal { + StartBlock: () -> u64, + QueuedKey: (key: K) -> (), ActiveKeys: () -> Vec>, @@ -106,8 +108,11 @@ create_db!( pub(crate) struct ScannerGlobalDb(PhantomData); impl ScannerGlobalDb { - pub(crate) fn has_any_key_been_queued(getter: &impl Get) -> bool { - ActiveKeys::>>::get(getter).is_some() + pub(crate) fn start_block(getter: &impl Get) -> Option { + StartBlock::get(getter) + } + pub(crate) fn set_start_block(txn: &mut impl DbTxn, block: u64) { + StartBlock::set(txn, &block) } /// Queue a key. diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 6ed16d745..ebd783bfa 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -344,17 +344,10 @@ impl Scanner { /// Create a new scanner. /// /// This will begin its execution, spawning several asynchronous tasks. - pub async fn new>( - mut db: impl Db, - feed: S, - start_block: u64, - start_key: KeyFor, - ) -> Self { - if !ScannerGlobalDb::::has_any_key_been_queued(&db) { - let mut txn = db.txn(); - ScannerGlobalDb::::queue_key(&mut txn, start_block, start_key); - txn.commit(); - } + /// + /// This will return None if the Scanner was never initialized. + pub async fn new>(db: impl Db, feed: S) -> Option { + let start_block = ScannerGlobalDb::::start_block(&db)?; let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await; let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block); @@ -381,7 +374,28 @@ impl Scanner { // window its allowed to scan tokio::spawn(eventuality_task.continually_run(eventuality_task_def, vec![scan_handle])); - Self { substrate_handle, _S: PhantomData } + Some(Self { substrate_handle, _S: PhantomData }) + } + + /// Initialize the scanner. + /// + /// This will begin its execution, spawning several asynchronous tasks. + /// + /// This passes through to `Scanner::new` if prior called. + pub async fn initialize>( + mut db: impl Db, + feed: S, + start_block: u64, + start_key: KeyFor, + ) -> Self { + if ScannerGlobalDb::::start_block(&db).is_none() { + let mut txn = db.txn(); + ScannerGlobalDb::::set_start_block(&mut txn, start_block); + ScannerGlobalDb::::queue_key(&mut txn, start_block, start_key); + txn.commit(); + } + + Self::new::(db, feed).await.unwrap() } /// Acknowledge a Batch having been published on Serai. diff --git a/processor/signers/src/coordinator/mod.rs b/processor/signers/src/coordinator/mod.rs index a3163922e..e749f8410 100644 --- a/processor/signers/src/coordinator/mod.rs +++ b/processor/signers/src/coordinator/mod.rs @@ -114,6 +114,7 @@ impl ContinuallyRan for CoordinatorTask { self .coordinator .publish_slash_report_signature( + session, <_>::decode(&mut slash_report_signature.as_slice()).unwrap(), ) .await diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index 881205f81..e06dd07f9 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -71,6 +71,7 @@ pub trait Coordinator: 'static + Send + Sync { /// Publish a slash report's signature. 
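/// `session` identifies the validator set whose slash report was signed.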
async fn publish_slash_report_signature( &mut self, + session: Session, signature: Signature, ) -> Result<(), Self::EphemeralError>; } diff --git a/processor/src/coordinator.rs b/processor/src/coordinator.rs deleted file mode 100644 index 26786e30c..000000000 --- a/processor/src/coordinator.rs +++ /dev/null @@ -1,43 +0,0 @@ -use messages::{ProcessorMessage, CoordinatorMessage}; - -use message_queue::{Service, Metadata, client::MessageQueue}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Message { - pub id: u64, - pub msg: CoordinatorMessage, -} - -#[async_trait::async_trait] -pub trait Coordinator { - async fn send(&mut self, msg: impl Send + Into); - async fn recv(&mut self) -> Message; - async fn ack(&mut self, msg: Message); -} - -#[async_trait::async_trait] -impl Coordinator for MessageQueue { - async fn send(&mut self, msg: impl Send + Into) { - let msg: ProcessorMessage = msg.into(); - let metadata = Metadata { from: self.service, to: Service::Coordinator, intent: msg.intent() }; - let msg = borsh::to_vec(&msg).unwrap(); - - self.queue(metadata, msg).await; - } - - async fn recv(&mut self) -> Message { - let msg = self.next(Service::Coordinator).await; - - let id = msg.id; - - // Deserialize it into a CoordinatorMessage - let msg: CoordinatorMessage = - borsh::from_slice(&msg.msg).expect("message wasn't a borsh-encoded CoordinatorMessage"); - - return Message { id, msg }; - } - - async fn ack(&mut self, msg: Message) { - MessageQueue::ack(self, Service::Coordinator, msg.id).await - } -} diff --git a/processor/src/db.rs b/processor/src/db.rs deleted file mode 100644 index ffd7c43ad..000000000 --- a/processor/src/db.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::io::Read; - -use scale::{Encode, Decode}; -use serai_client::validator_sets::primitives::{Session, KeyPair}; - -pub use serai_db::*; - -use crate::networks::{Block, Network}; - -create_db!( - MainDb { - HandledMessageDb: (id: u64) -> (), - PendingActivationsDb: () -> Vec - } -); - -impl PendingActivationsDb { - pub fn pending_activation( - getter: &impl Get, - ) -> Option<(>::Id, Session, KeyPair)> { - if let Some(bytes) = Self::get(getter) { - if !bytes.is_empty() { - let mut slice = bytes.as_slice(); - let (session, key_pair) = <(Session, KeyPair)>::decode(&mut slice).unwrap(); - let mut block_before_queue_block = >::Id::default(); - slice.read_exact(block_before_queue_block.as_mut()).unwrap(); - assert!(slice.is_empty()); - return Some((block_before_queue_block, session, key_pair)); - } - } - None - } - pub fn set_pending_activation( - txn: &mut impl DbTxn, - block_before_queue_block: &>::Id, - session: Session, - key_pair: KeyPair, - ) { - let mut buf = (session, key_pair).encode(); - buf.extend(block_before_queue_block.as_ref()); - Self::set(txn, &buf); - } -} diff --git a/processor/src/main.rs b/processor/src/main.rs index 65e74f550..b4a5053a4 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -60,263 +60,9 @@ async fn handle_coordinator_msg( } } -async fn boot( - raw_db: &mut D, - network: &N, - coordinator: &mut Co, -) -> (D, TributaryMutable, SubstrateMutable) { - fn read_key_from_env(label: &'static str) -> Zeroizing { - let key_hex = - Zeroizing::new(env::var(label).unwrap_or_else(|| panic!("{label} wasn't provided"))); - let bytes = Zeroizing::new( - hex::decode(key_hex).unwrap_or_else(|_| panic!("{label} wasn't a valid hex string")), - ); - - let mut repr = ::Repr::default(); - if repr.as_ref().len() != bytes.len() { - panic!("{label} wasn't the correct length"); - } - 
repr.as_mut().copy_from_slice(bytes.as_slice()); - let res = Zeroizing::new( - Option::from(::from_repr(repr)) - .unwrap_or_else(|| panic!("{label} wasn't a valid scalar")), - ); - repr.as_mut().zeroize(); - res - } - - let key_gen = KeyGen::::new( - raw_db.clone(), - read_key_from_env::<::EmbeddedCurve>("SUBSTRATE_EVRF_KEY"), - read_key_from_env::<::EmbeddedCurve>("NETWORK_EVRF_KEY"), - ); - - let (multisig_manager, current_keys, actively_signing) = - MultisigManager::new(raw_db, network).await; - - let mut batch_signer = None; - let mut signers = HashMap::new(); - - for (i, key) in current_keys.iter().enumerate() { - let Some((session, (substrate_keys, network_keys))) = key_gen.keys(key) else { continue }; - let network_key = network_keys[0].group_key(); - - // If this is the oldest key, load the BatchSigner for it as the active BatchSigner - // The new key only takes responsibility once the old key is fully deprecated - // - // We don't have to load any state for this since the Scanner will re-fire any events - // necessary, only no longer scanning old blocks once Substrate acks them - if i == 0 { - batch_signer = Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); - } - - // The Scanner re-fires events as needed for batch_signer yet not signer - // This is due to the transactions which we start signing from due to a block not being - // guaranteed to be signed before we stop scanning the block on reboot - // We could simplify the Signer flow by delaying when it acks a block, yet that'd: - // 1) Increase the startup time - // 2) Cause re-emission of Batch events, which we'd need to check the safety of - // (TODO: Do anyways?) - // 3) Violate the attempt counter (TODO: Is this already being violated?) - let mut signer = Signer::new(network.clone(), session, network_keys); - - // Sign any TXs being actively signed - for (plan, tx, eventuality) in &actively_signing { - if plan.key == network_key { - let mut txn = raw_db.txn(); - if let Some(msg) = - signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality).await - { - coordinator.send(msg).await; - } - // This should only have re-writes of existing data - drop(txn); - } - } - - signers.insert(session, signer); - } - - // Spawn a task to rebroadcast signed TXs yet to be mined into a finalized block - // This hedges against being dropped due to full mempools, temporarily too low of a fee... 
- tokio::spawn(Signer::::rebroadcast_task(raw_db.clone(), network.clone())); - - ( - raw_db.clone(), - TributaryMutable { key_gen, batch_signer, cosigner: None, slash_report_signer: None, signers }, - multisig_manager, - ) -} - -#[allow(clippy::await_holding_lock)] // Needed for txn, unfortunately can't be down-scoped -async fn run(mut raw_db: D, network: N, mut coordinator: Co) { - // We currently expect a contextless bidirectional mapping between these two values - // (which is that any value of A can be interpreted as B and vice versa) - // While we can write a contextual mapping, we have yet to do so - // This check ensures no network which doesn't have a bidirectional mapping is defined - assert_eq!(>::Id::default().as_ref().len(), BlockHash([0u8; 32]).0.len()); - - let (main_db, mut tributary_mutable, mut substrate_mutable) = - boot(&mut raw_db, &network, &mut coordinator).await; - - // We can't load this from the DB as we can't guarantee atomic increments with the ack function - // TODO: Load with a slight tolerance - let mut last_coordinator_msg = None; - - loop { - let mut txn = raw_db.txn(); - - log::trace!("new db txn in run"); - - let mut outer_msg = None; - - tokio::select! { - // This blocks the entire processor until it finishes handling this message - // KeyGen specifically may take a notable amount of processing time - // While that shouldn't be an issue in practice, as after processing an attempt it'll handle - // the other messages in the queue, it may be beneficial to parallelize these - // They could potentially be parallelized by type (KeyGen, Sign, Substrate) without issue - msg = coordinator.recv() => { - if let Some(last_coordinator_msg) = last_coordinator_msg { - assert_eq!(msg.id, last_coordinator_msg + 1); - } - last_coordinator_msg = Some(msg.id); - - // Only handle this if we haven't already - if HandledMessageDb::get(&main_db, msg.id).is_none() { - HandledMessageDb::set(&mut txn, msg.id, &()); - - // This is isolated to better think about how its ordered, or rather, about how the other - // cases aren't ordered - // - // While the coordinator messages are ordered, they're not deterministically ordered - // Tributary-caused messages are deterministically ordered, and Substrate-caused messages - // are deterministically-ordered, yet they're both shoved into a singular queue - // The order at which they're shoved in together isn't deterministic - // - // This is safe so long as Tributary and Substrate messages don't both expect mutable - // references over the same data - handle_coordinator_msg( - &mut txn, - &network, - &mut coordinator, - &mut tributary_mutable, - &mut substrate_mutable, - &msg, - ).await; - } - - outer_msg = Some(msg); - }, - - scanner_event = substrate_mutable.next_scanner_event() => { - let msg = substrate_mutable.scanner_event_to_multisig_event( - &mut txn, - &network, - scanner_event - ).await; - - match msg { - MultisigEvent::Batches(retired_key_new_key, batches) => { - // Start signing this batch - for batch in batches { - info!("created batch {} ({} instructions)", batch.id, batch.instructions.len()); - - // The coordinator expects BatchPreprocess to immediately follow Batch - coordinator.send( - messages::substrate::ProcessorMessage::Batch { batch: batch.clone() } - ).await; - - if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() { - if let Some(msg) = batch_signer.sign(&mut txn, batch) { - coordinator.send(msg).await; - } - } - } - - if let Some((retired_key, new_key)) = retired_key_new_key { - // Safe to mutate since 
all signing operations are done and no more will be added - if let Some(retired_session) = SessionDb::get(&txn, retired_key.to_bytes().as_ref()) { - tributary_mutable.signers.remove(&retired_session); - } - tributary_mutable.batch_signer.take(); - let keys = tributary_mutable.key_gen.keys(&new_key); - if let Some((session, (substrate_keys, _))) = keys { - tributary_mutable.batch_signer = - Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); - } - } - }, - MultisigEvent::Completed(key, id, tx) => { - if let Some(session) = SessionDb::get(&txn, &key) { - let signer = tributary_mutable.signers.get_mut(&session).unwrap(); - if let Some(msg) = signer.completed(&mut txn, id, &tx) { - coordinator.send(msg).await; - } - } - } - } - }, - } - - txn.commit(); - if let Some(msg) = outer_msg { - coordinator.ack(msg).await; - } - } -} - #[tokio::main] async fn main() { - // Override the panic handler with one which will panic if any tokio task panics - { - let existing = std::panic::take_hook(); - std::panic::set_hook(Box::new(move |panic| { - existing(panic); - const MSG: &str = "exiting the process due to a task panicking"; - println!("{MSG}"); - log::error!("{MSG}"); - std::process::exit(1); - })); - } - - if std::env::var("RUST_LOG").is_err() { - std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string())); - } - env_logger::init(); - - #[allow(unused_variables, unreachable_code)] - let db = { - #[cfg(all(feature = "parity-db", feature = "rocksdb"))] - panic!("built with parity-db and rocksdb"); - #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] - let db = - serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); - #[cfg(feature = "rocksdb")] - let db = - serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); - db - }; - - // Network configuration - let url = { - let login = env::var("NETWORK_RPC_LOGIN").expect("network RPC login wasn't specified"); - let hostname = env::var("NETWORK_RPC_HOSTNAME").expect("network RPC hostname wasn't specified"); - let port = env::var("NETWORK_RPC_PORT").expect("network port domain wasn't specified"); - "http://".to_string() + &login + "@" + &hostname + ":" + &port - }; - let network_id = match env::var("NETWORK").expect("network wasn't specified").as_str() { - "bitcoin" => NetworkId::Bitcoin, - "ethereum" => NetworkId::Ethereum, - "monero" => NetworkId::Monero, - _ => panic!("unrecognized network"), - }; - - let coordinator = MessageQueue::from_env(Service::Processor(network_id)); - match network_id { - #[cfg(feature = "bitcoin")] - NetworkId::Bitcoin => run(db, Bitcoin::new(url).await, coordinator).await, #[cfg(feature = "ethereum")] NetworkId::Ethereum => { let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") @@ -327,8 +73,5 @@ async fn main() { let relayer_url = relayer_hostname + ":" + &relayer_port; run(db.clone(), Ethereum::new(db, url, relayer_url).await, coordinator).await } - #[cfg(feature = "monero")] - NetworkId::Monero => run(db, Monero::new(url).await, coordinator).await, - _ => panic!("spawning a processor for an unsupported network"), } } From 2f5d141289a94b6280b0dc22cc40464455d735a5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 11 Sep 2024 19:29:56 -0400 Subject: [PATCH 111/179] Use a local DB channel for sending to the message-queue The provided message-queue queue functions runs unti it succeeds. 
This means sending to the message-queue will no longer potentially block for arbitrary amount of times as sending messages is just writing them to a DB. --- processor/bin/src/coordinator.rs | 154 ++++++++++++++++++------------- processor/bin/src/lib.rs | 4 +- processor/bitcoin/src/db.rs | 19 +--- processor/bitcoin/src/main.rs | 3 +- 4 files changed, 96 insertions(+), 84 deletions(-) diff --git a/processor/bin/src/coordinator.rs b/processor/bin/src/coordinator.rs index d9d8d1129..12442c3d7 100644 --- a/processor/bin/src/coordinator.rs +++ b/processor/bin/src/coordinator.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::sync::{LazyLock, Arc, Mutex}; use tokio::sync::mpsc; @@ -9,8 +9,8 @@ use serai_client::{ in_instructions::primitives::{Batch, SignedBatch}, }; -use serai_env as env; use serai_db::{Get, DbTxn, Db, create_db, db_channel}; +use serai_env as env; use message_queue::{Service, Metadata, client::MessageQueue}; create_db! { @@ -21,27 +21,47 @@ create_db! { db_channel! { ProcessorBinCoordinator { - CoordinatorMessages: () -> Vec + ReceivedCoordinatorMessages: () -> Vec, + } +} + +// A lock to access SentCoordinatorMessages::send +static SEND_LOCK: LazyLock> = LazyLock::new(|| Mutex::new(())); + +db_channel! { + ProcessorBinCoordinator { + SentCoordinatorMessages: () -> Vec, } } -async fn send(service: Service, queue: &MessageQueue, msg: messages::ProcessorMessage) { - let metadata = Metadata { from: service, to: Service::Coordinator, intent: msg.intent() }; - let msg = borsh::to_vec(&msg).unwrap(); - queue.queue(metadata, msg).await; +#[derive(Clone)] +pub(crate) struct CoordinatorSend { + db: crate::Db, + sent_message: mpsc::UnboundedSender<()>, } -pub(crate) struct Coordinator { - new_message: mpsc::UnboundedReceiver<()>, - service: Service, - message_queue: Arc, +impl CoordinatorSend { + fn send(&mut self, msg: &messages::ProcessorMessage) { + let _lock = SEND_LOCK.lock().unwrap(); + let mut txn = self.db.txn(); + SentCoordinatorMessages::send(&mut txn, &borsh::to_vec(msg).unwrap()); + txn.commit(); + self + .sent_message + .send(()) + .expect("failed to tell the Coordinator tasks there's a new message to send"); + } } -pub(crate) struct CoordinatorSend(Service, Arc); +pub(crate) struct Coordinator { + received_message: mpsc::UnboundedReceiver<()>, + send: CoordinatorSend, +} impl Coordinator { - pub(crate) fn new(mut db: crate::Db) -> Self { - let (new_message_send, new_message_recv) = mpsc::unbounded_channel(); + pub(crate) fn new(db: crate::Db) -> Self { + let (received_message_send, received_message_recv) = mpsc::unbounded_channel(); + let (sent_message_send, mut sent_message_recv) = mpsc::unbounded_channel(); let network_id = match env::var("NETWORK").expect("network wasn't specified").as_str() { "bitcoin" => NetworkId::Bitcoin, @@ -55,6 +75,7 @@ impl Coordinator { // Spawn a task to move messages from the message-queue to our database so we can achieve // atomicity. 
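// (If the process dies before this txn commits, the message was never acked and will be
// redelivered; if it dies after the commit yet before the ack, the message is already saved
// locally, so the redelivered copy is skipped and simply acked again.)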
This is the only place we read/ack messages from tokio::spawn({ + let mut db = db.clone(); let message_queue = message_queue.clone(); async move { loop { @@ -70,7 +91,7 @@ impl Coordinator { assert!((saved_messages == prior_msg) || (saved_messages == Some(msg.id))); if saved_messages < Some(msg.id) { let mut txn = db.txn(); - CoordinatorMessages::send(&mut txn, &msg.msg); + ReceivedCoordinatorMessages::send(&mut txn, &msg.msg); SavedMessages::set(&mut txn, &msg.id); txn.commit(); } @@ -78,16 +99,45 @@ impl Coordinator { message_queue.ack(Service::Coordinator, msg.id).await; // Fire that there's a new message - new_message_send.send(()).expect("failed to tell the Coordinator there's a new message"); + received_message_send + .send(()) + .expect("failed to tell the Coordinator there's a new message"); + } + } + }); + + // Spawn a task to send messages to the message-queue + tokio::spawn({ + let mut db = db.clone(); + async move { + loop { + let mut txn = db.txn(); + match SentCoordinatorMessages::try_recv(&mut txn) { + Some(msg) => { + let metadata = Metadata { + from: service, + to: Service::Coordinator, + intent: borsh::from_slice::(&msg).unwrap().intent(), + }; + message_queue.queue(metadata, msg).await; + txn.commit(); + } + None => { + let _ = + tokio::time::timeout(core::time::Duration::from_secs(60), sent_message_recv.recv()) + .await; + } + } } } }); - Coordinator { new_message: new_message_recv, service, message_queue } + let send = CoordinatorSend { db, sent_message: sent_message_send }; + Coordinator { received_message: received_message_recv, send } } pub(crate) fn coordinator_send(&self) -> CoordinatorSend { - CoordinatorSend(self.service, self.message_queue.clone()) + self.send.clone() } /// Fetch the next message from the Coordinator. @@ -99,23 +149,22 @@ impl Coordinator { txn: &mut impl DbTxn, ) -> messages::CoordinatorMessage { loop { - match CoordinatorMessages::try_recv(txn) { + match ReceivedCoordinatorMessages::try_recv(txn) { Some(msg) => { return borsh::from_slice(&msg) .expect("message wasn't a borsh-encoded CoordinatorMessage") } None => { let _ = - tokio::time::timeout(core::time::Duration::from_secs(60), self.new_message.recv()) + tokio::time::timeout(core::time::Duration::from_secs(60), self.received_message.recv()) .await; } } } } - #[allow(clippy::unused_async)] - pub(crate) async fn send_message(&mut self, msg: messages::ProcessorMessage) { - send(self.service, &self.message_queue, msg).await + pub(crate) fn send_message(&mut self, msg: &messages::ProcessorMessage) { + self.send.send(msg); } } @@ -127,8 +176,7 @@ impl signers::Coordinator for CoordinatorSend { &mut self, msg: messages::sign::ProcessorMessage, ) -> Result<(), Self::EphemeralError> { - // TODO: Use a fallible send for these methods - send(self.0, &self.1, messages::ProcessorMessage::Sign(msg)).await; + self.send(&messages::ProcessorMessage::Sign(msg)); Ok(()) } @@ -138,40 +186,27 @@ impl signers::Coordinator for CoordinatorSend { block: [u8; 32], signature: Signature, ) -> Result<(), Self::EphemeralError> { - send( - self.0, - &self.1, - messages::ProcessorMessage::Coordinator( - messages::coordinator::ProcessorMessage::CosignedBlock { - block_number, - block, - signature: signature.encode(), - }, - ), - ) - .await; + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::CosignedBlock { + block_number, + block, + signature: signature.encode(), + }, + )); Ok(()) } async fn publish_batch(&mut self, batch: Batch) -> Result<(), Self::EphemeralError> { - send( 
- self.0, - &self.1, - messages::ProcessorMessage::Substrate(messages::substrate::ProcessorMessage::Batch { batch }), - ) - .await; + self.send(&messages::ProcessorMessage::Substrate( + messages::substrate::ProcessorMessage::Batch { batch }, + )); Ok(()) } async fn publish_signed_batch(&mut self, batch: SignedBatch) -> Result<(), Self::EphemeralError> { - send( - self.0, - &self.1, - messages::ProcessorMessage::Coordinator( - messages::coordinator::ProcessorMessage::SignedBatch { batch }, - ), - ) - .await; + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::SignedBatch { batch }, + )); Ok(()) } @@ -180,17 +215,12 @@ impl signers::Coordinator for CoordinatorSend { session: Session, signature: Signature, ) -> Result<(), Self::EphemeralError> { - send( - self.0, - &self.1, - messages::ProcessorMessage::Coordinator( - messages::coordinator::ProcessorMessage::SignedSlashReport { - session, - signature: signature.encode(), - }, - ), - ) - .await; + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::SignedSlashReport { + session, + signature: signature.encode(), + }, + )); Ok(()) } } diff --git a/processor/bin/src/lib.rs b/processor/bin/src/lib.rs index 15873873b..67ea61507 100644 --- a/processor/bin/src/lib.rs +++ b/processor/bin/src/lib.rs @@ -158,7 +158,7 @@ async fn first_block_after_time(feed: &S, serai_time: u64) -> u6 } /// The main loop of a Processor, interacting with the Coordinator. -pub async fn coordinator_loop< +pub async fn main_loop< S: ScannerFeed, K: KeyGenParams>>, Sch: Scheduler< @@ -192,7 +192,7 @@ pub async fn coordinator_loop< if let messages::key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } = &msg { new_key = Some(*session) } - coordinator.send_message(messages::ProcessorMessage::KeyGen(msg)).await; + coordinator.send_message(&messages::ProcessorMessage::KeyGen(msg)); } // If we were yielded a key, register it in the signers diff --git a/processor/bitcoin/src/db.rs b/processor/bitcoin/src/db.rs index b0acc427d..1d73ebfee 100644 --- a/processor/bitcoin/src/db.rs +++ b/processor/bitcoin/src/db.rs @@ -1,21 +1,4 @@ -use ciphersuite::group::GroupEncoding; - -use serai_client::validator_sets::primitives::Session; - -use serai_db::{Get, DbTxn, create_db, db_channel}; -use primitives::EncodableG; - -create_db! { - Processor { - ExternalKeyForSessionForSigners: (session: Session) -> EncodableG, - } -} - -db_channel! { - Processor { - KeyToActivate: () -> EncodableG - } -} +use serai_db::{Get, DbTxn, create_db}; create_db! 
{ BitcoinProcessor { diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index 09228d44f..74e174ee2 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -57,8 +57,7 @@ async fn main() { tokio::spawn(TxIndexTask(feed.clone()).continually_run(index_task, vec![])); core::mem::forget(index_handle); - bin::coordinator_loop::<_, KeyGenParams, Scheduler<_>, Rpc>(db, feed.clone(), feed) - .await; + bin::main_loop::<_, KeyGenParams, Scheduler<_>, Rpc>(db, feed.clone(), feed).await; } /* From b9b591ac94309fc9ba7d1129bf7753dc3ddba995 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 12 Sep 2024 18:40:10 -0400 Subject: [PATCH 112/179] Monero processor primitives --- Cargo.lock | 36 ++- processor/bin/Cargo.toml | 17 +- processor/bin/src/coordinator.rs | 1 + processor/bitcoin/Cargo.toml | 12 +- processor/monero/Cargo.toml | 34 ++- processor/monero/src/key_gen.rs | 11 + processor/monero/src/lib.rs | 2 + processor/monero/src/main.rs | 43 ++++ processor/monero/src/primitives/block.rs | 54 +++++ processor/monero/src/primitives/mod.rs | 3 + processor/monero/src/primitives/output.rs | 86 ++++++++ .../monero/src/primitives/transaction.rs | 137 ++++++++++++ processor/monero/src/rpc.rs | 156 +++++++++++++ processor/monero/src/scheduler.rs | 205 ++++++++++++++++++ substrate/client/Cargo.toml | 4 +- substrate/client/src/networks/monero.rs | 189 +++++++++------- 16 files changed, 858 insertions(+), 132 deletions(-) create mode 100644 processor/monero/src/key_gen.rs create mode 100644 processor/monero/src/main.rs create mode 100644 processor/monero/src/primitives/block.rs create mode 100644 processor/monero/src/primitives/mod.rs create mode 100644 processor/monero/src/primitives/output.rs create mode 100644 processor/monero/src/primitives/transaction.rs create mode 100644 processor/monero/src/rpc.rs create mode 100644 processor/monero/src/scheduler.rs diff --git a/Cargo.lock b/Cargo.lock index 7e7d78a31..ec3ccf8b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8129,7 +8129,6 @@ dependencies = [ "borsh", "ciphersuite", "dkg", - "env_logger", "flexible-transcript", "hex", "log", @@ -8139,11 +8138,8 @@ dependencies = [ "secp256k1", "serai-client", "serai-db", - "serai-env", - "serai-message-queue", "serai-processor-bin", "serai-processor-key-gen", - "serai-processor-messages", "serai-processor-primitives", "serai-processor-scanner", "serai-processor-scheduler-primitives", @@ -8152,7 +8148,6 @@ dependencies = [ "serai-processor-utxo-scheduler-primitives", "tokio", "zalloc", - "zeroize", ] [[package]] @@ -8170,7 +8165,7 @@ dependencies = [ "frost-schnorrkel", "hex", "modular-frost", - "monero-wallet", + "monero-address", "multiaddr", "parity-scale-codec", "rand_core", @@ -8522,19 +8517,26 @@ version = "0.1.0" dependencies = [ "async-trait", "borsh", - "const-hex", + "ciphersuite", "dalek-ff-group", - "env_logger", + "dkg", + "flexible-transcript", "hex", "log", - "monero-simple-request-rpc", + "modular-frost", "monero-wallet", "parity-scale-codec", + "rand_core", + "serai-client", "serai-db", - "serai-env", - "serai-message-queue", - "serai-processor-messages", - "serde_json", + "serai-processor-bin", + "serai-processor-key-gen", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-signers", + "serai-processor-utxo-scheduler", + "serai-processor-utxo-scheduler-primitives", "tokio", "zalloc", ] @@ -8643,18 +8645,13 @@ name = "serai-processor-bin" version = "0.1.0" dependencies = [ "async-trait", - 
"bitcoin-serai", "borsh", "ciphersuite", "dkg", "env_logger", - "flexible-transcript", "hex", "log", - "modular-frost", "parity-scale-codec", - "rand_core", - "secp256k1", "serai-client", "serai-db", "serai-env", @@ -8665,10 +8662,7 @@ dependencies = [ "serai-processor-scanner", "serai-processor-scheduler-primitives", "serai-processor-signers", - "serai-processor-transaction-chaining-scheduler", - "serai-processor-utxo-scheduler-primitives", "tokio", - "zalloc", "zeroize", ] diff --git a/processor/bin/Cargo.toml b/processor/bin/Cargo.toml index f3f3b7536..01a774ac5 100644 --- a/processor/bin/Cargo.toml +++ b/processor/bin/Cargo.toml @@ -19,29 +19,22 @@ workspace = true [dependencies] async-trait = { version = "0.1", default-features = false } zeroize = { version = "1", default-features = false, features = ["std"] } -rand_core = { version = "0.6", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } -ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } -dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } -frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } -secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"] } -bitcoin-serai = { path = "../../networks/bitcoin", default-features = false, features = ["std"] } +serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] } log = { version = "0.4", default-features = false, features = ["std"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } -zalloc = { path = "../../common/zalloc" } -serai-db = { path = "../../common/db" } serai-env = { path = "../../common/env" } - -serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] } +serai-db = { path = "../../common/db" } messages = { package = "serai-processor-messages", path = "../messages" } key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } @@ -49,8 +42,6 @@ key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } primitives = { package = "serai-processor-primitives", path = "../primitives" } scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } scanner = { package = "serai-processor-scanner", path = "../scanner" } -utxo-scheduler = { package = "serai-processor-utxo-scheduler-primitives", path = "../scheduler/utxo/primitives" } -transaction-chaining-scheduler = { package = "serai-processor-transaction-chaining-scheduler", path = "../scheduler/utxo/transaction-chaining" } signers = { package = "serai-processor-signers", path = "../signers" } message-queue = { package = "serai-message-queue", path = 
"../../message-queue" } diff --git a/processor/bin/src/coordinator.rs b/processor/bin/src/coordinator.rs index 12442c3d7..ead4a131a 100644 --- a/processor/bin/src/coordinator.rs +++ b/processor/bin/src/coordinator.rs @@ -69,6 +69,7 @@ impl Coordinator { "monero" => NetworkId::Monero, _ => panic!("unrecognized network"), }; + // TODO: Read this from ScannerFeed let service = Service::Processor(network_id); let message_queue = Arc::new(MessageQueue::from_env(service)); diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml index c968e36b9..2d4958c7d 100644 --- a/processor/bitcoin/Cargo.toml +++ b/processor/bitcoin/Cargo.toml @@ -18,7 +18,6 @@ workspace = true [dependencies] async-trait = { version = "0.1", default-features = false } -zeroize = { version = "1", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } @@ -33,17 +32,14 @@ frost = { package = "modular-frost", path = "../../crypto/frost", default-featur secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"] } bitcoin-serai = { path = "../../networks/bitcoin", default-features = false, features = ["std"] } +serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] } + +zalloc = { path = "../../common/zalloc" } log = { version = "0.4", default-features = false, features = ["std"] } -env_logger = { version = "0.10", default-features = false, features = ["humantime"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } -zalloc = { path = "../../common/zalloc" } serai-db = { path = "../../common/db" } -serai-env = { path = "../../common/env" } -serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] } - -messages = { package = "serai-processor-messages", path = "../messages" } key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } primitives = { package = "serai-processor-primitives", path = "../primitives" } @@ -55,8 +51,6 @@ signers = { package = "serai-processor-signers", path = "../signers" } bin = { package = "serai-processor-bin", path = "../bin" } -message-queue = { package = "serai-message-queue", path = "../../message-queue" } - [features] parity-db = ["bin/parity-db"] rocksdb = ["bin/rocksdb"] diff --git a/processor/monero/Cargo.toml b/processor/monero/Cargo.toml index e71472e49..5538d025c 100644 --- a/processor/monero/Cargo.toml +++ b/processor/monero/Cargo.toml @@ -18,29 +18,39 @@ workspace = true [dependencies] async-trait = { version = "0.1", default-features = false } +rand_core = { version = "0.6", default-features = false } -const-hex = { version = "1", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -serde_json = { version = "1", default-features = false, features = ["std"] } -dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true } -monero-simple-request-rpc = { path = "../../networks/monero/rpc/simple-request", default-features = false, optional = true } -monero-wallet = { path = "../../networks/monero/wallet", default-features = false, features = ["std", 
"multisig", "compile-time-generators"], optional = true } +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } +dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["std"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ed25519"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ed25519"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } +monero-wallet = { path = "../../networks/monero/wallet", default-features = false, features = ["std", "multisig"] } + +serai-client = { path = "../../substrate/client", default-features = false, features = ["monero"] } + +zalloc = { path = "../../common/zalloc" } log = { version = "0.4", default-features = false, features = ["std"] } -env_logger = { version = "0.10", default-features = false, features = ["humantime"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } -zalloc = { path = "../../common/zalloc" } serai-db = { path = "../../common/db" } -serai-env = { path = "../../common/env" } -messages = { package = "serai-processor-messages", path = "../messages" } +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +utxo-scheduler = { package = "serai-processor-utxo-scheduler-primitives", path = "../scheduler/utxo/primitives" } +utxo-standard-scheduler = { package = "serai-processor-utxo-scheduler", path = "../scheduler/utxo/standard" } +signers = { package = "serai-processor-signers", path = "../signers" } -message-queue = { package = "serai-message-queue", path = "../../message-queue" } +bin = { package = "serai-processor-bin", path = "../bin" } [features] -parity-db = ["serai-db/parity-db"] -rocksdb = ["serai-db/rocksdb"] +parity-db = ["bin/parity-db"] +rocksdb = ["bin/rocksdb"] diff --git a/processor/monero/src/key_gen.rs b/processor/monero/src/key_gen.rs new file mode 100644 index 000000000..dee330293 --- /dev/null +++ b/processor/monero/src/key_gen.rs @@ -0,0 +1,11 @@ +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ed25519}; +use frost::ThresholdKeys; + +pub(crate) struct KeyGenParams; +impl key_gen::KeyGenParams for KeyGenParams { + const ID: &'static str = "Monero"; + + type ExternalNetworkCiphersuite = Ed25519; + + fn tweak_keys(keys: &mut ThresholdKeys) {} +} diff --git a/processor/monero/src/lib.rs b/processor/monero/src/lib.rs index 8786bef33..f9b334ef5 100644 --- a/processor/monero/src/lib.rs +++ b/processor/monero/src/lib.rs @@ -1,3 +1,4 @@ +/* #![cfg_attr(docsrs, feature(doc_auto_cfg))] #![doc = include_str!("../README.md")] #![deny(missing_docs)] @@ -809,3 +810,4 @@ impl UtxoNetwork for Monero { // TODO: Test creating a TX this big const MAX_INPUTS: usize = 120; } +*/ diff --git a/processor/monero/src/main.rs b/processor/monero/src/main.rs new file mode 100644 index 000000000..41896de11 --- /dev/null +++ b/processor/monero/src/main.rs @@ -0,0 +1,43 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc = + 
zalloc::ZeroizingAlloc(std::alloc::System); + +use monero_wallet::rpc::Rpc as MRpc; + +mod primitives; +pub(crate) use crate::primitives::*; + +/* +mod key_gen; +use crate::key_gen::KeyGenParams; +mod rpc; +use rpc::Rpc; +mod scheduler; +use scheduler::Scheduler; + +#[tokio::main] +async fn main() { + let db = bin::init(); + let feed = Rpc { + db: db.clone(), + rpc: loop { + match MRpc::new(bin::url()).await { + Ok(rpc) => break rpc, + Err(e) => { + log::error!("couldn't connect to the Monero node: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } + } + }, + }; + + bin::main_loop::<_, KeyGenParams, Scheduler<_>, Rpc>(db, feed.clone(), feed).await; +} +*/ + +#[tokio::main] +async fn main() {} diff --git a/processor/monero/src/primitives/block.rs b/processor/monero/src/primitives/block.rs new file mode 100644 index 000000000..40d0f2969 --- /dev/null +++ b/processor/monero/src/primitives/block.rs @@ -0,0 +1,54 @@ +use std::collections::HashMap; + +use ciphersuite::{Ciphersuite, Ed25519}; + +use monero_wallet::{transaction::Transaction, block::Block as MBlock}; + +use serai_client::networks::monero::Address; + +use primitives::{ReceivedOutput, EventualityTracker}; + +use crate::{output::Output, transaction::Eventuality}; + +#[derive(Clone, Debug)] +pub(crate) struct BlockHeader(pub(crate) MBlock); +impl primitives::BlockHeader for BlockHeader { + fn id(&self) -> [u8; 32] { + self.0.hash() + } + fn parent(&self) -> [u8; 32] { + self.0.header.previous + } +} + +#[derive(Clone, Debug)] +pub(crate) struct Block(pub(crate) MBlock, Vec); + +#[async_trait::async_trait] +impl primitives::Block for Block { + type Header = BlockHeader; + + type Key = ::G; + type Address = Address; + type Output = Output; + type Eventuality = Eventuality; + + fn id(&self) -> [u8; 32] { + self.0.hash() + } + + fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec { + todo!("TODO") + } + + #[allow(clippy::type_complexity)] + fn check_for_eventuality_resolutions( + &self, + eventualities: &mut EventualityTracker, + ) -> HashMap< + >::TransactionId, + Self::Eventuality, + > { + todo!("TODO") + } +} diff --git a/processor/monero/src/primitives/mod.rs b/processor/monero/src/primitives/mod.rs new file mode 100644 index 000000000..fba52dd96 --- /dev/null +++ b/processor/monero/src/primitives/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod output; +pub(crate) mod transaction; +pub(crate) mod block; diff --git a/processor/monero/src/primitives/output.rs b/processor/monero/src/primitives/output.rs new file mode 100644 index 000000000..d3eb3be3b --- /dev/null +++ b/processor/monero/src/primitives/output.rs @@ -0,0 +1,86 @@ +use std::io; + +use ciphersuite::{group::Group, Ciphersuite, Ed25519}; + +use monero_wallet::WalletOutput; + +use scale::{Encode, Decode}; +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::{ + primitives::{Coin, Amount, Balance}, + networks::monero::Address, +}; + +use primitives::{OutputType, ReceivedOutput}; + +#[rustfmt::skip] +#[derive( + Clone, Copy, PartialEq, Eq, Default, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, +)] +pub(crate) struct OutputId(pub(crate) [u8; 32]); +impl AsRef<[u8]> for OutputId { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} +impl AsMut<[u8]> for OutputId { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Output(WalletOutput); + +impl Output { + pub(crate) fn new(output: WalletOutput) -> Self { + Self(output) + } +} + +impl 
ReceivedOutput<::G, Address> for Output { + type Id = OutputId; + type TransactionId = [u8; 32]; + + fn kind(&self) -> OutputType { + todo!("TODO") + } + + fn id(&self) -> Self::Id { + OutputId(self.0.key().compress().to_bytes()) + } + + fn transaction_id(&self) -> Self::TransactionId { + self.0.transaction() + } + + fn key(&self) -> ::G { + // The spend key will be a key we generated, so it'll be in the prime-order subgroup + // The output's key is the spend key + (key_offset * G), so it's in the prime-order subgroup if + // the spend key is + dalek_ff_group::EdwardsPoint( + self.0.key() - (*::G::generator() * self.0.key_offset()), + ) + } + + fn presumed_origin(&self) -> Option
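// Monero uses stealth addresses and ring signatures: the sender's address never appears
// on-chain and the true spent input is hidden among decoys, so no origin can be presumed.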

{ + None + } + + fn balance(&self) -> Balance { + Balance { coin: Coin::Monero, amount: Amount(self.0.commitment().amount) } + } + + fn data(&self) -> &[u8] { + self.0.arbitrary_data().first().map_or(&[], Vec::as_slice) + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + self.0.write(writer) + } + + fn read(reader: &mut R) -> io::Result { + WalletOutput::read(reader).map(Self) + } +} diff --git a/processor/monero/src/primitives/transaction.rs b/processor/monero/src/primitives/transaction.rs new file mode 100644 index 000000000..1ba494719 --- /dev/null +++ b/processor/monero/src/primitives/transaction.rs @@ -0,0 +1,137 @@ +use std::io; + +use rand_core::{RngCore, CryptoRng}; + +use ciphersuite::Ed25519; +use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; + +use monero_wallet::{ + transaction::Transaction as MTransaction, + send::{ + SignableTransaction as MSignableTransaction, TransactionMachine, Eventuality as MEventuality, + }, +}; + +use crate::output::OutputId; + +#[derive(Clone, Debug)] +pub(crate) struct Transaction(pub(crate) MTransaction); + +impl From for Transaction { + fn from(tx: MTransaction) -> Self { + Self(tx) + } +} + +impl scheduler::Transaction for Transaction { + fn read(reader: &mut impl io::Read) -> io::Result { + MTransaction::read(reader).map(Self) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.0.write(writer) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct SignableTransaction { + id: [u8; 32], + signable: MSignableTransaction, +} + +#[derive(Clone)] +pub(crate) struct ClonableTransctionMachine(MSignableTransaction, ThresholdKeys); +impl PreprocessMachine for ClonableTransctionMachine { + type Preprocess = ::Preprocess; + type Signature = ::Signature; + type SignMachine = ::SignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + self.0.multisig(self.1).expect("incorrect keys used for SignableTransaction").preprocess(rng) + } +} + +impl scheduler::SignableTransaction for SignableTransaction { + type Transaction = Transaction; + type Ciphersuite = Ed25519; + type PreprocessMachine = ClonableTransctionMachine; + + fn read(reader: &mut impl io::Read) -> io::Result { + let mut id = [0; 32]; + reader.read_exact(&mut id)?; + + let signable = MSignableTransaction::read(reader)?; + Ok(SignableTransaction { id, signable }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(&self.id)?; + self.signable.write(writer) + } + + fn id(&self) -> [u8; 32] { + self.id + } + + fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine { + ClonableTransctionMachine(self.signable, keys) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Eventuality { + id: [u8; 32], + singular_spent_output: Option, + eventuality: MEventuality, +} + +impl primitives::Eventuality for Eventuality { + type OutputId = OutputId; + + fn id(&self) -> [u8; 32] { + self.id + } + + // We define the lookup as our ID since the resolving transaction only has a singular possible ID + fn lookup(&self) -> Vec { + self.eventuality.extra() + } + + fn singular_spent_output(&self) -> Option { + self.singular_spent_output + } + + fn read(reader: &mut impl io::Read) -> io::Result { + let mut id = [0; 32]; + reader.read_exact(&mut id)?; + + let singular_spent_output = { + let mut singular_spent_output_opt = [0xff]; + reader.read_exact(&mut singular_spent_output_opt)?; + assert!(singular_spent_output_opt[0] <= 1); + (singular_spent_output_opt[0] == 1) + .then(|| -> 
io::Result<_> { + let mut singular_spent_output = [0; 32]; + reader.read_exact(&mut singular_spent_output)?; + Ok(OutputId(singular_spent_output)) + }) + .transpose()? + }; + + let eventuality = MEventuality::read(reader)?; + Ok(Self { id, singular_spent_output, eventuality }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(&self.id)?; + + if let Some(singular_spent_output) = self.singular_spent_output { + writer.write_all(&[1])?; + writer.write_all(singular_spent_output.as_ref())?; + } else { + writer.write_all(&[0])?; + } + + self.eventuality.write(writer) + } +} diff --git a/processor/monero/src/rpc.rs b/processor/monero/src/rpc.rs new file mode 100644 index 000000000..a6f6e5fd8 --- /dev/null +++ b/processor/monero/src/rpc.rs @@ -0,0 +1,156 @@ +use bitcoin_serai::rpc::{RpcError, Rpc as BRpc}; + +use serai_client::primitives::{NetworkId, Coin, Amount}; + +use serai_db::Db; +use scanner::ScannerFeed; +use signers::TransactionPublisher; + +use crate::{ + db, + transaction::Transaction, + block::{BlockHeader, Block}, +}; + +#[derive(Clone)] +pub(crate) struct Rpc { + pub(crate) db: D, + pub(crate) rpc: BRpc, +} + +#[async_trait::async_trait] +impl ScannerFeed for Rpc { + const NETWORK: NetworkId = NetworkId::Bitcoin; + const CONFIRMATIONS: u64 = 6; + const WINDOW_LENGTH: u64 = 6; + + const TEN_MINUTES: u64 = 1; + + type Block = Block; + + type EphemeralError = RpcError; + + async fn latest_finalized_block_number(&self) -> Result { + db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError) + } + + async fn time_of_block(&self, number: u64) -> Result { + let number = usize::try_from(number).unwrap(); + + /* + The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the + median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve + CLTV). This creates a monotonic median time which we use as the block time. + */ + // This implements `GetMedianTimePast` + let median = { + const MEDIAN_TIMESPAN: usize = 11; + let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN); + for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number { + timestamps.push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time); + } + timestamps.sort(); + timestamps[timestamps.len() / 2] + }; + + /* + This block's timestamp is guaranteed to be greater than this median: + https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9 + /src/validation.cpp#L4182-L4184 + + This does not guarantee the median always increases however. Take the following trivial + example, as the window is initially built: + + 0 block has time 0 // Prior blocks: [] + 1 block has time 1 // Prior blocks: [0] + 2 block has time 2 // Prior blocks: [0, 1] + 3 block has time 2 // Prior blocks: [0, 1, 2] + + These two blocks have the same time (both greater than the median of their prior blocks) and + the same median. + + The median will never decrease however. The values pushed onto the window will always be + greater than the median. If a value greater than the median is popped, the median will remain + the same (due to the counterbalance of the pushed value). If a value less than the median is + popped, the median will increase (either to another instance of the same value, yet one + closer to the end of the repeating sequence, or to a higher value). 
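To make the invariant concrete (hypothetical values, window of 3 for brevity): [1, 2, 2] has median 2; popping 1 and pushing 5 yields [2, 2, 5], median still 2; popping 2 and pushing 7 yields [2, 5, 7], median 5. The median held, then rose; it never falls.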
+ */ + Ok(median.into()) + } + + async fn unchecked_block_header_by_number( + &self, + number: u64, + ) -> Result<::Header, Self::EphemeralError> { + Ok(BlockHeader( + self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?.header, + )) + } + + async fn unchecked_block_by_number( + &self, + number: u64, + ) -> Result { + Ok(Block( + self.db.clone(), + self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?, + )) + } + + fn dust(coin: Coin) -> Amount { + assert_eq!(coin, Coin::Bitcoin); + + /* + A Taproot input is: + - 36 bytes for the OutPoint + - 0 bytes for the script (+1 byte for the length) + - 4 bytes for the sequence + Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format + + There's also: + - 1 byte for the witness length + - 1 byte for the signature length + - 64 bytes for the signature + which have the SegWit discount. + + (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units + 230 ceil div 4 = 57 vbytes + + Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. Currently, these are: + - 1000 sat/kilo-vbyte for a transaction to be relayed + - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte + The DUST constant needs to be determined by the latter. + Since these are solely relay rules, and may be raised, we require all outputs be spendable + under a 5000 sat/kilo-vbyte fee rate. + + 5000 sat/kilo-vbyte = 5 sat/vbyte + 5 * 57 = 285 sats/spent-output + + Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding + 100 vbytes, tripling the transaction size, then the sats/tx would be < 1000. + + Increase by an order of magnitude, in order to ensure this is actually worth our time, and we + get 10,000 satoshis. This is $5 if 1 BTC = 50,000 USD. 
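(Checking the arithmetic: 10,000 sats = 0.0001 BTC, and 0.0001 BTC * 50,000 USD/BTC = 5 USD.)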
+ */ + Amount(10_000) + } + + async fn cost_to_aggregate( + &self, + coin: Coin, + _reference_block: &Self::Block, + ) -> Result { + assert_eq!(coin, Coin::Bitcoin); + // TODO + Ok(Amount(0)) + } +} + +#[async_trait::async_trait] +impl TransactionPublisher for Rpc { + type EphemeralError = RpcError; + + async fn publish(&self, tx: Transaction) -> Result<(), Self::EphemeralError> { + self.rpc.send_raw_transaction(&tx.0).await.map(|_| ()) + } +} diff --git a/processor/monero/src/scheduler.rs b/processor/monero/src/scheduler.rs new file mode 100644 index 000000000..6e49d23d9 --- /dev/null +++ b/processor/monero/src/scheduler.rs @@ -0,0 +1,205 @@ +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::{ + bitcoin::ScriptBuf, + wallet::{TransactionError, SignableTransaction as BSignableTransaction, p2tr_script_buf}, +}; + +use serai_client::{ + primitives::{Coin, Amount}, + networks::bitcoin::Address, +}; + +use serai_db::Db; +use primitives::{OutputType, ReceivedOutput, Payment}; +use scanner::{KeyFor, AddressFor, OutputFor, BlockFor}; +use utxo_scheduler::{PlannedTransaction, TransactionPlanner}; +use transaction_chaining_scheduler::{EffectedReceivedOutputs, Scheduler as GenericScheduler}; + +use crate::{ + scan::{offsets_for_key, scanner}, + output::Output, + transaction::{SignableTransaction, Eventuality}, + rpc::Rpc, +}; + +fn address_from_serai_key(key: ::G, kind: OutputType) -> Address { + let offset = ::G::GENERATOR * offsets_for_key(key)[&kind]; + Address::new( + p2tr_script_buf(key + offset) + .expect("creating address from Serai key which wasn't properly tweaked"), + ) + .expect("couldn't create Serai-representable address for P2TR script") +} + +fn signable_transaction( + fee_per_vbyte: u64, + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, +) -> Result<(SignableTransaction, BSignableTransaction), TransactionError> { + assert!( + inputs.len() < + , EffectedReceivedOutputs>>>::MAX_INPUTS + ); + assert!( + (payments.len() + usize::from(u8::from(change.is_some()))) < + , EffectedReceivedOutputs>>>::MAX_OUTPUTS + ); + + let inputs = inputs.into_iter().map(|input| input.output).collect::>(); + + let mut payments = payments + .into_iter() + .map(|payment| { + (payment.address().clone(), { + let balance = payment.balance(); + assert_eq!(balance.coin, Coin::Bitcoin); + balance.amount.0 + }) + }) + .collect::>(); + /* + Push a payment to a key with a known private key which anyone can spend. If this transaction + gets stuck, this lets anyone create a child transaction spending this output, raising the fee, + getting the transaction unstuck (via CPFP). 
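(CPFP works because miners evaluate the combined feerate of a parent transaction and its unconfirmed child as a package, so a sufficiently high-fee child pays for the parent's inclusion.)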
+ */ + payments.push(( + // The generator is even so this is valid + Address::new(p2tr_script_buf(::G::GENERATOR).unwrap()).unwrap(), + // This uses the minimum output value allowed, as defined as a constant in bitcoin-serai + // TODO: Add a test for this comparing to bitcoin's `minimal_non_dust` + bitcoin_serai::wallet::DUST, + )); + + let change = change + .map(, EffectedReceivedOutputs>>>::change_address); + + BSignableTransaction::new( + inputs.clone(), + &payments + .iter() + .cloned() + .map(|(address, amount)| (ScriptBuf::from(address), amount)) + .collect::>(), + change.clone().map(ScriptBuf::from), + None, + fee_per_vbyte, + ) + .map(|bst| (SignableTransaction { inputs, payments, change, fee_per_vbyte }, bst)) +} + +pub(crate) struct Planner; +impl TransactionPlanner, EffectedReceivedOutputs>> for Planner { + type FeeRate = u64; + + type SignableTransaction = SignableTransaction; + + /* + Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT). + + A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes. While + our inputs are entirely SegWit, such fine tuning is not necessary and could create issues in + the future (if the size decreases or we misevaluate it). It also offers a minimal amount of + benefit when we are able to logarithmically accumulate inputs/fulfill payments. + + For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and + 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 + bytes. + + 100,000 / 192 = 520 + 520 * 192 leaves 160 bytes of overhead for the transaction structure itself. + */ + const MAX_INPUTS: usize = 520; + // We always reserve one output to create an anyone-can-spend output enabling anyone to use CPFP + // to unstick any transactions which had too low of a fee. + const MAX_OUTPUTS: usize = 519; + + fn fee_rate(block: &BlockFor>, coin: Coin) -> Self::FeeRate { + assert_eq!(coin, Coin::Bitcoin); + // TODO + 1 + } + + fn branch_address(key: KeyFor>) -> AddressFor> { + address_from_serai_key(key, OutputType::Branch) + } + fn change_address(key: KeyFor>) -> AddressFor> { + address_from_serai_key(key, OutputType::Change) + } + fn forwarding_address(key: KeyFor>) -> AddressFor> { + address_from_serai_key(key, OutputType::Forwarded) + } + + fn calculate_fee( + fee_rate: Self::FeeRate, + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, + ) -> Amount { + match signable_transaction::(fee_rate, inputs, payments, change) { + Ok(tx) => Amount(tx.1.needed_fee()), + Err( + TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, + ) => panic!("malformed arguments to calculate_fee"), + // No data, we have a minimum fee rate, we checked the amount of inputs/outputs + Err( + TransactionError::TooMuchData | + TransactionError::TooLowFee | + TransactionError::TooLargeTransaction, + ) => unreachable!(), + Err(TransactionError::NotEnoughFunds { fee, .. 
}) => Amount(fee), + } + } + + fn plan( + fee_rate: Self::FeeRate, + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, + ) -> PlannedTransaction, Self::SignableTransaction, EffectedReceivedOutputs>> { + let key = inputs.first().unwrap().key(); + for input in &inputs { + assert_eq!(key, input.key()); + } + + let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id()); + match signable_transaction::(fee_rate, inputs.clone(), payments, change) { + Ok(tx) => PlannedTransaction { + signable: tx.0, + eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output }, + auxilliary: EffectedReceivedOutputs({ + let tx = tx.1.transaction(); + let scanner = scanner(key); + + let mut res = vec![]; + for output in scanner.scan_transaction(tx) { + res.push(Output::new_with_presumed_origin( + key, + tx, + // It shouldn't matter if this is wrong as we should never try to return these + // We still provide an accurate value to ensure a lack of discrepancies + Some(Address::new(inputs[0].output.output().script_pubkey.clone()).unwrap()), + output, + )); + } + res + }), + }, + Err( + TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, + ) => panic!("malformed arguments to plan"), + // No data, we have a minimum fee rate, we checked the amount of inputs/outputs + Err( + TransactionError::TooMuchData | + TransactionError::TooLowFee | + TransactionError::TooLargeTransaction, + ) => unreachable!(), + Err(TransactionError::NotEnoughFunds { .. }) => { + panic!("plan called for a transaction without enough funds") + } + } + } +} + +pub(crate) type Scheduler = GenericScheduler, Planner>; diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml index 5cba05f07..5f7a24d43 100644 --- a/substrate/client/Cargo.toml +++ b/substrate/client/Cargo.toml @@ -42,7 +42,7 @@ simple-request = { path = "../../common/request", version = "0.1", optional = tr bitcoin = { version = "0.32", optional = true } ciphersuite = { path = "../../crypto/ciphersuite", version = "0.4", optional = true } -monero-wallet = { path = "../../networks/monero/wallet", version = "0.1.0", default-features = false, features = ["std"], optional = true } +monero-address = { path = "../../networks/monero/wallet/address", version = "0.1.0", default-features = false, features = ["std"], optional = true } [dev-dependencies] rand_core = "0.6" @@ -65,7 +65,7 @@ borsh = ["serai-abi/borsh"] networks = [] bitcoin = ["networks", "dep:bitcoin"] -monero = ["networks", "ciphersuite/ed25519", "monero-wallet"] +monero = ["networks", "ciphersuite/ed25519", "monero-address"] # Assumes the default usage is to use Serai as a DEX, which doesn't actually # require connecting to a Serai node diff --git a/substrate/client/src/networks/monero.rs b/substrate/client/src/networks/monero.rs index bd5e0a15c..c99a0abdd 100644 --- a/substrate/client/src/networks/monero.rs +++ b/substrate/client/src/networks/monero.rs @@ -1,102 +1,141 @@ use core::{str::FromStr, fmt}; -use scale::{Encode, Decode}; - use ciphersuite::{Ciphersuite, Ed25519}; -use monero_wallet::address::{AddressError, Network, AddressType, MoneroAddress}; +use monero_address::{Network, AddressType as MoneroAddressType, MoneroAddress}; + +use crate::primitives::ExternalAddress; + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +enum AddressType { + Legacy, + Subaddress, + Featured(u8), +} + +/// A representation of a Monero address. 
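+///
+/// Integrated addresses, and payment IDs generally, are not representable; conversions from
+/// addresses bearing a payment ID error.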
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct Address {
+  kind: AddressType,
+  spend: <Ed25519 as Ciphersuite>::G,
+  view: <Ed25519 as Ciphersuite>::G,
+}
 
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct Address(MoneroAddress);
-impl Address {
-  pub fn new(address: MoneroAddress) -> Option<Address> {
-    if address.payment_id().is_some() {
-      return None;
+fn byte_for_kind(kind: AddressType) -> u8 {
+  // We use the second and third highest bits for the type
+  // This leaves the top bit open for interpretation as a VarInt later
+  match kind {
+    AddressType::Legacy => 0,
+    AddressType::Subaddress => 1 << 5,
+    AddressType::Featured(flags) => {
+      // The flags only take up the low three bits
+      debug_assert!(flags <= 0b111);
+      (2 << 5) | flags
     }
-    Some(Address(address))
   }
 }
 
-impl FromStr for Address {
-  type Err = AddressError;
-  fn from_str(str: &str) -> Result<Address, Self::Err> {
-    MoneroAddress::from_str(Network::Mainnet, str).map(Address)
+impl borsh::BorshSerialize for Address {
+  fn serialize<W: borsh::io::Write>(&self, writer: &mut W) -> borsh::io::Result<()> {
+    writer.write_all(&[byte_for_kind(self.kind)])?;
+    writer.write_all(&self.spend.compress().to_bytes())?;
+    writer.write_all(&self.view.compress().to_bytes())
   }
 }
-
-impl fmt::Display for Address {
-  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-    self.0.fmt(f)
+impl borsh::BorshDeserialize for Address {
+  fn deserialize_reader<R: borsh::io::Read>(reader: &mut R) -> borsh::io::Result<Self> {
+    let mut kind_byte = [0xff];
+    reader.read_exact(&mut kind_byte)?;
+    let kind_byte = kind_byte[0];
+    let kind = match kind_byte >> 5 {
+      0 => AddressType::Legacy,
+      1 => AddressType::Subaddress,
+      2 => AddressType::Featured(kind_byte & 0b111),
+      _ => Err(borsh::io::Error::other("unrecognized type"))?,
+    };
+    // Check this wasn't malleated
+    if byte_for_kind(kind) != kind_byte {
+      Err(borsh::io::Error::other("malleated type byte"))?;
+    }
+    let spend = Ed25519::read_G(reader)?;
+    let view = Ed25519::read_G(reader)?;
+    Ok(Self { kind, spend, view })
   }
 }
 
-// SCALE-encoded variant of Monero addresses.
-#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
-enum EncodedAddressType {
-  Legacy,
-  Subaddress,
-  Featured(u8),
+impl TryFrom<MoneroAddress> for Address {
+  type Error = ();
+  fn try_from(address: MoneroAddress) -> Result<Self, Self::Error> {
+    let spend = address.spend().compress().to_bytes();
+    let view = address.view().compress().to_bytes();
+    let kind = match address.kind() {
+      MoneroAddressType::Legacy => AddressType::Legacy,
+      MoneroAddressType::LegacyIntegrated(_) => Err(())?,
+      MoneroAddressType::Subaddress => AddressType::Subaddress,
+      MoneroAddressType::Featured { subaddress, payment_id, guaranteed } => {
+        if payment_id.is_some() {
+          Err(())?
+        }
+        // This maintains the same bit layout as featured addresses use
+        AddressType::Featured(u8::from(*subaddress) + (u8::from(*guaranteed) << 2))
+      }
+    };
+    Ok(Address {
+      kind,
+      spend: Ed25519::read_G(&mut spend.as_slice()).map_err(|_| ())?,
+      view: Ed25519::read_G(&mut view.as_slice()).map_err(|_| ())?,
+    })
+  }
 }
 
-#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
-struct EncodedAddress {
-  kind: EncodedAddressType,
-  spend: [u8; 32],
-  view: [u8; 32],
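+// e.g. a guaranteed subaddress is `Featured(0b101)` (subaddress in bit 0, guaranteed in bit 2),
+// whose kind byte is (2 << 5) | 0b101 = 0x45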
+impl From<Address> for MoneroAddress {
+  fn from(address: Address) -> MoneroAddress {
+    let kind = match address.kind {
+      AddressType::Legacy => MoneroAddressType::Legacy,
+      AddressType::Subaddress => MoneroAddressType::Subaddress,
+      AddressType::Featured(features) => {
+        debug_assert!(features <= 0b111);
+        let subaddress = (features & 1) != 0;
+        let integrated = (features & (1 << 1)) != 0;
+        debug_assert!(!integrated);
+        let guaranteed = (features & (1 << 2)) != 0;
+        MoneroAddressType::Featured { subaddress, payment_id: None, guaranteed }
+      }
+    };
+    MoneroAddress::new(Network::Mainnet, kind, address.spend.0, address.view.0)
+  }
 }
 
-impl TryFrom<Vec<u8>> for Address {
+impl TryFrom<ExternalAddress> for Address {
   type Error = ();
-  fn try_from(data: Vec<u8>) -> Result<Self, Self::Error> {
-    // Decode as SCALE
-    let addr = EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())?;
-    // Convert over
-    Ok(Address(MoneroAddress::new(
-      Network::Mainnet,
-      match addr.kind {
-        EncodedAddressType::Legacy => AddressType::Legacy,
-        EncodedAddressType::Subaddress => AddressType::Subaddress,
-        EncodedAddressType::Featured(flags) => {
-          let subaddress = (flags & 1) != 0;
-          let integrated = (flags & (1 << 1)) != 0;
-          let guaranteed = (flags & (1 << 2)) != 0;
-          if integrated {
-            Err(())?;
-          }
-          AddressType::Featured { subaddress, payment_id: None, guaranteed }
-        }
-      },
-      Ed25519::read_G::<&[u8]>(&mut addr.spend.as_ref()).map_err(|_| ())?.0,
-      Ed25519::read_G::<&[u8]>(&mut addr.view.as_ref()).map_err(|_| ())?.0,
-    )))
+  fn try_from(data: ExternalAddress) -> Result<Self, Self::Error> {
+    // Decode as an Address
+    let mut data = data.as_ref();
+    let address =
+      <Address as borsh::BorshDeserialize>::deserialize_reader(&mut data).map_err(|_| ())?;
+    if !data.is_empty() {
+      Err(())?
+    }
+    Ok(address)
+  }
+}
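+
+// The encoding is 1 kind byte, the 32-byte spend key, then the 32-byte view key (65 bytes total),
+// so `TryFrom<ExternalAddress>` above and `From<Address>` below round-trip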
+impl From<Address> for ExternalAddress {
+  fn from(address: Address) -> ExternalAddress {
+    // This is 65 bytes which is less than MAX_ADDRESS_LEN
+    ExternalAddress::new(borsh::to_vec(&address).unwrap()).unwrap()
+  }
 }
 
-#[allow(clippy::from_over_into)]
-impl Into<MoneroAddress> for Address {
-  fn into(self) -> MoneroAddress {
-    self.0
+impl FromStr for Address {
+  type Err = ();
+  fn from_str(str: &str) -> Result<Self, Self::Err> {
+    let Ok(address) = MoneroAddress::from_str(Network::Mainnet, str) else { Err(())? };
+    Address::try_from(address)
   }
 }
 
-#[allow(clippy::from_over_into)]
-impl Into<Vec<u8>> for Address {
-  fn into(self) -> Vec<u8> {
-    EncodedAddress {
-      kind: match self.0.kind() {
-        AddressType::Legacy => EncodedAddressType::Legacy,
-        AddressType::LegacyIntegrated(_) => {
-          panic!("integrated address became Serai Monero address")
-        }
-        AddressType::Subaddress => EncodedAddressType::Subaddress,
-        AddressType::Featured { subaddress, payment_id, guaranteed } => {
-          debug_assert!(payment_id.is_none());
-          EncodedAddressType::Featured(u8::from(*subaddress) + (u8::from(*guaranteed) << 2))
-        }
-      },
-      spend: self.0.spend().compress().0,
-      view: self.0.view().compress().0,
-    }
-    .encode()
+impl fmt::Display for Address {
+  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+    MoneroAddress::from(*self).fmt(f)
   }
 }

From e3c1b12f612e953950ab018d9958e4c6703b8b79 Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Fri, 13 Sep 2024 00:10:52 -0400
Subject: [PATCH 113/179] Correct Multisig Rotation to use WINDOW_LENGTH where
 proper

---
 spec/processor/Multisig Rotation.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/spec/processor/Multisig Rotation.md b/spec/processor/Multisig Rotation.md
index 916ce56b4..867080259 100644
--- a/spec/processor/Multisig Rotation.md
+++ b/spec/processor/Multisig Rotation.md
@@ -12,11 +12,11 @@ The following timeline is established:
 1) The new multisig is created, and has its keys set on Serai. Once the next
    `Batch` with a new external network block is published, its block becomes
    the "queue block". The new multisig is set to activate at the "queue block",
    plus
-   `CONFIRMATIONS` blocks (the "activation block").
+   `WINDOW_LENGTH` blocks (the "activation block").
 
    We don't use the last `Batch`'s external network block, as that `Batch` may
-   be older than `CONFIRMATIONS` blocks. Any yet-to-be-included-and-finalized
-   `Batch` will be within `CONFIRMATIONS` blocks of what any processor has
+   be older than `WINDOW_LENGTH` blocks. Any yet-to-be-included-and-finalized
+   `Batch` will be within `WINDOW_LENGTH` blocks of what any processor has
    scanned however, as it'll wait for inclusion and finalization before
    continuing scanning.
@@ -122,7 +122,7 @@
 
    Once all the 6 hour period has expired, no `Eventuality`s remain, and all
    outputs are forwarded, the multisig publishes a final `Batch` of the first
-   block, plus `CONFIRMATIONS`, which met these conditions, regardless of if it
+   block, plus `WINDOW_LENGTH`, which met these conditions, regardless of if it
    would've otherwise had a `Batch`. No further actions by it, nor its
    validators, are expected (unless, of course, those validators remain present
    in the new multisig).
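A minimal sketch of the activation rule above, assuming `u64` block numbers; `activation_block`
is a hypothetical helper for illustration, not something this patch adds:

    // The new multisig activates WINDOW_LENGTH blocks after the "queue block",
    // the external network block of the next `Batch` published after key-set.
    // Any yet-to-be-finalized `Batch` is within WINDOW_LENGTH blocks of what
    // processors have scanned, so every processor observes the activation in time.
    fn activation_block(queue_block: u64, window_length: u64) -> u64 {
      queue_block + window_length
    }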
From ea9cd4893a9c8e392ff656f95fc070705958b5b5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 13 Sep 2024 00:48:57 -0400 Subject: [PATCH 114/179] Misc continuances on the Monero processor --- Cargo.lock | 2 +- processor/bitcoin/src/rpc.rs | 2 + processor/key-gen/src/lib.rs | 6 +- processor/monero/Cargo.toml | 1 + processor/monero/src/key_gen.rs | 5 +- processor/monero/src/main.rs | 2 +- processor/monero/src/rpc.rs | 117 ++++++------------------------ processor/monero/src/scheduler.rs | 31 ++------ processor/scanner/src/lib.rs | 6 ++ 9 files changed, 46 insertions(+), 126 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec3ccf8b2..b3419a85e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5105,7 +5105,6 @@ dependencies = [ "hex", "modular-frost", "monero-address", - "monero-clsag", "monero-rpc", "monero-serai", "monero-simple-request-rpc", @@ -8524,6 +8523,7 @@ dependencies = [ "hex", "log", "modular-frost", + "monero-simple-request-rpc", "monero-wallet", "parity-scale-codec", "rand_core", diff --git a/processor/bitcoin/src/rpc.rs b/processor/bitcoin/src/rpc.rs index a6f6e5fd8..23db55703 100644 --- a/processor/bitcoin/src/rpc.rs +++ b/processor/bitcoin/src/rpc.rs @@ -21,7 +21,9 @@ pub(crate) struct Rpc { #[async_trait::async_trait] impl ScannerFeed for Rpc { const NETWORK: NetworkId = NetworkId::Bitcoin; + // 6 confirmations is widely accepted as secure and shouldn't occur const CONFIRMATIONS: u64 = 6; + // The window length should be roughly an hour const WINDOW_LENGTH: u64 = 6; const TEN_MINUTES: u64 = 1; diff --git a/processor/key-gen/src/lib.rs b/processor/key-gen/src/lib.rs index fd847cc55..4db87b201 100644 --- a/processor/key-gen/src/lib.rs +++ b/processor/key-gen/src/lib.rs @@ -43,7 +43,11 @@ pub trait KeyGenParams { >; /// Tweaks keys as necessary/beneficial. - fn tweak_keys(keys: &mut ThresholdKeys); + /// + /// A default implementation which doesn't perform any tweaking is provided. + fn tweak_keys(keys: &mut ThresholdKeys) { + let _ = keys; + } /// Encode keys as optimal. 
/// diff --git a/processor/monero/Cargo.toml b/processor/monero/Cargo.toml index 5538d025c..f70d6187e 100644 --- a/processor/monero/Cargo.toml +++ b/processor/monero/Cargo.toml @@ -31,6 +31,7 @@ dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } monero-wallet = { path = "../../networks/monero/wallet", default-features = false, features = ["std", "multisig"] } +monero-simple-request-rpc = { path = "../../networks/monero/rpc/simple-request", default-features = false } serai-client = { path = "../../substrate/client", default-features = false, features = ["monero"] } diff --git a/processor/monero/src/key_gen.rs b/processor/monero/src/key_gen.rs index dee330293..6e30d7bf3 100644 --- a/processor/monero/src/key_gen.rs +++ b/processor/monero/src/key_gen.rs @@ -1,11 +1,8 @@ -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ed25519}; -use frost::ThresholdKeys; +use ciphersuite::Ed25519; pub(crate) struct KeyGenParams; impl key_gen::KeyGenParams for KeyGenParams { const ID: &'static str = "Monero"; type ExternalNetworkCiphersuite = Ed25519; - - fn tweak_keys(keys: &mut ThresholdKeys) {} } diff --git a/processor/monero/src/main.rs b/processor/monero/src/main.rs index 41896de11..eda24b566 100644 --- a/processor/monero/src/main.rs +++ b/processor/monero/src/main.rs @@ -11,11 +11,11 @@ use monero_wallet::rpc::Rpc as MRpc; mod primitives; pub(crate) use crate::primitives::*; -/* mod key_gen; use crate::key_gen::KeyGenParams; mod rpc; use rpc::Rpc; +/* mod scheduler; use scheduler::Scheduler; diff --git a/processor/monero/src/rpc.rs b/processor/monero/src/rpc.rs index a6f6e5fd8..21a202cc2 100644 --- a/processor/monero/src/rpc.rs +++ b/processor/monero/src/rpc.rs @@ -1,81 +1,43 @@ -use bitcoin_serai::rpc::{RpcError, Rpc as BRpc}; +use monero_wallet::rpc::{RpcError, Rpc as RpcTrait}; +use monero_simple_request_rpc::SimpleRequestRpc; use serai_client::primitives::{NetworkId, Coin, Amount}; -use serai_db::Db; use scanner::ScannerFeed; use signers::TransactionPublisher; use crate::{ - db, transaction::Transaction, block::{BlockHeader, Block}, }; #[derive(Clone)] -pub(crate) struct Rpc { - pub(crate) db: D, - pub(crate) rpc: BRpc, +pub(crate) struct Rpc { + pub(crate) rpc: SimpleRequestRpc, } #[async_trait::async_trait] -impl ScannerFeed for Rpc { - const NETWORK: NetworkId = NetworkId::Bitcoin; - const CONFIRMATIONS: u64 = 6; - const WINDOW_LENGTH: u64 = 6; +impl ScannerFeed for Rpc { + const NETWORK: NetworkId = NetworkId::Monero; + // Outputs aren't spendable until 10 blocks later due to the 10-block lock + // Since we assumed scanned outputs are spendable, that sets a minimum confirmation depth of 10 + // A 10-block reorganization hasn't been observed in years and shouldn't occur + const CONFIRMATIONS: u64 = 10; + // The window length should be roughly an hour + const WINDOW_LENGTH: u64 = 30; - const TEN_MINUTES: u64 = 1; + const TEN_MINUTES: u64 = 5; - type Block = Block; + type Block = Block; type EphemeralError = RpcError; async fn latest_finalized_block_number(&self) -> Result { - db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError) + Ok(self.rpc.get_height().await?.checked_sub(1).expect("connected to an invalid Monero RPC").try_into().unwrap()) } async fn time_of_block(&self, number: u64) -> Result { - let number = usize::try_from(number).unwrap(); - - /* - The block time isn't guaranteed to be monotonic. 
It is guaranteed to be greater than the - median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve - CLTV). This creates a monotonic median time which we use as the block time. - */ - // This implements `GetMedianTimePast` - let median = { - const MEDIAN_TIMESPAN: usize = 11; - let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN); - for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number { - timestamps.push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time); - } - timestamps.sort(); - timestamps[timestamps.len() / 2] - }; - - /* - This block's timestamp is guaranteed to be greater than this median: - https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9 - /src/validation.cpp#L4182-L4184 - - This does not guarantee the median always increases however. Take the following trivial - example, as the window is initially built: - - 0 block has time 0 // Prior blocks: [] - 1 block has time 1 // Prior blocks: [0] - 2 block has time 2 // Prior blocks: [0, 1] - 3 block has time 2 // Prior blocks: [0, 1, 2] - - These two blocks have the same time (both greater than the median of their prior blocks) and - the same median. - - The median will never decrease however. The values pushed onto the window will always be - greater than the median. If a value greater than the median is popped, the median will remain - the same (due to the counterbalance of the pushed value). If a value less than the median is - popped, the median will increase (either to another instance of the same value, yet one - closer to the end of the repeating sequence, or to a higher value). - */ - Ok(median.into()) + todo!("TODO") } async fn unchecked_block_header_by_number( @@ -83,7 +45,7 @@ impl ScannerFeed for Rpc { number: u64, ) -> Result<::Header, Self::EphemeralError> { Ok(BlockHeader( - self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?.header, + self.rpc.get_block_by_number(number.try_into().unwrap()).await? )) } @@ -91,48 +53,13 @@ impl ScannerFeed for Rpc { &self, number: u64, ) -> Result { - Ok(Block( - self.db.clone(), - self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?, - )) + todo!("TODO") } fn dust(coin: Coin) -> Amount { - assert_eq!(coin, Coin::Bitcoin); + assert_eq!(coin, Coin::Monero); - /* - A Taproot input is: - - 36 bytes for the OutPoint - - 0 bytes for the script (+1 byte for the length) - - 4 bytes for the sequence - Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format - - There's also: - - 1 byte for the witness length - - 1 byte for the signature length - - 64 bytes for the signature - which have the SegWit discount. - - (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units - 230 ceil div 4 = 57 vbytes - - Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. Currently, these are: - - 1000 sat/kilo-vbyte for a transaction to be relayed - - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte - The DUST constant needs to be determined by the latter. - Since these are solely relay rules, and may be raised, we require all outputs be spendable - under a 5000 sat/kilo-vbyte fee rate. - - 5000 sat/kilo-vbyte = 5 sat/vbyte - 5 * 57 = 285 sats/spent-output - - Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding - 100 vbytes, tripling the transaction size, then the sats/tx would be < 1000. 
- - Increase by an order of magnitude, in order to ensure this is actually worth our time, and we - get 10,000 satoshis. This is $5 if 1 BTC = 50,000 USD. - */ - Amount(10_000) + todo!("TODO") } async fn cost_to_aggregate( @@ -147,10 +74,10 @@ impl ScannerFeed for Rpc { } #[async_trait::async_trait] -impl TransactionPublisher for Rpc { +impl TransactionPublisher for Rpc { type EphemeralError = RpcError; async fn publish(&self, tx: Transaction) -> Result<(), Self::EphemeralError> { - self.rpc.send_raw_transaction(&tx.0).await.map(|_| ()) + self.rpc.publish_transaction(&tx.0).await } } diff --git a/processor/monero/src/scheduler.rs b/processor/monero/src/scheduler.rs index 6e49d23d9..25f17c641 100644 --- a/processor/monero/src/scheduler.rs +++ b/processor/monero/src/scheduler.rs @@ -14,7 +14,6 @@ use serai_db::Db; use primitives::{OutputType, ReceivedOutput, Payment}; use scanner::{KeyFor, AddressFor, OutputFor, BlockFor}; use utxo_scheduler::{PlannedTransaction, TransactionPlanner}; -use transaction_chaining_scheduler::{EffectedReceivedOutputs, Scheduler as GenericScheduler}; use crate::{ scan::{offsets_for_key, scanner}, @@ -40,11 +39,11 @@ fn signable_transaction( ) -> Result<(SignableTransaction, BSignableTransaction), TransactionError> { assert!( inputs.len() < - , EffectedReceivedOutputs>>>::MAX_INPUTS + , ()>>::MAX_INPUTS ); assert!( (payments.len() + usize::from(u8::from(change.is_some()))) < - , EffectedReceivedOutputs>>>::MAX_OUTPUTS + , ()>>::MAX_OUTPUTS ); let inputs = inputs.into_iter().map(|input| input.output).collect::>(); @@ -73,7 +72,7 @@ fn signable_transaction( )); let change = change - .map(, EffectedReceivedOutputs>>>::change_address); + .map(, ()>>::change_address); BSignableTransaction::new( inputs.clone(), @@ -90,7 +89,7 @@ fn signable_transaction( } pub(crate) struct Planner; -impl TransactionPlanner, EffectedReceivedOutputs>> for Planner { +impl TransactionPlanner for Planner { type FeeRate = u64; type SignableTransaction = SignableTransaction; @@ -157,7 +156,7 @@ impl TransactionPlanner, EffectedReceivedOutputs>> for Plan inputs: Vec>>, payments: Vec>>>, change: Option>>, - ) -> PlannedTransaction, Self::SignableTransaction, EffectedReceivedOutputs>> { + ) -> PlannedTransaction, Self::SignableTransaction, ()> { let key = inputs.first().unwrap().key(); for input in &inputs { assert_eq!(key, input.key()); @@ -168,23 +167,7 @@ impl TransactionPlanner, EffectedReceivedOutputs>> for Plan Ok(tx) => PlannedTransaction { signable: tx.0, eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output }, - auxilliary: EffectedReceivedOutputs({ - let tx = tx.1.transaction(); - let scanner = scanner(key); - - let mut res = vec![]; - for output in scanner.scan_transaction(tx) { - res.push(Output::new_with_presumed_origin( - key, - tx, - // It shouldn't matter if this is wrong as we should never try to return these - // We still provide an accurate value to ensure a lack of discrepancies - Some(Address::new(inputs[0].output.output().script_pubkey.clone()).unwrap()), - output, - )); - } - res - }), + auxilliary: (), }, Err( TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, @@ -202,4 +185,4 @@ impl TransactionPlanner, EffectedReceivedOutputs>> for Plan } } -pub(crate) type Scheduler = GenericScheduler, Planner>; +pub(crate) type Scheduler = utxo_standard_scheduler::Scheduler; diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index ebd783bfa..d100815de 100644 --- a/processor/scanner/src/lib.rs +++ 
b/processor/scanner/src/lib.rs @@ -67,6 +67,12 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { /// The amount of confirmations a block must have to be considered finalized. /// /// This value must be at least `1`. + // This is distinct from `WINDOW_LENGTH` as it's only used for determining the lifetime of the + // key. The key switches to various stages of its lifetime depending on when user transactions + // will hit the Serai network (relative to the time they're made) and when outputs created by + // Serai become available again. If we set a long WINDOW_LENGTH, say two hours, that doesn't mean + // we expect user transactions made within a few minutes of a new key being declared to only + // appear in finalized blocks two hours later. const CONFIRMATIONS: u64; /// The amount of blocks to process in parallel. From a9692401b33a7faa4c4fabe4c59153e0cd4bc3da Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 13 Sep 2024 01:14:47 -0400 Subject: [PATCH 115/179] Remove async-trait from processor/ Part of https://github.com/serai-dex/issues/607. --- processor/bin/Cargo.toml | 1 - processor/bin/src/coordinator.rs | 84 ++-- processor/bitcoin/Cargo.toml | 1 - processor/bitcoin/src/main.rs | 2 - processor/bitcoin/src/primitives/block.rs | 1 - processor/bitcoin/src/rpc.rs | 151 +++--- processor/bitcoin/src/txindex.rs | 148 +++--- processor/ethereum/Cargo.toml | 2 - processor/monero/Cargo.toml | 1 - processor/monero/src/primitives/block.rs | 1 - processor/monero/src/rpc.rs | 63 ++- processor/primitives/Cargo.toml | 2 - processor/primitives/src/block.rs | 1 - processor/primitives/src/task.rs | 95 ++-- processor/scanner/Cargo.toml | 3 - processor/scanner/src/eventuality/mod.rs | 544 +++++++++++----------- processor/scanner/src/index/mod.rs | 94 ++-- processor/scanner/src/lib.rs | 32 +- processor/scanner/src/report/mod.rs | 204 ++++---- processor/scanner/src/scan/mod.rs | 479 +++++++++---------- processor/scanner/src/substrate/mod.rs | 182 ++++---- processor/signers/Cargo.toml | 3 +- processor/signers/src/batch/mod.rs | 192 ++++---- processor/signers/src/coordinator/mod.rs | 228 ++++----- processor/signers/src/cosign/mod.rs | 115 ++--- processor/signers/src/lib.rs | 27 +- processor/signers/src/slash_report.rs | 116 ++--- processor/signers/src/transaction/mod.rs | 5 +- processor/src/tests/scanner.rs | 2 +- 29 files changed, 1441 insertions(+), 1338 deletions(-) diff --git a/processor/bin/Cargo.toml b/processor/bin/Cargo.toml index 01a774ac5..f6da8b7c3 100644 --- a/processor/bin/Cargo.toml +++ b/processor/bin/Cargo.toml @@ -17,7 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -async-trait = { version = "0.1", default-features = false } zeroize = { version = "1", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"] } diff --git a/processor/bin/src/coordinator.rs b/processor/bin/src/coordinator.rs index ead4a131a..6fe5aea08 100644 --- a/processor/bin/src/coordinator.rs +++ b/processor/bin/src/coordinator.rs @@ -1,3 +1,4 @@ +use core::future::Future; use std::sync::{LazyLock, Arc, Mutex}; use tokio::sync::mpsc; @@ -169,59 +170,74 @@ impl Coordinator { } } -#[async_trait::async_trait] impl signers::Coordinator for CoordinatorSend { type EphemeralError = (); - async fn send( + fn send( &mut self, msg: messages::sign::ProcessorMessage, - ) -> Result<(), Self::EphemeralError> { - self.send(&messages::ProcessorMessage::Sign(msg)); - Ok(()) + ) -> impl Send + Future> { + async move { + 
self.send(&messages::ProcessorMessage::Sign(msg)); + Ok(()) + } } - async fn publish_cosign( + fn publish_cosign( &mut self, block_number: u64, block: [u8; 32], signature: Signature, - ) -> Result<(), Self::EphemeralError> { - self.send(&messages::ProcessorMessage::Coordinator( - messages::coordinator::ProcessorMessage::CosignedBlock { - block_number, - block, - signature: signature.encode(), - }, - )); - Ok(()) + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::CosignedBlock { + block_number, + block, + signature: signature.encode(), + }, + )); + Ok(()) + } } - async fn publish_batch(&mut self, batch: Batch) -> Result<(), Self::EphemeralError> { - self.send(&messages::ProcessorMessage::Substrate( - messages::substrate::ProcessorMessage::Batch { batch }, - )); - Ok(()) + fn publish_batch( + &mut self, + batch: Batch, + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Substrate( + messages::substrate::ProcessorMessage::Batch { batch }, + )); + Ok(()) + } } - async fn publish_signed_batch(&mut self, batch: SignedBatch) -> Result<(), Self::EphemeralError> { - self.send(&messages::ProcessorMessage::Coordinator( - messages::coordinator::ProcessorMessage::SignedBatch { batch }, - )); - Ok(()) + fn publish_signed_batch( + &mut self, + batch: SignedBatch, + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::SignedBatch { batch }, + )); + Ok(()) + } } - async fn publish_slash_report_signature( + fn publish_slash_report_signature( &mut self, session: Session, signature: Signature, - ) -> Result<(), Self::EphemeralError> { - self.send(&messages::ProcessorMessage::Coordinator( - messages::coordinator::ProcessorMessage::SignedSlashReport { - session, - signature: signature.encode(), - }, - )); - Ok(()) + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::SignedSlashReport { + session, + signature: signature.encode(), + }, + )); + Ok(()) + } } } diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml index 2d4958c7d..52cca1ae1 100644 --- a/processor/bitcoin/Cargo.toml +++ b/processor/bitcoin/Cargo.toml @@ -17,7 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -async-trait = { version = "0.1", default-features = false } rand_core = { version = "0.6", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index 74e174ee2..56bfd619a 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -96,7 +96,6 @@ use serai_client::{ */ /* -#[async_trait] impl TransactionTrait for Transaction { #[cfg(test)] async fn fee(&self, network: &Bitcoin) -> u64 { @@ -210,7 +209,6 @@ impl Bitcoin { } } -#[async_trait] impl Network for Bitcoin { // 2 inputs should be 2 * 230 = 460 weight units // The output should be ~36 bytes, or 144 weight units diff --git a/processor/bitcoin/src/primitives/block.rs b/processor/bitcoin/src/primitives/block.rs index 8221c8b56..e3df7e693 100644 --- a/processor/bitcoin/src/primitives/block.rs +++ b/processor/bitcoin/src/primitives/block.rs @@ -31,7 +31,6 @@ impl fmt::Debug for Block { } } -#[async_trait::async_trait] impl primitives::Block for Block { type Header = BlockHeader; diff --git 
a/processor/bitcoin/src/rpc.rs b/processor/bitcoin/src/rpc.rs index 23db55703..acd3be858 100644 --- a/processor/bitcoin/src/rpc.rs +++ b/processor/bitcoin/src/rpc.rs @@ -1,3 +1,5 @@ +use core::future::Future; + use bitcoin_serai::rpc::{RpcError, Rpc as BRpc}; use serai_client::primitives::{NetworkId, Coin, Amount}; @@ -18,7 +20,6 @@ pub(crate) struct Rpc { pub(crate) rpc: BRpc, } -#[async_trait::async_trait] impl ScannerFeed for Rpc { const NETWORK: NetworkId = NetworkId::Bitcoin; // 6 confirmations is widely accepted as secure and shouldn't occur @@ -32,71 +33,89 @@ impl ScannerFeed for Rpc { type EphemeralError = RpcError; - async fn latest_finalized_block_number(&self) -> Result { - db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError) + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future> { + async move { db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError) } } - async fn time_of_block(&self, number: u64) -> Result { - let number = usize::try_from(number).unwrap(); - - /* - The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the - median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve - CLTV). This creates a monotonic median time which we use as the block time. - */ - // This implements `GetMedianTimePast` - let median = { - const MEDIAN_TIMESPAN: usize = 11; - let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN); - for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number { - timestamps.push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time); - } - timestamps.sort(); - timestamps[timestamps.len() / 2] - }; - - /* - This block's timestamp is guaranteed to be greater than this median: - https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9 - /src/validation.cpp#L4182-L4184 - - This does not guarantee the median always increases however. Take the following trivial - example, as the window is initially built: - - 0 block has time 0 // Prior blocks: [] - 1 block has time 1 // Prior blocks: [0] - 2 block has time 2 // Prior blocks: [0, 1] - 3 block has time 2 // Prior blocks: [0, 1, 2] - - These two blocks have the same time (both greater than the median of their prior blocks) and - the same median. - - The median will never decrease however. The values pushed onto the window will always be - greater than the median. If a value greater than the median is popped, the median will remain - the same (due to the counterbalance of the pushed value). If a value less than the median is - popped, the median will increase (either to another instance of the same value, yet one - closer to the end of the repeating sequence, or to a higher value). - */ - Ok(median.into()) + fn time_of_block( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + let number = usize::try_from(number).unwrap(); + + /* + The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the + median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve + CLTV). This creates a monotonic median time which we use as the block time. + */ + // This implements `GetMedianTimePast` + let median = { + const MEDIAN_TIMESPAN: usize = 11; + let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN); + for i in number.saturating_sub(MEDIAN_TIMESPAN) .. 
number { + timestamps + .push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time); + } + timestamps.sort(); + timestamps[timestamps.len() / 2] + }; + + /* + This block's timestamp is guaranteed to be greater than this median: + https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9 + /src/validation.cpp#L4182-L4184 + + This does not guarantee the median always increases however. Take the following trivial + example, as the window is initially built: + + 0 block has time 0 // Prior blocks: [] + 1 block has time 1 // Prior blocks: [0] + 2 block has time 2 // Prior blocks: [0, 1] + 3 block has time 2 // Prior blocks: [0, 1, 2] + + These two blocks have the same time (both greater than the median of their prior blocks) and + the same median. + + The median will never decrease however. The values pushed onto the window will always be + greater than the median. If a value greater than the median is popped, the median will + remain the same (due to the counterbalance of the pushed value). If a value less than the + median is popped, the median will increase (either to another instance of the same value, + yet one closer to the end of the repeating sequence, or to a higher value). + */ + Ok(median.into()) + } } - async fn unchecked_block_header_by_number( + fn unchecked_block_header_by_number( &self, number: u64, - ) -> Result<::Header, Self::EphemeralError> { - Ok(BlockHeader( - self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?.header, - )) + ) -> impl Send + + Future::Header, Self::EphemeralError>> + { + async move { + Ok(BlockHeader( + self + .rpc + .get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?) + .await? + .header, + )) + } } - async fn unchecked_block_by_number( + fn unchecked_block_by_number( &self, number: u64, - ) -> Result { - Ok(Block( - self.db.clone(), - self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?, - )) + ) -> impl Send + Future> { + async move { + Ok(Block( + self.db.clone(), + self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?, + )) + } } fn dust(coin: Coin) -> Amount { @@ -137,22 +156,26 @@ impl ScannerFeed for Rpc { Amount(10_000) } - async fn cost_to_aggregate( + fn cost_to_aggregate( &self, coin: Coin, _reference_block: &Self::Block, - ) -> Result { - assert_eq!(coin, Coin::Bitcoin); - // TODO - Ok(Amount(0)) + ) -> impl Send + Future> { + async move { + assert_eq!(coin, Coin::Bitcoin); + // TODO + Ok(Amount(0)) + } } } -#[async_trait::async_trait] impl TransactionPublisher for Rpc { type EphemeralError = RpcError; - async fn publish(&self, tx: Transaction) -> Result<(), Self::EphemeralError> { - self.rpc.send_raw_transaction(&tx.0).await.map(|_| ()) + fn publish( + &self, + tx: Transaction, + ) -> impl Send + Future> { + async move { self.rpc.send_raw_transaction(&tx.0).await.map(|_| ()) } } } diff --git a/processor/bitcoin/src/txindex.rs b/processor/bitcoin/src/txindex.rs index 4ed389732..6a55a4c46 100644 --- a/processor/bitcoin/src/txindex.rs +++ b/processor/bitcoin/src/txindex.rs @@ -1,18 +1,4 @@ -/* - We want to be able to return received outputs. We do that by iterating over the inputs to find an - address format we recognize, then setting that address as the address to return to. - - Since inputs only contain the script signatures, yet addresses are for script public keys, we - need to pull up the output spent by an input and read the script public key from that. 
While we - could use `txindex=1`, and an asynchronous call to the Bitcoin node, we: - - 1) Can maintain a much smaller index ourselves - 2) Don't want the asynchronous call (which would require the flow be async, allowed to - potentially error, and more latent) - 3) Don't want to risk Bitcoin's `txindex` corruptions (frequently observed on testnet) - - This task builds that index. -*/ +use core::future::Future; use bitcoin_serai::bitcoin::ScriptBuf; @@ -35,72 +21,88 @@ pub(crate) fn script_pubkey_for_on_chain_output( ) } +/* + We want to be able to return received outputs. We do that by iterating over the inputs to find an + address format we recognize, then setting that address as the address to return to. + + Since inputs only contain the script signatures, yet addresses are for script public keys, we + need to pull up the output spent by an input and read the script public key from that. While we + could use `txindex=1`, and an asynchronous call to the Bitcoin node, we: + + 1) Can maintain a much smaller index ourselves + 2) Don't want the asynchronous call (which would require the flow be async, allowed to + potentially error, and more latent) + 3) Don't want to risk Bitcoin's `txindex` corruptions (frequently observed on testnet) + + This task builds that index. +*/ pub(crate) struct TxIndexTask(pub(crate) Rpc); -#[async_trait::async_trait] impl ContinuallyRan for TxIndexTask { - async fn run_iteration(&mut self) -> Result { - let latest_block_number = self - .0 - .rpc - .get_latest_block_number() - .await - .map_err(|e| format!("couldn't fetch latest block number: {e:?}"))?; - let latest_block_number = u64::try_from(latest_block_number).unwrap(); - // `CONFIRMATIONS - 1` as any on-chain block inherently has one confirmation (itself) - let finalized_block_number = - latest_block_number.checked_sub(Rpc::::CONFIRMATIONS - 1).ok_or(format!( - "blockchain only just started and doesn't have {} blocks yet", - Rpc::::CONFIRMATIONS - ))?; - - /* - `finalized_block_number` is the latest block number minus confirmations. The blockchain may - undetectably re-organize though, as while the scanner will maintain an index of finalized - blocks and panics on reorganization, this runs prior to the scanner and that index. - - A reorganization of `CONFIRMATIONS` blocks is still an invariant. Even if that occurs, this - saves the script public keys *by the transaction hash an output index*. Accordingly, it isn't - invalidated on reorganization. The only risk would be if the new chain reorganized to - include a transaction to Serai which we didn't index the parents of. If that happens, we'll - panic when we scan the transaction, causing the invariant to be detected. 
- */ - - let finalized_block_number_in_db = db::LatestBlockToYieldAsFinalized::get(&self.0.db); - let next_block = finalized_block_number_in_db.map_or(0, |block| block + 1); - - let mut iterated = false; - for b in next_block ..= finalized_block_number { - iterated = true; - - // Fetch the block - let block_hash = self - .0 - .rpc - .get_block_hash(b.try_into().unwrap()) - .await - .map_err(|e| format!("couldn't fetch block hash for block {b}: {e:?}"))?; - let block = self + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let latest_block_number = self .0 .rpc - .get_block(&block_hash) + .get_latest_block_number() .await - .map_err(|e| format!("couldn't fetch block {b}: {e:?}"))?; - - let mut txn = self.0.db.txn(); - - for tx in &block.txdata { - let txid = hash_bytes(tx.compute_txid().to_raw_hash()); - for (o, output) in tx.output.iter().enumerate() { - let o = u32::try_from(o).unwrap(); - // Set the script public key for this transaction - db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes()); + .map_err(|e| format!("couldn't fetch latest block number: {e:?}"))?; + let latest_block_number = u64::try_from(latest_block_number).unwrap(); + // `CONFIRMATIONS - 1` as any on-chain block inherently has one confirmation (itself) + let finalized_block_number = + latest_block_number.checked_sub(Rpc::::CONFIRMATIONS - 1).ok_or(format!( + "blockchain only just started and doesn't have {} blocks yet", + Rpc::::CONFIRMATIONS + ))?; + + /* + `finalized_block_number` is the latest block number minus confirmations. The blockchain may + undetectably re-organize though, as while the scanner will maintain an index of finalized + blocks and panics on reorganization, this runs prior to the scanner and that index. + + A reorganization of `CONFIRMATIONS` blocks is still an invariant. Even if that occurs, this + saves the script public keys *by the transaction hash an output index*. Accordingly, it + isn't invalidated on reorganization. The only risk would be if the new chain reorganized to + include a transaction to Serai which we didn't index the parents of. If that happens, we'll + panic when we scan the transaction, causing the invariant to be detected. 
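+
+  Since entries are keyed by transaction hash and output index, re-processing a block (after a
+  restart or a reorganization) simply rewrites the same entries, so the index never conflicts
+  with itself.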
+ */ + + let finalized_block_number_in_db = db::LatestBlockToYieldAsFinalized::get(&self.0.db); + let next_block = finalized_block_number_in_db.map_or(0, |block| block + 1); + + let mut iterated = false; + for b in next_block ..= finalized_block_number { + iterated = true; + + // Fetch the block + let block_hash = self + .0 + .rpc + .get_block_hash(b.try_into().unwrap()) + .await + .map_err(|e| format!("couldn't fetch block hash for block {b}: {e:?}"))?; + let block = self + .0 + .rpc + .get_block(&block_hash) + .await + .map_err(|e| format!("couldn't fetch block {b}: {e:?}"))?; + + let mut txn = self.0.db.txn(); + + for tx in &block.txdata { + let txid = hash_bytes(tx.compute_txid().to_raw_hash()); + for (o, output) in tx.output.iter().enumerate() { + let o = u32::try_from(o).unwrap(); + // Set the script public key for this transaction + db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes()); + } } - } - db::LatestBlockToYieldAsFinalized::set(&mut txn, &b); - txn.commit(); + db::LatestBlockToYieldAsFinalized::set(&mut txn, &b); + txn.commit(); + } + Ok(iterated) } - Ok(iterated) } } diff --git a/processor/ethereum/Cargo.toml b/processor/ethereum/Cargo.toml index eff47af96..ea65d570d 100644 --- a/processor/ethereum/Cargo.toml +++ b/processor/ethereum/Cargo.toml @@ -17,8 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -async-trait = { version = "0.1", default-features = false } - const-hex = { version = "1", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } diff --git a/processor/monero/Cargo.toml b/processor/monero/Cargo.toml index f70d6187e..22137b2db 100644 --- a/processor/monero/Cargo.toml +++ b/processor/monero/Cargo.toml @@ -17,7 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -async-trait = { version = "0.1", default-features = false } rand_core = { version = "0.6", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } diff --git a/processor/monero/src/primitives/block.rs b/processor/monero/src/primitives/block.rs index 40d0f2969..ad28b0c17 100644 --- a/processor/monero/src/primitives/block.rs +++ b/processor/monero/src/primitives/block.rs @@ -24,7 +24,6 @@ impl primitives::BlockHeader for BlockHeader { #[derive(Clone, Debug)] pub(crate) struct Block(pub(crate) MBlock, Vec); -#[async_trait::async_trait] impl primitives::Block for Block { type Header = BlockHeader; diff --git a/processor/monero/src/rpc.rs b/processor/monero/src/rpc.rs index 21a202cc2..0e0739b8b 100644 --- a/processor/monero/src/rpc.rs +++ b/processor/monero/src/rpc.rs @@ -1,3 +1,5 @@ +use core::future::Future; + use monero_wallet::rpc::{RpcError, Rpc as RpcTrait}; use monero_simple_request_rpc::SimpleRequestRpc; @@ -16,7 +18,6 @@ pub(crate) struct Rpc { pub(crate) rpc: SimpleRequestRpc, } -#[async_trait::async_trait] impl ScannerFeed for Rpc { const NETWORK: NetworkId = NetworkId::Monero; // Outputs aren't spendable until 10 blocks later due to the 10-block lock @@ -32,28 +33,44 @@ impl ScannerFeed for Rpc { type EphemeralError = RpcError; - async fn latest_finalized_block_number(&self) -> Result { - Ok(self.rpc.get_height().await?.checked_sub(1).expect("connected to an invalid Monero RPC").try_into().unwrap()) + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future> { + async move { + Ok( + self + .rpc + .get_height() + 
.await? + .checked_sub(1) + .expect("connected to an invalid Monero RPC") + .try_into() + .unwrap(), + ) + } } - async fn time_of_block(&self, number: u64) -> Result { - todo!("TODO") + fn time_of_block( + &self, + number: u64, + ) -> impl Send + Future> { + async move{todo!("TODO")} } - async fn unchecked_block_header_by_number( + fn unchecked_block_header_by_number( &self, number: u64, - ) -> Result<::Header, Self::EphemeralError> { - Ok(BlockHeader( - self.rpc.get_block_by_number(number.try_into().unwrap()).await? - )) + ) -> impl Send + + Future::Header, Self::EphemeralError>> + { + async move { Ok(BlockHeader(self.rpc.get_block_by_number(number.try_into().unwrap()).await?)) } } - async fn unchecked_block_by_number( + fn unchecked_block_by_number( &self, number: u64, - ) -> Result { - todo!("TODO") + ) -> impl Send + Future> { + async move { todo!("TODO") } } fn dust(coin: Coin) -> Amount { @@ -62,22 +79,26 @@ impl ScannerFeed for Rpc { todo!("TODO") } - async fn cost_to_aggregate( + fn cost_to_aggregate( &self, coin: Coin, _reference_block: &Self::Block, - ) -> Result { - assert_eq!(coin, Coin::Bitcoin); - // TODO - Ok(Amount(0)) + ) -> impl Send + Future> { + async move { + assert_eq!(coin, Coin::Bitcoin); + // TODO + Ok(Amount(0)) + } } } -#[async_trait::async_trait] impl TransactionPublisher for Rpc { type EphemeralError = RpcError; - async fn publish(&self, tx: Transaction) -> Result<(), Self::EphemeralError> { - self.rpc.publish_transaction(&tx.0).await + fn publish( + &self, + tx: Transaction, + ) -> impl Send + Future> { + async move { self.rpc.publish_transaction(&tx.0).await } } } diff --git a/processor/primitives/Cargo.toml b/processor/primitives/Cargo.toml index dd1b74ea8..6dd3082b5 100644 --- a/processor/primitives/Cargo.toml +++ b/processor/primitives/Cargo.toml @@ -17,8 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -async-trait = { version = "0.1", default-features = false } - group = { version = "0.13", default-features = false } serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } diff --git a/processor/primitives/src/block.rs b/processor/primitives/src/block.rs index 4f721d027..da4812470 100644 --- a/processor/primitives/src/block.rs +++ b/processor/primitives/src/block.rs @@ -22,7 +22,6 @@ pub trait BlockHeader: Send + Sync + Sized + Clone + Debug { /// necessary to literally define it as whatever the external network defines as a block. For /// external networks which finalize block(s), this block type should be a representation of all /// transactions within a period finalization (whether block or epoch). -#[async_trait::async_trait] pub trait Block: Send + Sync + Sized + Clone + Debug { /// The type used for this block's header. type Header: BlockHeader; diff --git a/processor/primitives/src/task.rs b/processor/primitives/src/task.rs index a40fb9ff1..e8efc64ca 100644 --- a/processor/primitives/src/task.rs +++ b/processor/primitives/src/task.rs @@ -1,4 +1,4 @@ -use core::time::Duration; +use core::{future::Future, time::Duration}; use std::sync::Arc; use tokio::sync::{mpsc, oneshot, Mutex}; @@ -78,8 +78,7 @@ impl TaskHandle { } /// A task to be continually ran. -#[async_trait::async_trait] -pub trait ContinuallyRan: Sized { +pub trait ContinuallyRan: Sized + Send { /// The amount of seconds before this task should be polled again. const DELAY_BETWEEN_ITERATIONS: u64 = 5; /// The maximum amount of seconds before this task should be run again. 
@@ -91,60 +90,66 @@ pub trait ContinuallyRan: Sized { /// /// If this returns `true`, all dependents of the task will immediately have a new iteration ran /// (without waiting for whatever timer they were already on). - async fn run_iteration(&mut self) -> Result; + fn run_iteration(&mut self) -> impl Send + Future>; /// Continually run the task. - async fn continually_run(mut self, mut task: Task, dependents: Vec) { - // The default number of seconds to sleep before running the task again - let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS; - // The current number of seconds to sleep before running the task again - // We increment this upon errors in order to not flood the logs with errors - let mut current_sleep_before_next_task = default_sleep_before_next_task; - let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| { - let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task; - // Set a limit of sleeping for two minutes - *current_sleep_before_next_task = new_sleep.max(Self::MAX_DELAY_BETWEEN_ITERATIONS); - }; + fn continually_run( + mut self, + mut task: Task, + dependents: Vec, + ) -> impl Send + Future { + async move { + // The default number of seconds to sleep before running the task again + let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS; + // The current number of seconds to sleep before running the task again + // We increment this upon errors in order to not flood the logs with errors + let mut current_sleep_before_next_task = default_sleep_before_next_task; + let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| { + let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task; + // Set a limit of sleeping for two minutes + *current_sleep_before_next_task = new_sleep.max(Self::MAX_DELAY_BETWEEN_ITERATIONS); + }; - loop { - // If we were told to close/all handles were dropped, drop it - { - let should_close = task.close.try_recv(); - match should_close { - Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break, - Err(mpsc::error::TryRecvError::Empty) => {} + loop { + // If we were told to close/all handles were dropped, drop it + { + let should_close = task.close.try_recv(); + match should_close { + Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break, + Err(mpsc::error::TryRecvError::Empty) => {} + } } - } - match self.run_iteration().await { - Ok(run_dependents) => { - // Upon a successful (error-free) loop iteration, reset the amount of time we sleep - current_sleep_before_next_task = default_sleep_before_next_task; + match self.run_iteration().await { + Ok(run_dependents) => { + // Upon a successful (error-free) loop iteration, reset the amount of time we sleep + current_sleep_before_next_task = default_sleep_before_next_task; - if run_dependents { - for dependent in &dependents { - dependent.run_now(); + if run_dependents { + for dependent in &dependents { + dependent.run_now(); + } } } + Err(e) => { + log::warn!("{}", e); + increase_sleep_before_next_task(&mut current_sleep_before_next_task); + } } - Err(e) => { - log::warn!("{}", e); - increase_sleep_before_next_task(&mut current_sleep_before_next_task); + + // Don't run the task again for another few seconds UNLESS told to run now + tokio::select! 
{ + () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {}, + msg = task.run_now.recv() => { + // Check if this is firing because the handle was dropped + if msg.is_none() { + break; + } + }, } } - // Don't run the task again for another few seconds UNLESS told to run now - tokio::select! { - () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {}, - msg = task.run_now.recv() => { - // Check if this is firing because the handle was dropped - if msg.is_none() { - break; - } - }, - } + task.closed.send(()).unwrap(); } - - task.closed.send(()).unwrap(); } } diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml index e3e083296..1ff154cd8 100644 --- a/processor/scanner/Cargo.toml +++ b/processor/scanner/Cargo.toml @@ -17,9 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -# Macros -async-trait = { version = "0.1", default-features = false } - # Encoders hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 5d139c6de..46a5e13b3 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -1,4 +1,4 @@ -use core::marker::PhantomData; +use core::{marker::PhantomData, future::Future}; use std::collections::{HashSet, HashMap}; use group::GroupEncoding; @@ -185,317 +185,323 @@ impl> EventualityTask { } } -#[async_trait::async_trait] impl> ContinuallyRan for EventualityTask { - async fn run_iteration(&mut self) -> Result { - // Fetch the highest acknowledged block - let Some(highest_acknowledged) = ScannerGlobalDb::::highest_acknowledged_block(&self.db) - else { - // If we've never acknowledged a block, return - return Ok(false); - }; + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + // Fetch the highest acknowledged block + let Some(highest_acknowledged) = ScannerGlobalDb::::highest_acknowledged_block(&self.db) + else { + // If we've never acknowledged a block, return + return Ok(false); + }; + + // A boolean of if we've made any progress to return at the end of the function + let mut made_progress = false; + + // Start by intaking any Burns we have sitting around + // It's important we run this regardless of if we have a new block to handle + made_progress |= self.intake_burns().await?; + + /* + Eventualities increase upon one of two cases: + + 1) We're fulfilling Burns + 2) We acknowledged a block + + We can't know the processor has intaked all Burns it should have when we process block `b`. + We solve this by executing a consensus protocol whenever a resolution for an Eventuality + created to fulfill Burns occurs. Accordingly, we force ourselves to obtain synchrony on + such blocks (and all preceding Burns). + + This means we can only iterate up to the block currently pending acknowledgement. + + We only know blocks will need acknowledgement *for sure* if they were scanned. The only + other causes are key activation and retirement (both scheduled outside the scan window). + This makes the exclusive upper bound the *next block to scan*. 
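+
+        Since that bound is exclusive, the last block which may be checked here is
+        `next_to_scan - 1`, the most recently scanned block.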
+ */ + let exclusive_upper_bound = { + // Fetch the next to scan block + let next_to_scan = next_to_scan_for_outputs_block::(&self.db) + .expect("EventualityTask run before writing the start block"); + // If we haven't done any work, return + if next_to_scan == 0 { + return Ok(false); + } + next_to_scan + }; - // A boolean of if we've made any progress to return at the end of the function - let mut made_progress = false; + // Fetch the next block to check + let next_to_check = EventualityDb::::next_to_check_for_eventualities_block(&self.db) + .expect("EventualityTask run before writing the start block"); - // Start by intaking any Burns we have sitting around - // It's important we run this regardless of if we have a new block to handle - made_progress |= self.intake_burns().await?; + // Check all blocks + for b in next_to_check .. exclusive_upper_bound { + let is_block_notable = ScannerGlobalDb::::is_block_notable(&self.db, b); + if is_block_notable { + /* + If this block is notable *and* not acknowledged, break. - /* - Eventualities increase upon one of two cases: + This is so if Burns queued prior to this block's acknowledgement caused any + Eventualities (which may resolve this block), we have them. If it wasn't for that, it'd + be so if this block's acknowledgement caused any Eventualities, we have them, though + those would only potentially resolve in the next block (letting us scan this block + without delay). + */ + if b > highest_acknowledged { + break; + } - 1) We're fulfilling Burns - 2) We acknowledged a block + // Since this block is notable, ensure we've intaked all the Burns preceding it + // We can know with certainty that the channel is fully populated at this time since + // we've acknowledged a newer block (so we've handled the state up to this point and any + // new state will be for the newer block) + #[allow(unused_assignments)] + { + made_progress |= self.intake_burns().await?; + } + } - We can't know the processor has intaked all Burns it should have when we process block `b`. - We solve this by executing a consensus protocol whenever a resolution for an Eventuality - created to fulfill Burns occurs. Accordingly, we force ourselves to obtain synchrony on such - blocks (and all preceding Burns). + // Since we're handling this block, we are making progress + made_progress = true; - This means we can only iterate up to the block currently pending acknowledgement. + let block = self.feed.block_by_number(&self.db, b).await?; - We only know blocks will need acknowledgement *for sure* if they were scanned. The only other - causes are key activation and retirement (both scheduled outside the scan window). This makes - the exclusive upper bound the *next block to scan*. - */ - let exclusive_upper_bound = { - // Fetch the next to scan block - let next_to_scan = next_to_scan_for_outputs_block::(&self.db) - .expect("EventualityTask run before writing the start block"); - // If we haven't done any work, return - if next_to_scan == 0 { - return Ok(false); - } - next_to_scan - }; + log::debug!("checking eventuality completions in block: {} ({b})", hex::encode(block.id())); - // Fetch the next block to check - let next_to_check = EventualityDb::::next_to_check_for_eventualities_block(&self.db) - .expect("EventualityTask run before writing the start block"); - - // Check all blocks - for b in next_to_check .. 
exclusive_upper_bound { - let is_block_notable = ScannerGlobalDb::::is_block_notable(&self.db, b); - if is_block_notable { - /* - If this block is notable *and* not acknowledged, break. - - This is so if Burns queued prior to this block's acknowledgement caused any Eventualities - (which may resolve this block), we have them. If it wasn't for that, it'd be so if this - block's acknowledgement caused any Eventualities, we have them, though those would only - potentially resolve in the next block (letting us scan this block without delay). - */ - if b > highest_acknowledged { - break; - } + let (keys, keys_with_stages) = self.keys_and_keys_with_stages(b); - // Since this block is notable, ensure we've intaked all the Burns preceding it - // We can know with certainty that the channel is fully populated at this time since we've - // acknowledged a newer block (so we've handled the state up to this point and any new - // state will be for the newer block) - #[allow(unused_assignments)] - { - made_progress |= self.intake_burns().await?; - } - } + let mut txn = self.db.txn(); - // Since we're handling this block, we are making progress - made_progress = true; + // Fetch the data from the scanner + let scan_data = ScanToEventualityDb::recv_scan_data(&mut txn, b); + assert_eq!(scan_data.block_number, b); + let ReceiverScanData { block_number: _, received_external_outputs, forwards, returns } = + scan_data; + let mut outputs = received_external_outputs; - let block = self.feed.block_by_number(&self.db, b).await?; + for key in &keys { + // If this is the key's activation block, activate it + if key.activation_block_number == b { + Sch::activate_key(&mut txn, key.key); + } - log::debug!("checking eventuality completions in block: {} ({b})", hex::encode(block.id())); + let completed_eventualities = { + let mut eventualities = EventualityDb::::eventualities(&txn, key.key); + let completed_eventualities = + block.check_for_eventuality_resolutions(&mut eventualities); + EventualityDb::::set_eventualities(&mut txn, key.key, &eventualities); + completed_eventualities + }; - let (keys, keys_with_stages) = self.keys_and_keys_with_stages(b); + for (tx, eventuality) in &completed_eventualities { + log::info!( + "eventuality {} resolved by {}", + hex::encode(eventuality.id()), + hex::encode(tx.as_ref()) + ); + CompletedEventualities::send(&mut txn, &key.key, eventuality.id()); + } - let mut txn = self.db.txn(); + // Fetch all non-External outputs + let mut non_external_outputs = block.scan_for_outputs(key.key); + non_external_outputs.retain(|output| output.kind() != OutputType::External); + // Drop any outputs less than the dust limit + non_external_outputs.retain(|output| { + let balance = output.balance(); + balance.amount.0 >= S::dust(balance.coin).0 + }); - // Fetch the data from the scanner - let scan_data = ScanToEventualityDb::recv_scan_data(&mut txn, b); - assert_eq!(scan_data.block_number, b); - let ReceiverScanData { block_number: _, received_external_outputs, forwards, returns } = - scan_data; - let mut outputs = received_external_outputs; - - for key in &keys { - // If this is the key's activation block, activate it - if key.activation_block_number == b { - Sch::activate_key(&mut txn, key.key); - } + /* + Now that we have all non-External outputs, we filter them to be only the outputs which + are from transactions which resolve our own Eventualities *if* the multisig is retiring. + This implements step 6 of `spec/processor/Multisig Rotation.md`. + + We may receive a Change output. 
The only issue with accumulating this would be if it + extends the multisig's lifetime (by increasing the amount of outputs yet to be + forwarded). By checking it's one we made, either: + 1) It's a legitimate Change output to be forwarded + 2) It's a Change output created by a user burning coins (specifying the Change address), + which can only be created while the multisig is actively handling `Burn`s (therefore + ensuring this multisig cannot be kept alive ad-infinitum) + + The commentary on Change outputs also applies to Branch/Forwarded. They'll presumably + get ignored if not usable however. + */ + if key.stage == LifetimeStage::Finishing { + non_external_outputs + .retain(|output| completed_eventualities.contains_key(&output.transaction_id())); + } - let completed_eventualities = { - let mut eventualities = EventualityDb::::eventualities(&txn, key.key); - let completed_eventualities = block.check_for_eventuality_resolutions(&mut eventualities); - EventualityDb::::set_eventualities(&mut txn, key.key, &eventualities); - completed_eventualities - }; - - for (tx, eventuality) in &completed_eventualities { - log::info!( - "eventuality {} resolved by {}", - hex::encode(eventuality.id()), - hex::encode(tx.as_ref()) - ); - CompletedEventualities::send(&mut txn, &key.key, eventuality.id()); - } + // Finally, for non-External outputs we didn't make, we check they're worth more than the + // cost to aggregate them to avoid some profitable spam attacks by malicious miners + { + // Fetch and cache the costs to aggregate as this call may be expensive + let coins = non_external_outputs + .iter() + .map(|output| output.balance().coin) + .collect::>(); + let mut costs_to_aggregate = HashMap::new(); + for coin in coins { + costs_to_aggregate.insert( + coin, + self.feed.cost_to_aggregate(coin, &block).await.map_err(|e| { + format!("EventualityTask couldn't fetch cost to aggregate {coin:?} at {b}: {e:?}") + })?, + ); + } - // Fetch all non-External outputs - let mut non_external_outputs = block.scan_for_outputs(key.key); - non_external_outputs.retain(|output| output.kind() != OutputType::External); - // Drop any outputs less than the dust limit - non_external_outputs.retain(|output| { - let balance = output.balance(); - balance.amount.0 >= S::dust(balance.coin).0 - }); - - /* - Now that we have all non-External outputs, we filter them to be only the outputs which - are from transactions which resolve our own Eventualities *if* the multisig is retiring. - This implements step 6 of `spec/processor/Multisig Rotation.md`. - - We may receive a Change output. The only issue with accumulating this would be if it - extends the multisig's lifetime (by increasing the amount of outputs yet to be - forwarded). By checking it's one we made, either: - 1) It's a legitimate Change output to be forwarded - 2) It's a Change output created by a user burning coins (specifying the Change address), - which can only be created while the multisig is actively handling `Burn`s (therefore - ensuring this multisig cannot be kept alive ad-infinitum) - - The commentary on Change outputs also applies to Branch/Forwarded. They'll presumably get - ignored if not usable however. 
- */ - if key.stage == LifetimeStage::Finishing { - non_external_outputs - .retain(|output| completed_eventualities.contains_key(&output.transaction_id())); - } + // Only retain out outputs/outputs sufficiently worthwhile + non_external_outputs.retain(|output| { + completed_eventualities.contains_key(&output.transaction_id()) || { + let balance = output.balance(); + balance.amount.0 >= (2 * costs_to_aggregate[&balance.coin].0) + } + }); + } - // Finally, for non-External outputs we didn't make, we check they're worth more than the - // cost to aggregate them to avoid some profitable spam attacks by malicious miners - { - // Fetch and cache the costs to aggregate as this call may be expensive - let coins = - non_external_outputs.iter().map(|output| output.balance().coin).collect::>(); - let mut costs_to_aggregate = HashMap::new(); - for coin in coins { - costs_to_aggregate.insert( - coin, - self.feed.cost_to_aggregate(coin, &block).await.map_err(|e| { - format!("EventualityTask couldn't fetch cost to aggregate {coin:?} at {b}: {e:?}") - })?, + // Now, we iterate over all Forwarded outputs and queue their InInstructions + for output in + non_external_outputs.iter().filter(|output| output.kind() == OutputType::Forwarded) + { + let Some(eventuality) = completed_eventualities.get(&output.transaction_id()) else { + // Output sent to the forwarding address yet not one we made + continue; + }; + let Some(forwarded) = eventuality.singular_spent_output() else { + // This was a TX made by us, yet someone burned to the forwarding address as it + // doesn't follow the structure of forwarding transactions + continue; + }; + + let Some((return_address, mut in_instruction)) = + ScannerGlobalDb::::return_address_and_in_instruction_for_forwarded_output( + &txn, &forwarded, + ) + else { + // This was a TX made by us, coincidentally with the necessary structure, yet wasn't + // forwarding an output + continue; + }; + + // We use the original amount, minus twice the cost to aggregate + // If the fees we paid to forward this now (less than the cost to aggregate now, yet not + // necessarily the cost to aggregate historically) caused this amount to be less, reduce + // it accordingly + in_instruction.balance.amount.0 = + in_instruction.balance.amount.0.min(output.balance().amount.0); + + queue_output_until_block::( + &mut txn, + b + S::WINDOW_LENGTH, + &OutputWithInInstruction { output: output.clone(), return_address, in_instruction }, ); } - // Only retain out outputs/outputs sufficiently worthwhile - non_external_outputs.retain(|output| { - completed_eventualities.contains_key(&output.transaction_id()) || { - let balance = output.balance(); - balance.amount.0 >= (2 * costs_to_aggregate[&balance.coin].0) - } - }); + // Accumulate all of these outputs + outputs.extend(non_external_outputs); } - // Now, we iterate over all Forwarded outputs and queue their InInstructions - for output in - non_external_outputs.iter().filter(|output| output.kind() == OutputType::Forwarded) + // Update the scheduler { - let Some(eventuality) = completed_eventualities.get(&output.transaction_id()) else { - // Output sent to the forwarding address yet not one we made - continue; - }; - let Some(forwarded) = eventuality.singular_spent_output() else { - // This was a TX made by us, yet someone burned to the forwarding address as it doesn't - // follow the structure of forwarding transactions - continue; - }; - - let Some((return_address, mut in_instruction)) = - ScannerGlobalDb::::return_address_and_in_instruction_for_forwarded_output( 
- &txn, &forwarded, - ) - else { - // This was a TX made by us, coincidentally with the necessary structure, yet wasn't - // forwarding an output - continue; - }; + let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns }; + scheduler_update.outputs.sort_by(sort_outputs); + scheduler_update.forwards.sort_by(sort_outputs); + scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output)); + + let empty = { + let a: core::slice::Iter<'_, OutputFor> = scheduler_update.outputs.iter(); + let b: core::slice::Iter<'_, OutputFor> = scheduler_update.forwards.iter(); + let c = + scheduler_update.returns.iter().map(|output_to_return| &output_to_return.output); + let mut all_outputs = a.chain(b).chain(c).peekable(); + + // If we received any output, sanity check this block is notable + let empty = all_outputs.peek().is_none(); + if !empty { + assert!(is_block_notable, "accumulating output(s) in non-notable block"); + } - // We use the original amount, minus twice the cost to aggregate - // If the fees we paid to forward this now (less than the cost to aggregate now, yet not - // necessarily the cost to aggregate historically) caused this amount to be less, reduce - // it accordingly - in_instruction.balance.amount.0 = - in_instruction.balance.amount.0.min(output.balance().amount.0); - - queue_output_until_block::( - &mut txn, - b + S::WINDOW_LENGTH, - &OutputWithInInstruction { output: output.clone(), return_address, in_instruction }, - ); - } + // Sanity check we've never accumulated these outputs before + for output in all_outputs { + assert!( + !EventualityDb::::prior_accumulated_output(&txn, &output.id()), + "prior accumulated an output with this ID" + ); + EventualityDb::::accumulated_output(&mut txn, &output.id()); + } - // Accumulate all of these outputs - outputs.extend(non_external_outputs); - } + empty + }; - // Update the scheduler - { - let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns }; - scheduler_update.outputs.sort_by(sort_outputs); - scheduler_update.forwards.sort_by(sort_outputs); - scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output)); - - let empty = { - let a: core::slice::Iter<'_, OutputFor> = scheduler_update.outputs.iter(); - let b: core::slice::Iter<'_, OutputFor> = scheduler_update.forwards.iter(); - let c = scheduler_update.returns.iter().map(|output_to_return| &output_to_return.output); - let mut all_outputs = a.chain(b).chain(c).peekable(); - - // If we received any output, sanity check this block is notable - let empty = all_outputs.peek().is_none(); if !empty { - assert!(is_block_notable, "accumulating output(s) in non-notable block"); + // Accumulate the outputs + /* + This uses the `keys_with_stages` for the current block, yet this block is notable. + Accordingly, all future intaked Burns will use at least this block when determining + what LifetimeStage a key is. That makes the LifetimeStage monotonically incremented. + If this block wasn't notable, we'd potentially intake Burns with the LifetimeStage + determined off an earlier block than this (enabling an earlier LifetimeStage to be + used after a later one was already used). 
+ */ + let new_eventualities = + Sch::update(&mut txn, &block, &keys_with_stages, scheduler_update); + // Intake the new Eventualities + for key in new_eventualities.keys() { + keys + .iter() + .find(|serai_key| serai_key.key.to_bytes().as_ref() == key.as_slice()) + .expect("intaking Eventuality for key which isn't active"); + } + intake_eventualities::(&mut txn, new_eventualities); } + } - // Sanity check we've never accumulated these outputs before - for output in all_outputs { + for key in &keys { + // If this is the block at which forwarding starts for this key, flush it + // We do this after we issue the above update for any efficiencies gained by doing so + if key.block_at_which_forwarding_starts == Some(b) { assert!( - !EventualityDb::::prior_accumulated_output(&txn, &output.id()), - "prior accumulated an output with this ID" + key.key != keys.last().unwrap().key, + "key which was forwarding was the last key (which has no key after it to forward to)" ); - EventualityDb::::accumulated_output(&mut txn, &output.id()); + let new_eventualities = + Sch::flush_key(&mut txn, &block, key.key, keys.last().unwrap().key); + intake_eventualities::(&mut txn, new_eventualities); } - empty - }; - - if !empty { - // Accumulate the outputs - /* - This uses the `keys_with_stages` for the current block, yet this block is notable. - Accordingly, all future intaked Burns will use at least this block when determining - what LifetimeStage a key is. That makes the LifetimeStage monotonically incremented. If - this block wasn't notable, we'd potentially intake Burns with the LifetimeStage - determined off an earlier block than this (enabling an earlier LifetimeStage to be used - after a later one was already used). - */ - let new_eventualities = - Sch::update(&mut txn, &block, &keys_with_stages, scheduler_update); - // Intake the new Eventualities - for key in new_eventualities.keys() { - keys - .iter() - .find(|serai_key| serai_key.key.to_bytes().as_ref() == key.as_slice()) - .expect("intaking Eventuality for key which isn't active"); + // Now that we've intaked any Eventualities caused, check if we're retiring any keys + if key.stage == LifetimeStage::Finishing { + let eventualities = EventualityDb::::eventualities(&txn, key.key); + if eventualities.active_eventualities.is_empty() { + log::info!( + "key {} has finished and is being retired", + hex::encode(key.key.to_bytes().as_ref()) + ); + + // Retire this key `WINDOW_LENGTH` blocks in the future to ensure the scan task never + // has a malleable view of the keys. 
+ ScannerGlobalDb::::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key); + + // We tell the scheduler to retire it now as we're done with it, and this fn doesn't + // require it be called with a canonical order + Sch::retire_key(&mut txn, key.key); + } } - intake_eventualities::(&mut txn, new_eventualities); } - } - for key in &keys { - // If this is the block at which forwarding starts for this key, flush it - // We do this after we issue the above update for any efficiencies gained by doing so - if key.block_at_which_forwarding_starts == Some(b) { - assert!( - key.key != keys.last().unwrap().key, - "key which was forwarding was the last key (which has no key after it to forward to)" - ); - let new_eventualities = - Sch::flush_key(&mut txn, &block, key.key, keys.last().unwrap().key); - intake_eventualities::(&mut txn, new_eventualities); - } + // Update the next-to-check block + EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, next_to_check); - // Now that we've intaked any Eventualities caused, check if we're retiring any keys - if key.stage == LifetimeStage::Finishing { - let eventualities = EventualityDb::::eventualities(&txn, key.key); - if eventualities.active_eventualities.is_empty() { - log::info!( - "key {} has finished and is being retired", - hex::encode(key.key.to_bytes().as_ref()) - ); - - // Retire this key `WINDOW_LENGTH` blocks in the future to ensure the scan task never - // has a malleable view of the keys. - ScannerGlobalDb::::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key); - - // We tell the scheduler to retire it now as we're done with it, and this fn doesn't - // require it be called with a canonical order - Sch::retire_key(&mut txn, key.key); - } + // If this block was notable, update the latest-handled notable block + if is_block_notable { + EventualityDb::::set_latest_handled_notable_block(&mut txn, b); } - } - - // Update the next-to-check block - EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, next_to_check); - // If this block was notable, update the latest-handled notable block - if is_block_notable { - EventualityDb::::set_latest_handled_notable_block(&mut txn, b); + txn.commit(); } - txn.commit(); + // Run dependents if we successfully checked any blocks + Ok(made_progress) } - - // Run dependents if we successfully checked any blocks - Ok(made_progress) } } diff --git a/processor/scanner/src/index/mod.rs b/processor/scanner/src/index/mod.rs index 930ce55ac..03abc8a81 100644 --- a/processor/scanner/src/index/mod.rs +++ b/processor/scanner/src/index/mod.rs @@ -1,5 +1,6 @@ -use serai_db::{Get, DbTxn, Db}; +use core::future::Future; +use serai_db::{Get, DbTxn, Db}; use primitives::{task::ContinuallyRan, BlockHeader}; use crate::ScannerFeed; @@ -56,58 +57,59 @@ impl IndexTask { } } -#[async_trait::async_trait] impl ContinuallyRan for IndexTask { - async fn run_iteration(&mut self) -> Result { - // Fetch the latest finalized block - let our_latest_finalized = IndexDb::latest_finalized_block(&self.db) - .expect("IndexTask run before writing the start block"); - let latest_finalized = match self.feed.latest_finalized_block_number().await { - Ok(latest_finalized) => latest_finalized, - Err(e) => Err(format!("couldn't fetch the latest finalized block number: {e:?}"))?, - }; + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + // Fetch the latest finalized block + let our_latest_finalized = IndexDb::latest_finalized_block(&self.db) + .expect("IndexTask run before writing the start block"); + let 
latest_finalized = match self.feed.latest_finalized_block_number().await { + Ok(latest_finalized) => latest_finalized, + Err(e) => Err(format!("couldn't fetch the latest finalized block number: {e:?}"))?, + }; - if latest_finalized < our_latest_finalized { - // Explicitly log this as an error as returned ephemeral errors are logged with debug - // This doesn't panic as the node should sync along our indexed chain, and if it doesn't, - // we'll panic at that point in time - log::error!( - "node is out of sync, latest finalized {} is behind our indexed {}", - latest_finalized, - our_latest_finalized - ); - Err("node is out of sync".to_string())?; - } + if latest_finalized < our_latest_finalized { + // Explicitly log this as an error as returned ephemeral errors are logged with debug + // This doesn't panic as the node should sync along our indexed chain, and if it doesn't, + // we'll panic at that point in time + log::error!( + "node is out of sync, latest finalized {} is behind our indexed {}", + latest_finalized, + our_latest_finalized + ); + Err("node is out of sync".to_string())?; + } - // Index the hashes of all blocks until the latest finalized block - for b in (our_latest_finalized + 1) ..= latest_finalized { - let block = match self.feed.unchecked_block_header_by_number(b).await { - Ok(block) => block, - Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?, - }; + // Index the hashes of all blocks until the latest finalized block + for b in (our_latest_finalized + 1) ..= latest_finalized { + let block = match self.feed.unchecked_block_header_by_number(b).await { + Ok(block) => block, + Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?, + }; - // Check this descends from our indexed chain - { - let expected_parent = - IndexDb::block_id(&self.db, b - 1).expect("didn't have the ID of the prior block"); - if block.parent() != expected_parent { - panic!( - "current finalized block (#{b}, {}) doesn't build off finalized block (#{}, {})", - hex::encode(block.parent()), - b - 1, - hex::encode(expected_parent) - ); + // Check this descends from our indexed chain + { + let expected_parent = + IndexDb::block_id(&self.db, b - 1).expect("didn't have the ID of the prior block"); + if block.parent() != expected_parent { + panic!( + "current finalized block (#{b}, {}) doesn't build off finalized block (#{}, {})", + hex::encode(block.parent()), + b - 1, + hex::encode(expected_parent) + ); + } } + + // Update the latest finalized block + let mut txn = self.db.txn(); + IndexDb::set_block(&mut txn, b, block.id()); + IndexDb::set_latest_finalized_block(&mut txn, b); + txn.commit(); } - // Update the latest finalized block - let mut txn = self.db.txn(); - IndexDb::set_block(&mut txn, b, block.id()); - IndexDb::set_latest_finalized_block(&mut txn, b); - txn.commit(); + // Have dependents run if we updated the latest finalized block + Ok(our_latest_finalized != latest_finalized) } - - // Have dependents run if we updated the latest finalized block - Ok(our_latest_finalized != latest_finalized) } } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index d100815de..a5c5c0387 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -2,7 +2,7 @@ #![doc = include_str!("../README.md")] #![deny(missing_docs)] -use core::{marker::PhantomData, fmt::Debug}; +use core::{marker::PhantomData, future::Future, fmt::Debug}; use std::{io, collections::HashMap}; use group::GroupEncoding; @@ -59,7 +59,6 @@ impl BlockExt for B { /// A feed usable to scan a 
blockchain. /// /// This defines the primitive types used, along with various getters necessary for indexing. -#[async_trait::async_trait] pub trait ScannerFeed: 'static + Send + Sync + Clone { /// The ID of the network being scanned for. const NETWORK: NetworkId; @@ -110,38 +109,43 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { /// /// The block number is its zero-indexed position within a linear view of the external network's /// consensus. The genesis block accordingly has block number 0. - async fn latest_finalized_block_number(&self) -> Result; + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future>; /// Fetch the timestamp of a block (represented in seconds since the epoch). /// /// This must be monotonically incrementing. Two blocks may share a timestamp. - async fn time_of_block(&self, number: u64) -> Result; + fn time_of_block( + &self, + number: u64, + ) -> impl Send + Future>; /// Fetch a block header by its number. /// /// This does not check the returned BlockHeader is the header for the block we indexed. - async fn unchecked_block_header_by_number( + fn unchecked_block_header_by_number( &self, number: u64, - ) -> Result<::Header, Self::EphemeralError>; + ) -> impl Send + Future::Header, Self::EphemeralError>>; /// Fetch a block by its number. /// /// This does not check the returned Block is the block we indexed. - async fn unchecked_block_by_number( + fn unchecked_block_by_number( &self, number: u64, - ) -> Result; + ) -> impl Send + Future>; /// Fetch a block by its number. /// /// Panics if the block requested wasn't indexed. - async fn block_by_number( + fn block_by_number( &self, getter: &(impl Send + Sync + Get), number: u64, - ) -> Result { - let block = match self.unchecked_block_by_number(number).await { + ) -> impl Send + Future> { + async move {let block = match self.unchecked_block_by_number(number).await { Ok(block) => block, Err(e) => Err(format!("couldn't fetch block {number}: {e:?}"))?, }; @@ -159,7 +163,7 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { } } - Ok(block) + Ok(block)} } /// The dust threshold for the specified coin. @@ -171,11 +175,11 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { /// The cost to aggregate an input as of the specified block. /// /// This is defined as the transaction fee for a 2-input, 1-output transaction. - async fn cost_to_aggregate( + fn cost_to_aggregate( &self, coin: Coin, reference_block: &Self::Block, - ) -> Result; + ) -> impl Send + Future>; } /// The key type for this ScannerFeed. 
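The signature rewrite above is the core of this patch, repeated for every task: each `#[async_trait]` `async fn` becomes a plain `fn` returning `impl Send + Future`, with the old body moved into an `async move` block. This relies on return-position `impl Trait` in traits (stable since Rust 1.75) and avoids the `Box<dyn Future>` allocation `async-trait` inserts on every call. A minimal sketch of the pattern follows, assuming a pared-down `ContinuallyRan` (the actual trait in `processor/primitives` carries more than shown here) and a hypothetical `ExampleTask`:

    use core::future::Future;

    // Pared-down stand-in for the `ContinuallyRan` trait; the real definition has more items.
    trait ContinuallyRan {
      // Previously: `async fn run_iteration(&mut self) -> Result<bool, String>;` under
      // `#[async_trait::async_trait]`. The `Send` bound must now be written explicitly, as
      // `async-trait` added it implicitly.
      fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
    }

    // Hypothetical task used solely to illustrate the migration
    struct ExampleTask {
      iterations: u64,
    }

    impl ContinuallyRan for ExampleTask {
      fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
        // The former `async fn` body moves, unchanged, into an `async move` block
        async move {
          self.iterations += 1;
          // Report whether this iteration made progress, so dependents know whether to run
          Ok(true)
        }
      }
    }

Wrapping the existing body in `async move`, rather than restructuring it, is why the bulk of this diff is re-indentation rather than behavioral change.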
diff --git a/processor/scanner/src/report/mod.rs b/processor/scanner/src/report/mod.rs index 5fd2c7eb7..afb1b6720 100644 --- a/processor/scanner/src/report/mod.rs +++ b/processor/scanner/src/report/mod.rs @@ -1,4 +1,4 @@ -use core::marker::PhantomData; +use core::{marker::PhantomData, future::Future}; use scale::Encode; use serai_db::{DbTxn, Db}; @@ -65,113 +65,119 @@ impl ReportTask { } } -#[async_trait::async_trait] impl ContinuallyRan for ReportTask { - async fn run_iteration(&mut self) -> Result { - let highest_reportable = { - // Fetch the next to scan block - let next_to_scan = next_to_scan_for_outputs_block::(&self.db) + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let highest_reportable = { + // Fetch the next to scan block + let next_to_scan = next_to_scan_for_outputs_block::(&self.db) + .expect("ReportTask run before writing the start block"); + // If we haven't done any work, return + if next_to_scan == 0 { + return Ok(false); + } + // The last scanned block is the block prior to this + #[allow(clippy::let_and_return)] + let last_scanned = next_to_scan - 1; + // The last scanned block is the highest reportable block as we only scan blocks within a + // window where it's safe to immediately report the block + // See `eventuality.rs` for more info + last_scanned + }; + + let next_to_potentially_report = ReportDb::::next_to_potentially_report_block(&self.db) .expect("ReportTask run before writing the start block"); - // If we haven't done any work, return - if next_to_scan == 0 { - return Ok(false); - } - // The last scanned block is the block prior to this - #[allow(clippy::let_and_return)] - let last_scanned = next_to_scan - 1; - // The last scanned block is the highest reportable block as we only scan blocks within a - // window where it's safe to immediately report the block - // See `eventuality.rs` for more info - last_scanned - }; - - let next_to_potentially_report = ReportDb::::next_to_potentially_report_block(&self.db) - .expect("ReportTask run before writing the start block"); - - for b in next_to_potentially_report ..= highest_reportable { - let mut txn = self.db.txn(); - - // Receive the InInstructions for this block - // We always do this as we can't trivially tell if we should recv InInstructions before we do - let InInstructionData { - external_key_for_session_to_sign_batch, - returnable_in_instructions: in_instructions, - } = ScanToReportDb::::recv_in_instructions(&mut txn, b); - let notable = ScannerGlobalDb::::is_block_notable(&txn, b); - if !notable { - assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions"); - } - // If this block is notable, create the Batch(s) for it - if notable { - let network = S::NETWORK; - let block_hash = index::block_id(&txn, b); - let mut batch_id = ReportDb::::acquire_batch_id(&mut txn, b); - - // start with empty batch - let mut batches = - vec![Batch { network, id: batch_id, block: BlockHash(block_hash), instructions: vec![] }]; - // We also track the return information for the InInstructions within a Batch in case they - // error - let mut return_information = vec![vec![]]; - - for Returnable { return_address, in_instruction } in in_instructions { - let balance = in_instruction.balance; - - let batch = batches.last_mut().unwrap(); - batch.instructions.push(in_instruction); - - // check if batch is over-size - if batch.encode().len() > MAX_BATCH_SIZE { - // pop the last instruction so it's back in size - let in_instruction = batch.instructions.pop().unwrap(); - - // bump the id for 
the new batch - batch_id = ReportDb::::acquire_batch_id(&mut txn, b); - - // make a new batch with this instruction included - batches.push(Batch { - network, - id: batch_id, - block: BlockHash(block_hash), - instructions: vec![in_instruction], - }); - // Since we're allocating a new batch, allocate a new set of return addresses for it - return_information.push(vec![]); - } - // For the set of return addresses for the InInstructions for the batch we just pushed - // onto, push this InInstruction's return addresses - return_information - .last_mut() - .unwrap() - .push(return_address.map(|address| ReturnInformation { address, balance })); + for b in next_to_potentially_report ..= highest_reportable { + let mut txn = self.db.txn(); + + // Receive the InInstructions for this block + // We always do this as we can't trivially tell if we should recv InInstructions before we + // do + let InInstructionData { + external_key_for_session_to_sign_batch, + returnable_in_instructions: in_instructions, + } = ScanToReportDb::::recv_in_instructions(&mut txn, b); + let notable = ScannerGlobalDb::::is_block_notable(&txn, b); + if !notable { + assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions"); } + // If this block is notable, create the Batch(s) for it + if notable { + let network = S::NETWORK; + let block_hash = index::block_id(&txn, b); + let mut batch_id = ReportDb::::acquire_batch_id(&mut txn, b); + + // start with empty batch + let mut batches = vec![Batch { + network, + id: batch_id, + block: BlockHash(block_hash), + instructions: vec![], + }]; + // We also track the return information for the InInstructions within a Batch in case + // they error + let mut return_information = vec![vec![]]; + + for Returnable { return_address, in_instruction } in in_instructions { + let balance = in_instruction.balance; + + let batch = batches.last_mut().unwrap(); + batch.instructions.push(in_instruction); + + // check if batch is over-size + if batch.encode().len() > MAX_BATCH_SIZE { + // pop the last instruction so it's back in size + let in_instruction = batch.instructions.pop().unwrap(); + + // bump the id for the new batch + batch_id = ReportDb::::acquire_batch_id(&mut txn, b); + + // make a new batch with this instruction included + batches.push(Batch { + network, + id: batch_id, + block: BlockHash(block_hash), + instructions: vec![in_instruction], + }); + // Since we're allocating a new batch, allocate a new set of return addresses for it + return_information.push(vec![]); + } + + // For the set of return addresses for the InInstructions for the batch we just pushed + // onto, push this InInstruction's return addresses + return_information + .last_mut() + .unwrap() + .push(return_address.map(|address| ReturnInformation { address, balance })); + } - // Save the return addresses to the database - assert_eq!(batches.len(), return_information.len()); - for (batch, return_information) in batches.iter().zip(&return_information) { - assert_eq!(batch.instructions.len(), return_information.len()); - ReportDb::::save_external_key_for_session_to_sign_batch( - &mut txn, - batch.id, - &external_key_for_session_to_sign_batch, - ); - ReportDb::::save_return_information(&mut txn, batch.id, return_information); - } + // Save the return addresses to the database + assert_eq!(batches.len(), return_information.len()); + for (batch, return_information) in batches.iter().zip(&return_information) { + assert_eq!(batch.instructions.len(), return_information.len()); + 
ReportDb::::save_external_key_for_session_to_sign_batch( + &mut txn, + batch.id, + &external_key_for_session_to_sign_batch, + ); + ReportDb::::save_return_information(&mut txn, batch.id, return_information); + } - for batch in batches { - Batches::send(&mut txn, &batch); - BatchesToSign::send(&mut txn, &external_key_for_session_to_sign_batch, &batch); + for batch in batches { + Batches::send(&mut txn, &batch); + BatchesToSign::send(&mut txn, &external_key_for_session_to_sign_batch, &batch); + } } - } - // Update the next to potentially report block - ReportDb::::set_next_to_potentially_report_block(&mut txn, b + 1); + // Update the next to potentially report block + ReportDb::::set_next_to_potentially_report_block(&mut txn, b + 1); - txn.commit(); - } + txn.commit(); + } - // Run dependents if we decided to report any blocks - Ok(next_to_potentially_report <= highest_reportable) + // Run dependents if we decided to report any blocks + Ok(next_to_potentially_report <= highest_reportable) + } } } diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index 91c97f60f..c54dc3e05 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -1,3 +1,4 @@ +use core::future::Future; use std::collections::HashMap; use scale::Decode; @@ -107,258 +108,262 @@ impl ScanTask { } } -#[async_trait::async_trait] impl ContinuallyRan for ScanTask { - async fn run_iteration(&mut self) -> Result { - // Fetch the safe to scan block - let latest_scannable = - latest_scannable_block::(&self.db).expect("ScanTask run before writing the start block"); - // Fetch the next block to scan - let next_to_scan = ScanDb::::next_to_scan_for_outputs_block(&self.db) - .expect("ScanTask run before writing the start block"); - - for b in next_to_scan ..= latest_scannable { - let block = self.feed.block_by_number(&self.db, b).await?; - - log::info!("scanning block: {} ({b})", hex::encode(block.id())); - - let mut txn = self.db.txn(); - - assert_eq!(ScanDb::::next_to_scan_for_outputs_block(&txn).unwrap(), b); - - // Tidy the keys, then fetch them - // We don't have to tidy them here, we just have to somewhere, so why not here? 
- ScannerGlobalDb::::tidy_keys(&mut txn); - let keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&txn) - .expect("scanning for a blockchain without any keys set"); - - // The scan data for this block - let mut scan_data = SenderScanData { - block_number: b, - received_external_outputs: vec![], - forwards: vec![], - returns: vec![], - }; - // The InInstructions for this block - let mut in_instructions = vec![]; - - // The outputs queued for this block - let queued_outputs = { - let mut queued_outputs = ScanDb::::take_queued_outputs(&mut txn, b); - // Sort the queued outputs in case they weren't queued in a deterministic fashion - queued_outputs.sort_by(|a, b| sort_outputs(&a.output, &b.output)); - queued_outputs - }; - for queued_output in queued_outputs { - in_instructions.push(( - queued_output.output.id(), - Returnable { - return_address: queued_output.return_address, - in_instruction: queued_output.in_instruction, - }, - )); - scan_data.received_external_outputs.push(queued_output.output); - } - - // We subtract the cost to aggregate from some outputs we scan - // This cost is fetched with an asynchronous function which may be non-trivial - // We cache the result of this function here to avoid calling it multiple times - let mut costs_to_aggregate = HashMap::with_capacity(1); - - // Scan for each key - for key in &keys { - for output in block.scan_for_outputs(key.key) { - assert_eq!(output.key(), key.key); - - /* - The scan task runs ahead of time, obtaining ordering on the external network's blocks - with relation to events on the Serai network. This is done via publishing a Batch which - contains the InInstructions from External outputs. Accordingly, the scan process only - has to yield External outputs. - - It'd appear to make sense to scan for all outputs, and after scanning for all outputs, - yield all outputs. The issue is we can't identify outputs we created here. We can only - identify the outputs we receive and their *declared intention*. - - We only want to handle Change/Branch/Forwarded outputs we made ourselves. For - Forwarded, the reasoning is obvious (retiring multisigs should only downsize, yet - accepting new outputs solely because they claim to be Forwarded would increase the size - of the multisig). For Change/Branch, it's because such outputs which aren't ours are - pointless. They wouldn't hurt to accumulate though. - - The issue is they would hurt to accumulate. We want to filter outputs which are less - than their cost to aggregate, a variable itself variable to the current blockchain. We - can filter such outputs here, yet if we drop a Change output, we create an insolvency. - We'd need to track the loss and offset it later. That means we can't filter such - outputs, as we expect any Change output we make. - - The issue is the Change outputs we don't make. Someone can create an output declaring - to be Change, yet not actually Change. If we don't filter it, it'd be queued for - accumulation, yet it may cost more to accumulate than it's worth. - - The solution is to let the Eventuality task, which does know if we made an output or - not (or rather, if a transaction is identical to a transaction which should exist - regarding effects) decide to keep/yield the outputs which we should only keep if we - made them (as Serai itself should not make worthless outputs, so we can assume they're - worthwhile, and even if they're not economically, they are technically). 
- - The alternative, we drop outputs here with a generic filter rule and then report back - the insolvency created, still doesn't work as we'd only be creating an insolvency if - the output was actually made by us (and not simply someone else sending in). We can - have the Eventuality task report the insolvency, yet that requires the scanner be - responsible for such filter logic. It's more flexible, and has a cleaner API, - to do so at a higher level. - */ - if output.kind() != OutputType::External { - // While we don't report these outputs, we still need consensus on this block and - // accordingly still need to set it as notable - let balance = output.balance(); - // We ensure it's over the dust limit to prevent people sending 1 satoshi from causing - // an invocation of a consensus/signing protocol - if balance.amount.0 >= S::dust(balance.coin).0 { - ScannerGlobalDb::::flag_notable_due_to_non_external_output(&mut txn, b); - } - continue; - } - - // Check this isn't dust - let balance_to_use = { - let mut balance = output.balance(); - - // First, subtract 2 * the cost to aggregate, as detailed in - // `spec/processor/UTXO Management.md` - - // We cache this, so if it isn't yet cached, insert it into the cache - if let std::collections::hash_map::Entry::Vacant(e) = - costs_to_aggregate.entry(balance.coin) - { - e.insert(self.feed.cost_to_aggregate(balance.coin, &block).await.map_err(|e| { - format!( - "ScanTask couldn't fetch cost to aggregate {:?} at {b}: {e:?}", - balance.coin - ) - })?); - } - let cost_to_aggregate = costs_to_aggregate[&balance.coin]; - balance.amount.0 -= 2 * cost_to_aggregate.0; + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + // Fetch the safe to scan block + let latest_scannable = + latest_scannable_block::(&self.db).expect("ScanTask run before writing the start block"); + // Fetch the next block to scan + let next_to_scan = ScanDb::::next_to_scan_for_outputs_block(&self.db) + .expect("ScanTask run before writing the start block"); + + for b in next_to_scan ..= latest_scannable { + let block = self.feed.block_by_number(&self.db, b).await?; + + log::info!("scanning block: {} ({b})", hex::encode(block.id())); + + let mut txn = self.db.txn(); + + assert_eq!(ScanDb::::next_to_scan_for_outputs_block(&txn).unwrap(), b); + + // Tidy the keys, then fetch them + // We don't have to tidy them here, we just have to somewhere, so why not here? 
+ ScannerGlobalDb::::tidy_keys(&mut txn); + let keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&txn) + .expect("scanning for a blockchain without any keys set"); + + // The scan data for this block + let mut scan_data = SenderScanData { + block_number: b, + received_external_outputs: vec![], + forwards: vec![], + returns: vec![], + }; + // The InInstructions for this block + let mut in_instructions = vec![]; + + // The outputs queued for this block + let queued_outputs = { + let mut queued_outputs = ScanDb::::take_queued_outputs(&mut txn, b); + // Sort the queued outputs in case they weren't queued in a deterministic fashion + queued_outputs.sort_by(|a, b| sort_outputs(&a.output, &b.output)); + queued_outputs + }; + for queued_output in queued_outputs { + in_instructions.push(( + queued_output.output.id(), + Returnable { + return_address: queued_output.return_address, + in_instruction: queued_output.in_instruction, + }, + )); + scan_data.received_external_outputs.push(queued_output.output); + } - // Now, check it's still past the dust threshold - if balance.amount.0 < S::dust(balance.coin).0 { + // We subtract the cost to aggregate from some outputs we scan + // This cost is fetched with an asynchronous function which may be non-trivial + // We cache the result of this function here to avoid calling it multiple times + let mut costs_to_aggregate = HashMap::with_capacity(1); + + // Scan for each key + for key in &keys { + for output in block.scan_for_outputs(key.key) { + assert_eq!(output.key(), key.key); + + /* + The scan task runs ahead of time, obtaining ordering on the external network's blocks + with relation to events on the Serai network. This is done via publishing a Batch + which contains the InInstructions from External outputs. Accordingly, the scan + process only has to yield External outputs. + + It'd appear to make sense to scan for all outputs, and after scanning for all + outputs, yield all outputs. The issue is we can't identify outputs we created here. + We can only identify the outputs we receive and their *declared intention*. + + We only want to handle Change/Branch/Forwarded outputs we made ourselves. For + Forwarded, the reasoning is obvious (retiring multisigs should only downsize, yet + accepting new outputs solely because they claim to be Forwarded would increase the + size of the multisig). For Change/Branch, it's because such outputs which aren't ours + are pointless. They wouldn't hurt to accumulate though. + + The issue is they would hurt to accumulate. We want to filter outputs which are less + than their cost to aggregate, a variable itself variable to the current blockchain. + We can filter such outputs here, yet if we drop a Change output, we create an + insolvency. We'd need to track the loss and offset it later. That means we can't + filter such outputs, as we expect any Change output we make. + + The issue is the Change outputs we don't make. Someone can create an output declaring + to be Change, yet not actually Change. If we don't filter it, it'd be queued for + accumulation, yet it may cost more to accumulate than it's worth. 
+ + The solution is to let the Eventuality task, which does know if we made an output or + not (or rather, if a transaction is identical to a transaction which should exist + regarding effects) decide to keep/yield the outputs which we should only keep if we + made them (as Serai itself should not make worthless outputs, so we can assume + they're worthwhile, and even if they're not economically, they are technically). + + The alternative, we drop outputs here with a generic filter rule and then report back + the insolvency created, still doesn't work as we'd only be creating an insolvency if + the output was actually made by us (and not simply someone else sending in). We can + have the Eventuality task report the insolvency, yet that requires the scanner be + responsible for such filter logic. It's more flexible, and has a cleaner API, + to do so at a higher level. + */ + if output.kind() != OutputType::External { + // While we don't report these outputs, we still need consensus on this block and + // accordingly still need to set it as notable + let balance = output.balance(); + // We ensure it's over the dust limit to prevent people sending 1 satoshi from + // causing an invocation of a consensus/signing protocol + if balance.amount.0 >= S::dust(balance.coin).0 { + ScannerGlobalDb::::flag_notable_due_to_non_external_output(&mut txn, b); + } continue; } - balance - }; + // Check this isn't dust + let balance_to_use = { + let mut balance = output.balance(); + + // First, subtract 2 * the cost to aggregate, as detailed in + // `spec/processor/UTXO Management.md` + + // We cache this, so if it isn't yet cached, insert it into the cache + if let std::collections::hash_map::Entry::Vacant(e) = + costs_to_aggregate.entry(balance.coin) + { + e.insert(self.feed.cost_to_aggregate(balance.coin, &block).await.map_err(|e| { + format!( + "ScanTask couldn't fetch cost to aggregate {:?} at {b}: {e:?}", + balance.coin + ) + })?); + } + let cost_to_aggregate = costs_to_aggregate[&balance.coin]; + balance.amount.0 -= 2 * cost_to_aggregate.0; - // Fetch the InInstruction/return addr for this output - let output_with_in_instruction = match in_instruction_from_output::(&output) { - (return_address, Some(instruction)) => OutputWithInInstruction { - output, - return_address, - in_instruction: InInstructionWithBalance { instruction, balance: balance_to_use }, - }, - (Some(address), None) => { - // Since there was no instruction here, return this since we parsed a return address - if key.stage != LifetimeStage::Finishing { - scan_data.returns.push(Return { address, output }); + // Now, check it's still past the dust threshold + if balance.amount.0 < S::dust(balance.coin).0 { + continue; } - continue; - } - // Since we didn't receive an instruction nor can we return this, queue this for - // accumulation and move on - (None, None) => { - if key.stage != LifetimeStage::Finishing { - scan_data.received_external_outputs.push(output); + + balance + }; + + // Fetch the InInstruction/return addr for this output + let output_with_in_instruction = match in_instruction_from_output::(&output) { + (return_address, Some(instruction)) => OutputWithInInstruction { + output, + return_address, + in_instruction: InInstructionWithBalance { instruction, balance: balance_to_use }, + }, + (Some(address), None) => { + // Since there was no instruction here, return this since we parsed a return + // address + if key.stage != LifetimeStage::Finishing { + scan_data.returns.push(Return { address, output }); + } + continue; + } + // Since 
we didn't receive an instruction nor can we return this, queue this for + // accumulation and move on + (None, None) => { + if key.stage != LifetimeStage::Finishing { + scan_data.received_external_outputs.push(output); + } + continue; + } + }; + + // Drop External outputs if they're to a multisig which won't report them + // This means we should report any External output we save to disk here + #[allow(clippy::match_same_arms)] + match key.stage { + // This multisig isn't yet reporting its External outputs to avoid a DoS + // Queue the output to be reported when this multisig starts reporting + LifetimeStage::ActiveYetNotReporting => { + ScanDb::::queue_output_until_block( + &mut txn, + key.block_at_which_reporting_starts, + &output_with_in_instruction, + ); + continue; + } + // We should report External outputs in these cases + LifetimeStage::Active | LifetimeStage::UsingNewForChange => {} + // We should report External outputs only once forwarded, where they'll appear as + // OutputType::Forwarded. We save them now for when they appear + LifetimeStage::Forwarding => { + // When the forwarded output appears, we can see which Plan it's associated with + // and from there recover this output + scan_data.forwards.push(output_with_in_instruction); + continue; + } + // We should drop these as we should not be handling new External outputs at this + // time + LifetimeStage::Finishing => { + continue; } - continue; - } - }; - - // Drop External outputs if they're to a multisig which won't report them - // This means we should report any External output we save to disk here - #[allow(clippy::match_same_arms)] - match key.stage { - // This multisig isn't yet reporting its External outputs to avoid a DoS - // Queue the output to be reported when this multisig starts reporting - LifetimeStage::ActiveYetNotReporting => { - ScanDb::::queue_output_until_block( - &mut txn, - key.block_at_which_reporting_starts, - &output_with_in_instruction, - ); - continue; - } - // We should report External outputs in these cases - LifetimeStage::Active | LifetimeStage::UsingNewForChange => {} - // We should report External outputs only once forwarded, where they'll appear as - // OutputType::Forwarded. 
We save them now for when they appear - LifetimeStage::Forwarding => { - // When the forwarded output appears, we can see which Plan it's associated with and - // from there recover this output - scan_data.forwards.push(output_with_in_instruction); - continue; - } - // We should drop these as we should not be handling new External outputs at this - // time - LifetimeStage::Finishing => { - continue; } + // Ensures we didn't miss a `continue` above + assert!(matches!(key.stage, LifetimeStage::Active | LifetimeStage::UsingNewForChange)); + + in_instructions.push(( + output_with_in_instruction.output.id(), + Returnable { + return_address: output_with_in_instruction.return_address, + in_instruction: output_with_in_instruction.in_instruction, + }, + )); + scan_data.received_external_outputs.push(output_with_in_instruction.output); } - // Ensures we didn't miss a `continue` above - assert!(matches!(key.stage, LifetimeStage::Active | LifetimeStage::UsingNewForChange)); - - in_instructions.push(( - output_with_in_instruction.output.id(), - Returnable { - return_address: output_with_in_instruction.return_address, - in_instruction: output_with_in_instruction.in_instruction, - }, - )); - scan_data.received_external_outputs.push(output_with_in_instruction.output); } - } - // Sort the InInstructions by the output ID - in_instructions.sort_by(|(output_id_a, _), (output_id_b, _)| { - use core::cmp::{Ordering, Ord}; - let res = output_id_a.as_ref().cmp(output_id_b.as_ref()); - assert!(res != Ordering::Equal, "two outputs within a collection had the same ID"); - res - }); - // Check we haven't prior reported an InInstruction for this output - // This is a sanity check which is intended to prevent multiple instances of sriXYZ on-chain - // due to a single output - for (id, _) in &in_instructions { - assert!( - !ScanDb::::prior_reported_in_instruction_for_output(&txn, id), - "prior reported an InInstruction for an output with this ID" + // Sort the InInstructions by the output ID + in_instructions.sort_by(|(output_id_a, _), (output_id_b, _)| { + use core::cmp::{Ordering, Ord}; + let res = output_id_a.as_ref().cmp(output_id_b.as_ref()); + assert!(res != Ordering::Equal, "two outputs within a collection had the same ID"); + res + }); + // Check we haven't prior reported an InInstruction for this output + // This is a sanity check which is intended to prevent multiple instances of sriXYZ + // on-chain due to a single output + for (id, _) in &in_instructions { + assert!( + !ScanDb::::prior_reported_in_instruction_for_output(&txn, id), + "prior reported an InInstruction for an output with this ID" + ); + ScanDb::::reported_in_instruction_for_output(&mut txn, id); + } + // Reformat the InInstructions to just the InInstructions + let in_instructions = in_instructions + .into_iter() + .map(|(_id, in_instruction)| in_instruction) + .collect::>(); + // Send the InInstructions to the report task + // We need to also specify which key is responsible for signing the Batch for these, which + // will always be the oldest key (as the new key signing the Batch signifies handover + // acceptance) + ScanToReportDb::::send_in_instructions( + &mut txn, + b, + &InInstructionData { + external_key_for_session_to_sign_batch: keys[0].key, + returnable_in_instructions: in_instructions, + }, ); - ScanDb::::reported_in_instruction_for_output(&mut txn, id); + + // Send the scan data to the eventuality task + ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); + // Update the next to scan block + 
ScanDb::::set_next_to_scan_for_outputs_block(&mut txn, b + 1); + txn.commit(); } - // Reformat the InInstructions to just the InInstructions - let in_instructions = - in_instructions.into_iter().map(|(_id, in_instruction)| in_instruction).collect::>(); - // Send the InInstructions to the report task - // We need to also specify which key is responsible for signing the Batch for these, which - // will always be the oldest key (as the new key signing the Batch signifies handover - // acceptance) - ScanToReportDb::::send_in_instructions( - &mut txn, - b, - &InInstructionData { - external_key_for_session_to_sign_batch: keys[0].key, - returnable_in_instructions: in_instructions, - }, - ); - // Send the scan data to the eventuality task - ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); - // Update the next to scan block - ScanDb::::set_next_to_scan_for_outputs_block(&mut txn, b + 1); - txn.commit(); + // Run dependents if we successfully scanned any blocks + Ok(next_to_scan <= latest_scannable) } - - // Run dependents if we successfully scanned any blocks - Ok(next_to_scan <= latest_scannable) } } diff --git a/processor/scanner/src/substrate/mod.rs b/processor/scanner/src/substrate/mod.rs index fc97daf33..a7302e5ce 100644 --- a/processor/scanner/src/substrate/mod.rs +++ b/processor/scanner/src/substrate/mod.rs @@ -1,4 +1,4 @@ -use core::marker::PhantomData; +use core::{marker::PhantomData, future::Future}; use serai_db::{DbTxn, Db}; @@ -52,115 +52,121 @@ impl SubstrateTask { } } -#[async_trait::async_trait] impl ContinuallyRan for SubstrateTask { - async fn run_iteration(&mut self) -> Result { - let mut made_progress = false; - loop { - // Fetch the next action to handle - let mut txn = self.db.txn(); - let Some(action) = SubstrateDb::::next_action(&mut txn) else { - drop(txn); - return Ok(made_progress); - }; - - match action { - Action::AcknowledgeBatch(AcknowledgeBatch { - batch_id, - in_instruction_succeededs, - mut burns, - key_to_activate, - }) => { - // Check if we have the information for this batch - let Some(block_number) = report::take_block_number_for_batch::(&mut txn, batch_id) - else { - // If we don't, drop this txn (restoring the action to the database) - drop(txn); - return Ok(made_progress); - }; - - { - let external_key_for_session_to_sign_batch = - report::take_external_key_for_session_to_sign_batch::(&mut txn, batch_id).unwrap(); - AcknowledgedBatches::send(&mut txn, &external_key_for_session_to_sign_batch, batch_id); - } + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut made_progress = false; + loop { + // Fetch the next action to handle + let mut txn = self.db.txn(); + let Some(action) = SubstrateDb::::next_action(&mut txn) else { + drop(txn); + return Ok(made_progress); + }; + + match action { + Action::AcknowledgeBatch(AcknowledgeBatch { + batch_id, + in_instruction_succeededs, + mut burns, + key_to_activate, + }) => { + // Check if we have the information for this batch + let Some(block_number) = report::take_block_number_for_batch::(&mut txn, batch_id) + else { + // If we don't, drop this txn (restoring the action to the database) + drop(txn); + return Ok(made_progress); + }; + + { + let external_key_for_session_to_sign_batch = + report::take_external_key_for_session_to_sign_batch::(&mut txn, batch_id) + .unwrap(); + AcknowledgedBatches::send( + &mut txn, + &external_key_for_session_to_sign_batch, + batch_id, + ); + } + + // Mark we made progress and handle this + made_progress = true; - // Mark we made progress and handle 
this - made_progress = true; - - assert!( - ScannerGlobalDb::::is_block_notable(&txn, block_number), - "acknowledging a block which wasn't notable" - ); - if let Some(prior_highest_acknowledged_block) = - ScannerGlobalDb::::highest_acknowledged_block(&txn) - { - // If a single block produced multiple Batches, the block number won't increment assert!( - block_number >= prior_highest_acknowledged_block, - "acknowledging blocks out-of-order" + ScannerGlobalDb::::is_block_notable(&txn, block_number), + "acknowledging a block which wasn't notable" ); - for b in (prior_highest_acknowledged_block + 1) .. block_number { + if let Some(prior_highest_acknowledged_block) = + ScannerGlobalDb::::highest_acknowledged_block(&txn) + { + // If a single block produced multiple Batches, the block number won't increment assert!( - !ScannerGlobalDb::::is_block_notable(&txn, b), - "skipped acknowledging a block which was notable" + block_number >= prior_highest_acknowledged_block, + "acknowledging blocks out-of-order" ); + for b in (prior_highest_acknowledged_block + 1) .. block_number { + assert!( + !ScannerGlobalDb::::is_block_notable(&txn, b), + "skipped acknowledging a block which was notable" + ); + } } - } - ScannerGlobalDb::::set_highest_acknowledged_block(&mut txn, block_number); - if let Some(key_to_activate) = key_to_activate { - ScannerGlobalDb::::queue_key( - &mut txn, - block_number + S::WINDOW_LENGTH, - key_to_activate, - ); - } + ScannerGlobalDb::::set_highest_acknowledged_block(&mut txn, block_number); + if let Some(key_to_activate) = key_to_activate { + ScannerGlobalDb::::queue_key( + &mut txn, + block_number + S::WINDOW_LENGTH, + key_to_activate, + ); + } - // Return the balances for any InInstructions which failed to execute - { - let return_information = report::take_return_information::(&mut txn, batch_id) - .expect("didn't save the return information for Batch we published"); - assert_eq!( + // Return the balances for any InInstructions which failed to execute + { + let return_information = report::take_return_information::(&mut txn, batch_id) + .expect("didn't save the return information for Batch we published"); + assert_eq!( in_instruction_succeededs.len(), return_information.len(), "amount of InInstruction succeededs differed from amount of return information saved" ); - // We map these into standard Burns - for (succeeded, return_information) in - in_instruction_succeededs.into_iter().zip(return_information) - { - if succeeded { - continue; - } - - if let Some(report::ReturnInformation { address, balance }) = return_information { - burns.push(OutInstructionWithBalance { - instruction: OutInstruction { address: address.into(), data: None }, - balance, - }); + // We map these into standard Burns + for (succeeded, return_information) in + in_instruction_succeededs.into_iter().zip(return_information) + { + if succeeded { + continue; + } + + if let Some(report::ReturnInformation { address, balance }) = return_information { + burns.push(OutInstructionWithBalance { + instruction: OutInstruction { address: address.into(), data: None }, + balance, + }); + } } } - } - // We send these Burns as stemming from this block we just acknowledged - // This causes them to be acted on after we accumulate the outputs from this block - SubstrateToEventualityDb::send_burns::(&mut txn, block_number, burns); - } + // We send these Burns as stemming from this block we just acknowledged + // This causes them to be acted on after we accumulate the outputs from this block + 
SubstrateToEventualityDb::send_burns::(&mut txn, block_number, burns); + } - Action::QueueBurns(burns) => { - // We can instantly handle this so long as we've handled all prior actions - made_progress = true; + Action::QueueBurns(burns) => { + // We can instantly handle this so long as we've handled all prior actions + made_progress = true; - let queue_as_of = ScannerGlobalDb::::highest_acknowledged_block(&txn) - .expect("queueing Burns yet never acknowledged a block"); + let queue_as_of = ScannerGlobalDb::::highest_acknowledged_block(&txn) + .expect("queueing Burns yet never acknowledged a block"); - SubstrateToEventualityDb::send_burns::(&mut txn, queue_as_of, burns); + SubstrateToEventualityDb::send_burns::(&mut txn, queue_as_of, burns); + } } - } - txn.commit(); + txn.commit(); + } } } } diff --git a/processor/signers/Cargo.toml b/processor/signers/Cargo.toml index 7b7ef0980..652228969 100644 --- a/processor/signers/Cargo.toml +++ b/processor/signers/Cargo.toml @@ -14,13 +14,12 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [package.metadata.cargo-machete] -ignored = ["borsh", "scale"] +ignored = ["borsh"] [lints] workspace = true [dependencies] -async-trait = { version = "0.1", default-features = false } rand_core = { version = "0.6", default-features = false } zeroize = { version = "1", default-features = false, features = ["std"] } diff --git a/processor/signers/src/batch/mod.rs b/processor/signers/src/batch/mod.rs index f08fb5e29..b8ad7ccb4 100644 --- a/processor/signers/src/batch/mod.rs +++ b/processor/signers/src/batch/mod.rs @@ -1,3 +1,4 @@ +use core::future::Future; use std::collections::HashSet; use ciphersuite::{group::GroupEncoding, Ristretto}; @@ -75,114 +76,115 @@ impl BatchSignerTask { } } -#[async_trait::async_trait] impl ContinuallyRan for BatchSignerTask { - async fn run_iteration(&mut self) -> Result { - let mut iterated = false; - - // Check for new batches to sign - loop { - let mut txn = self.db.txn(); - let Some(batch) = BatchesToSign::try_recv(&mut txn, &self.external_key) else { - break; - }; - iterated = true; - - // Save this to the database as a transaction to sign - self.active_signing_protocols.insert(batch.id); - ActiveSigningProtocols::set( - &mut txn, - self.session, - &self.active_signing_protocols.iter().copied().collect(), - ); - Batches::set(&mut txn, batch.id, &batch); - - let mut machines = Vec::with_capacity(self.keys.len()); - for keys in &self.keys { - machines.push(WrappedSchnorrkelMachine::new(keys.clone(), batch_message(&batch))); - } - for msg in self.attempt_manager.register(VariantSignId::Batch(batch.id), machines) { - BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); - } + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + // Check for new batches to sign + loop { + let mut txn = self.db.txn(); + let Some(batch) = BatchesToSign::try_recv(&mut txn, &self.external_key) else { + break; + }; + iterated = true; + + // Save this to the database as a transaction to sign + self.active_signing_protocols.insert(batch.id); + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + Batches::set(&mut txn, batch.id, &batch); + + let mut machines = Vec::with_capacity(self.keys.len()); + for keys in &self.keys { + machines.push(WrappedSchnorrkelMachine::new(keys.clone(), batch_message(&batch))); + } + for msg in self.attempt_manager.register(VariantSignId::Batch(batch.id), machines) { + 
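+          // `register` returns the messages which start this signing protocol; queue them
+          // for the coordinator task, which forwards them to the coordinator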
+          BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
+        }

-      txn.commit();
-    }
+        txn.commit();
+      }

-    // Check for acknowledged Batches (meaning we should no longer sign for these Batches)
-    loop {
-      let mut txn = self.db.txn();
-      let Some(id) = AcknowledgedBatches::try_recv(&mut txn, &self.external_key) else {
-        break;
-      };
-
-      {
-        let last_acknowledged = LastAcknowledgedBatch::get(&txn);
-        if Some(id) > last_acknowledged {
-          LastAcknowledgedBatch::set(&mut txn, &id);
+      // Check for acknowledged Batches (meaning we should no longer sign for these Batches)
+      loop {
+        let mut txn = self.db.txn();
+        let Some(id) = AcknowledgedBatches::try_recv(&mut txn, &self.external_key) else {
+          break;
+        };
+
+        {
+          let last_acknowledged = LastAcknowledgedBatch::get(&txn);
+          if Some(id) > last_acknowledged {
+            LastAcknowledgedBatch::set(&mut txn, &id);
+          }
          }
-      }

-      /*
-      We may have yet to register this signing protocol.
+        /*
+        We may have yet to register this signing protocol.

-      While `BatchesToSign` is populated before `AcknowledgedBatches`, we could theoretically have
-      `BatchesToSign` populated with a new batch _while iterating over `AcknowledgedBatches`_, and
-      then have `AcknowledgedBatched` populated. In that edge case, we will see the
-      acknowledgement notification before we see the transaction.
+        While `BatchesToSign` is populated before `AcknowledgedBatches`, we could theoretically
+        have `BatchesToSign` populated with a new batch _while iterating over
+        `AcknowledgedBatches`_, and then have `AcknowledgedBatches` populated. In that edge case,
+        we will see the acknowledgement notification before we see the Batch.

-      In such a case, we break (dropping the txn, re-queueing the acknowledgement notification).
-      On the task's next iteration, we'll process the Batch from `BatchesToSign` and be
-      able to make progress.
+        In such a case, we break (dropping the txn, re-queueing the acknowledgement notification).
+        On the task's next iteration, we'll process the Batch from `BatchesToSign` and be
+        able to make progress.
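+
+        (The transaction-signing task handles the identical race between `TransactionsToSign`
+        and `CompletedEventualities` the same way.)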
+ */ + if !self.active_signing_protocols.remove(&id) { + break; + } + iterated = true; + + // Since it was, remove this as an active signing protocol + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + // Clean up the database + Batches::del(&mut txn, id); + SignedBatches::del(&mut txn, id); + + // We retire with a txn so we either successfully flag this Batch as acknowledged, and + // won't re-register it (making this retire safe), or we don't flag it, meaning we will + // re-register it, yet that's safe as we have yet to retire it + self.attempt_manager.retire(&mut txn, VariantSignId::Batch(id)); + + txn.commit(); } - iterated = true; - - // Since it was, remove this as an active signing protocol - ActiveSigningProtocols::set( - &mut txn, - self.session, - &self.active_signing_protocols.iter().copied().collect(), - ); - // Clean up the database - Batches::del(&mut txn, id); - SignedBatches::del(&mut txn, id); - - // We retire with a txn so we either successfully flag this Batch as acknowledged, and - // won't re-register it (making this retire safe), or we don't flag it, meaning we will - // re-register it, yet that's safe as we have yet to retire it - self.attempt_manager.retire(&mut txn, VariantSignId::Batch(id)); - - txn.commit(); - } - // Handle any messages sent to us - loop { - let mut txn = self.db.txn(); - let Some(msg) = CoordinatorToBatchSignerMessages::try_recv(&mut txn, self.session) else { - break; - }; - iterated = true; - - match self.attempt_manager.handle(msg) { - Response::Messages(msgs) => { - for msg in msgs { - BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToBatchSignerMessages::try_recv(&mut txn, self.session) else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature } => { + let VariantSignId::Batch(id) = id else { panic!("BatchSignerTask signed a non-Batch") }; + let batch = + Batches::get(&txn, id).expect("signed a Batch we didn't save to the database"); + let signed_batch = SignedBatch { batch, signature: signature.into() }; + SignedBatches::set(&mut txn, signed_batch.batch.id, &signed_batch); } } - Response::Signature { id, signature } => { - let VariantSignId::Batch(id) = id else { panic!("BatchSignerTask signed a non-Batch") }; - let batch = - Batches::get(&txn, id).expect("signed a Batch we didn't save to the database"); - let signed_batch = SignedBatch { batch, signature: signature.into() }; - SignedBatches::set(&mut txn, signed_batch.batch.id, &signed_batch); - } + + txn.commit(); } - txn.commit(); + Ok(iterated) } - - Ok(iterated) } } diff --git a/processor/signers/src/coordinator/mod.rs b/processor/signers/src/coordinator/mod.rs index e749f8410..1e3c84d2a 100644 --- a/processor/signers/src/coordinator/mod.rs +++ b/processor/signers/src/coordinator/mod.rs @@ -1,3 +1,5 @@ +use core::future::Future; + use scale::Decode; use serai_db::{DbTxn, Db}; @@ -19,149 +21,157 @@ impl CoordinatorTask { } } -#[async_trait::async_trait] impl ContinuallyRan for CoordinatorTask { - async fn run_iteration(&mut self) -> Result { - let mut iterated = false; + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + for session in 
RegisteredKeys::get(&self.db).unwrap_or(vec![]) { + // Publish the messages generated by this key's signers + loop { + let mut txn = self.db.txn(); + let Some(msg) = CosignerToCoordinatorMessages::try_recv(&mut txn, session) else { + break; + }; + iterated = true; - for session in RegisteredKeys::get(&self.db).unwrap_or(vec![]) { - // Publish the messages generated by this key's signers - loop { - let mut txn = self.db.txn(); - let Some(msg) = CosignerToCoordinatorMessages::try_recv(&mut txn, session) else { - break; - }; - iterated = true; + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; - self - .coordinator - .send(msg) - .await - .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + txn.commit(); + } - txn.commit(); - } + loop { + let mut txn = self.db.txn(); + let Some(msg) = BatchSignerToCoordinatorMessages::try_recv(&mut txn, session) else { + break; + }; + iterated = true; - loop { - let mut txn = self.db.txn(); - let Some(msg) = BatchSignerToCoordinatorMessages::try_recv(&mut txn, session) else { - break; - }; - iterated = true; + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; - self - .coordinator - .send(msg) - .await - .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + txn.commit(); + } - txn.commit(); - } + loop { + let mut txn = self.db.txn(); + let Some(msg) = SlashReportSignerToCoordinatorMessages::try_recv(&mut txn, session) + else { + break; + }; + iterated = true; - loop { - let mut txn = self.db.txn(); - let Some(msg) = SlashReportSignerToCoordinatorMessages::try_recv(&mut txn, session) else { - break; - }; - iterated = true; + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; - self - .coordinator - .send(msg) - .await - .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + txn.commit(); + } - txn.commit(); - } + loop { + let mut txn = self.db.txn(); + let Some(msg) = TransactionSignerToCoordinatorMessages::try_recv(&mut txn, session) + else { + break; + }; + iterated = true; - loop { - let mut txn = self.db.txn(); - let Some(msg) = TransactionSignerToCoordinatorMessages::try_recv(&mut txn, session) else { - break; - }; - iterated = true; + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; - self - .coordinator - .send(msg) - .await - .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + txn.commit(); + } - txn.commit(); + // Publish the cosigns from this session + { + let mut txn = self.db.txn(); + while let Some(((block_number, block_id), signature)) = + Cosign::try_recv(&mut txn, session) + { + iterated = true; + self + .coordinator + .publish_cosign( + block_number, + block_id, + <_>::decode(&mut signature.as_slice()).unwrap(), + ) + .await + .map_err(|e| format!("couldn't publish Cosign: {e:?}"))?; + } + txn.commit(); + } + + // If this session signed its slash report, publish its signature + { + let mut txn = self.db.txn(); + if let Some(slash_report_signature) = SlashReportSignature::try_recv(&mut txn, session) { + iterated = true; + + self + .coordinator + .publish_slash_report_signature( + session, + <_>::decode(&mut slash_report_signature.as_slice()).unwrap(), + ) + .await + .map_err(|e| { + format!("couldn't send slash report signature to the 
coordinator: {e:?}") + })?; + + txn.commit(); + } + } } - // Publish the cosigns from this session + // Publish the Batches { let mut txn = self.db.txn(); - while let Some(((block_number, block_id), signature)) = Cosign::try_recv(&mut txn, session) - { + while let Some(batch) = scanner::Batches::try_recv(&mut txn) { iterated = true; self .coordinator - .publish_cosign(block_number, block_id, <_>::decode(&mut signature.as_slice()).unwrap()) + .publish_batch(batch) .await - .map_err(|e| format!("couldn't publish Cosign: {e:?}"))?; + .map_err(|e| format!("couldn't publish Batch: {e:?}"))?; } txn.commit(); } - // If this session signed its slash report, publish its signature + // Publish the signed Batches { let mut txn = self.db.txn(); - if let Some(slash_report_signature) = SlashReportSignature::try_recv(&mut txn, session) { + // The last acknowledged Batch may exceed the last Batch we published if we didn't sign for + // the prior Batch(es) (and accordingly didn't publish them) + let last_batch = + crate::batch::last_acknowledged_batch(&txn).max(db::LastPublishedBatch::get(&txn)); + let mut next_batch = last_batch.map_or(0, |id| id + 1); + while let Some(batch) = crate::batch::signed_batch(&txn, next_batch) { iterated = true; - + db::LastPublishedBatch::set(&mut txn, &batch.batch.id); self .coordinator - .publish_slash_report_signature( - session, - <_>::decode(&mut slash_report_signature.as_slice()).unwrap(), - ) + .publish_signed_batch(batch) .await - .map_err(|e| { - format!("couldn't send slash report signature to the coordinator: {e:?}") - })?; - - txn.commit(); + .map_err(|e| format!("couldn't publish Batch: {e:?}"))?; + next_batch += 1; } + txn.commit(); } - } - - // Publish the Batches - { - let mut txn = self.db.txn(); - while let Some(batch) = scanner::Batches::try_recv(&mut txn) { - iterated = true; - self - .coordinator - .publish_batch(batch) - .await - .map_err(|e| format!("couldn't publish Batch: {e:?}"))?; - } - txn.commit(); - } - // Publish the signed Batches - { - let mut txn = self.db.txn(); - // The last acknowledged Batch may exceed the last Batch we published if we didn't sign for - // the prior Batch(es) (and accordingly didn't publish them) - let last_batch = - crate::batch::last_acknowledged_batch(&txn).max(db::LastPublishedBatch::get(&txn)); - let mut next_batch = last_batch.map_or(0, |id| id + 1); - while let Some(batch) = crate::batch::signed_batch(&txn, next_batch) { - iterated = true; - db::LastPublishedBatch::set(&mut txn, &batch.batch.id); - self - .coordinator - .publish_signed_batch(batch) - .await - .map_err(|e| format!("couldn't publish Batch: {e:?}"))?; - next_batch += 1; - } - txn.commit(); + Ok(iterated) } - - Ok(iterated) } } diff --git a/processor/signers/src/cosign/mod.rs b/processor/signers/src/cosign/mod.rs index 41db80504..2de18e868 100644 --- a/processor/signers/src/cosign/mod.rs +++ b/processor/signers/src/cosign/mod.rs @@ -1,3 +1,5 @@ +use core::future::Future; + use ciphersuite::Ristretto; use frost::dkg::ThresholdKeys; @@ -48,75 +50,76 @@ impl CosignerTask { } } -#[async_trait::async_trait] impl ContinuallyRan for CosignerTask { - async fn run_iteration(&mut self) -> Result { - let mut iterated = false; - - // Check the cosign to work on - { - let mut txn = self.db.txn(); - if let Some(cosign) = ToCosign::get(&txn, self.session) { - // If this wasn't already signed for... 
- if LatestCosigned::get(&txn, self.session) < Some(cosign.0) { - // If this isn't the cosign we're currently working on, meaning it's fresh - if self.current_cosign != Some(cosign) { - // Retire the current cosign - if let Some(current_cosign) = self.current_cosign { - assert!(current_cosign.0 < cosign.0); - self.attempt_manager.retire(&mut txn, VariantSignId::Cosign(current_cosign.0)); - } + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + // Check the cosign to work on + { + let mut txn = self.db.txn(); + if let Some(cosign) = ToCosign::get(&txn, self.session) { + // If this wasn't already signed for... + if LatestCosigned::get(&txn, self.session) < Some(cosign.0) { + // If this isn't the cosign we're currently working on, meaning it's fresh + if self.current_cosign != Some(cosign) { + // Retire the current cosign + if let Some(current_cosign) = self.current_cosign { + assert!(current_cosign.0 < cosign.0); + self.attempt_manager.retire(&mut txn, VariantSignId::Cosign(current_cosign.0)); + } - // Set the cosign being worked on - self.current_cosign = Some(cosign); + // Set the cosign being worked on + self.current_cosign = Some(cosign); - let mut machines = Vec::with_capacity(self.keys.len()); - { - let message = cosign_block_msg(cosign.0, cosign.1); - for keys in &self.keys { - machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone())); + let mut machines = Vec::with_capacity(self.keys.len()); + { + let message = cosign_block_msg(cosign.0, cosign.1); + for keys in &self.keys { + machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone())); + } + } + for msg in self.attempt_manager.register(VariantSignId::Cosign(cosign.0), machines) { + CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg); } - } - for msg in self.attempt_manager.register(VariantSignId::Cosign(cosign.0), machines) { - CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg); - } - txn.commit(); + txn.commit(); + } } } } - } - // Handle any messages sent to us - loop { - let mut txn = self.db.txn(); - let Some(msg) = CoordinatorToCosignerMessages::try_recv(&mut txn, self.session) else { - break; - }; - iterated = true; - - match self.attempt_manager.handle(msg) { - Response::Messages(msgs) => { - for msg in msgs { - CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToCosignerMessages::try_recv(&mut txn, self.session) else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature } => { + let VariantSignId::Cosign(block_number) = id else { + panic!("CosignerTask signed a non-Cosign") + }; + assert_eq!(Some(block_number), self.current_cosign.map(|cosign| cosign.0)); + + let cosign = self.current_cosign.take().unwrap(); + LatestCosigned::set(&mut txn, self.session, &cosign.0); + // Send the cosign + Cosign::send(&mut txn, self.session, &(cosign, Signature::from(signature).encode())); } } - Response::Signature { id, signature } => { - let VariantSignId::Cosign(block_number) = id else { - panic!("CosignerTask signed a non-Cosign") - }; - assert_eq!(Some(block_number), self.current_cosign.map(|cosign| cosign.0)); - - let cosign = self.current_cosign.take().unwrap(); - LatestCosigned::set(&mut txn, self.session, 
&cosign.0); - // Send the cosign - Cosign::send(&mut txn, self.session, &(cosign, Signature::from(signature).encode())); - } + + txn.commit(); } - txn.commit(); + Ok(iterated) } - - Ok(iterated) } } diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index e06dd07f9..c76fbd325 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -2,7 +2,7 @@ #![doc = include_str!("../README.md")] #![deny(missing_docs)] -use core::{fmt::Debug, marker::PhantomData}; +use core::{future::Future, fmt::Debug, marker::PhantomData}; use std::collections::HashMap; use zeroize::Zeroizing; @@ -43,7 +43,6 @@ mod transaction; use transaction::TransactionSignerTask; /// A connection to the Coordinator which messages can be published with. -#[async_trait::async_trait] pub trait Coordinator: 'static + Send + Sync { /// An error encountered when interacting with a coordinator. /// @@ -52,32 +51,38 @@ pub trait Coordinator: 'static + Send + Sync { type EphemeralError: Debug; /// Send a `messages::sign::ProcessorMessage`. - async fn send(&mut self, message: ProcessorMessage) -> Result<(), Self::EphemeralError>; + fn send( + &mut self, + message: ProcessorMessage, + ) -> impl Send + Future>; /// Publish a cosign. - async fn publish_cosign( + fn publish_cosign( &mut self, block_number: u64, block_id: [u8; 32], signature: Signature, - ) -> Result<(), Self::EphemeralError>; + ) -> impl Send + Future>; /// Publish a `Batch`. - async fn publish_batch(&mut self, batch: Batch) -> Result<(), Self::EphemeralError>; + fn publish_batch(&mut self, batch: Batch) + -> impl Send + Future>; /// Publish a `SignedBatch`. - async fn publish_signed_batch(&mut self, batch: SignedBatch) -> Result<(), Self::EphemeralError>; + fn publish_signed_batch( + &mut self, + batch: SignedBatch, + ) -> impl Send + Future>; /// Publish a slash report's signature. - async fn publish_slash_report_signature( + fn publish_slash_report_signature( &mut self, session: Session, signature: Signature, - ) -> Result<(), Self::EphemeralError>; + ) -> impl Send + Future>; } /// An object capable of publishing a transaction. -#[async_trait::async_trait] pub trait TransactionPublisher: 'static + Send + Sync + Clone { /// An error encountered when publishing a transaction. /// @@ -92,7 +97,7 @@ pub trait TransactionPublisher: 'static + Send + Sync + Clone { /// /// The transaction already being present in the mempool/on-chain MUST NOT be considered an /// error. 
- async fn publish(&self, tx: T) -> Result<(), Self::EphemeralError>; + fn publish(&self, tx: T) -> impl Send + Future>; } struct Tasks { diff --git a/processor/signers/src/slash_report.rs b/processor/signers/src/slash_report.rs index 19a2523b9..e040798cd 100644 --- a/processor/signers/src/slash_report.rs +++ b/processor/signers/src/slash_report.rs @@ -1,4 +1,4 @@ -use core::marker::PhantomData; +use core::{marker::PhantomData, future::Future}; use ciphersuite::Ristretto; use frost::dkg::ThresholdKeys; @@ -51,70 +51,72 @@ impl SlashReportSignerTask { } } -#[async_trait::async_trait] impl ContinuallyRan for SlashReportSignerTask { - async fn run_iteration(&mut self) -> Result { - let mut iterated = false; - - // Check for the slash report to sign - if !self.has_slash_report { - let mut txn = self.db.txn(); - let Some(slash_report) = SlashReport::try_recv(&mut txn, self.session) else { - return Ok(false); - }; - // We only commit this upon successfully signing this slash report - drop(txn); - iterated = true; - - self.has_slash_report = true; - - let mut machines = Vec::with_capacity(self.keys.len()); - { - let message = report_slashes_message( - &ValidatorSet { network: S::NETWORK, session: self.session }, - &SlashReportStruct(slash_report.try_into().unwrap()), - ); - for keys in &self.keys { - machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone())); + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + // Check for the slash report to sign + if !self.has_slash_report { + let mut txn = self.db.txn(); + let Some(slash_report) = SlashReport::try_recv(&mut txn, self.session) else { + return Ok(false); + }; + // We only commit this upon successfully signing this slash report + drop(txn); + iterated = true; + + self.has_slash_report = true; + + let mut machines = Vec::with_capacity(self.keys.len()); + { + let message = report_slashes_message( + &ValidatorSet { network: S::NETWORK, session: self.session }, + &SlashReportStruct(slash_report.try_into().unwrap()), + ); + for keys in &self.keys { + machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone())); + } } + let mut txn = self.db.txn(); + for msg in self.attempt_manager.register(VariantSignId::SlashReport(self.session), machines) + { + SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + txn.commit(); } - let mut txn = self.db.txn(); - for msg in self.attempt_manager.register(VariantSignId::SlashReport(self.session), machines) { - SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); - } - txn.commit(); - } - // Handle any messages sent to us - loop { - let mut txn = self.db.txn(); - let Some(msg) = CoordinatorToSlashReportSignerMessages::try_recv(&mut txn, self.session) - else { - break; - }; - iterated = true; - - match self.attempt_manager.handle(msg) { - Response::Messages(msgs) => { - for msg in msgs { - SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToSlashReportSignerMessages::try_recv(&mut txn, self.session) + else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature } => { + let VariantSignId::SlashReport(session) = id else { + panic!("SlashReportSignerTask signed a 
non-SlashReport") + }; + assert_eq!(session, self.session); + // Drain the channel + SlashReport::try_recv(&mut txn, self.session).unwrap(); + // Send the signature + SlashReportSignature::send(&mut txn, session, &Signature::from(signature).encode()); } } - Response::Signature { id, signature } => { - let VariantSignId::SlashReport(session) = id else { - panic!("SlashReportSignerTask signed a non-SlashReport") - }; - assert_eq!(session, self.session); - // Drain the channel - SlashReport::try_recv(&mut txn, self.session).unwrap(); - // Send the signature - SlashReportSignature::send(&mut txn, session, &Signature::from(signature).encode()); - } + + txn.commit(); } - txn.commit(); + Ok(iterated) } - - Ok(iterated) } } diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs index b9b62e753..f089e9318 100644 --- a/processor/signers/src/transaction/mod.rs +++ b/processor/signers/src/transaction/mod.rs @@ -1,3 +1,4 @@ +use core::future::Future; use std::{ collections::HashSet, time::{Duration, Instant}, @@ -88,11 +89,10 @@ impl> } } -#[async_trait::async_trait] impl>> ContinuallyRan for TransactionSignerTask { - async fn run_iteration(&mut self) -> Result { + fn run_iteration(&mut self) -> impl Send + Future> {async{ let mut iterated = false; // Check for new transactions to sign @@ -233,3 +233,4 @@ impl> Ok(iterated) } } +} diff --git a/processor/src/tests/scanner.rs b/processor/src/tests/scanner.rs index 6421c499a..a40e465c8 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/src/tests/scanner.rs @@ -71,7 +71,7 @@ pub async fn test_scanner( let block_id = block.id(); // Verify the Scanner picked them up - let verify_event = |mut scanner: ScannerHandle| async { + let verify_event = |mut scanner: ScannerHandle| async move { let outputs = match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { is_retirement_block, block, outputs } => { From 3db4983e0981dd5efc679375b98624ed9ba138d8 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 13 Sep 2024 02:12:32 -0400 Subject: [PATCH 116/179] Define subaddress indexes to use (1, 0) is the external address. (2, *) are the internal addresses. 
--- Cargo.lock | 7 ------- processor/monero/src/primitives/block.rs | 13 +++++++++++-- processor/monero/src/primitives/mod.rs | 7 +++++++ processor/monero/src/primitives/output.rs | 16 +++++++++++++++- 4 files changed, 33 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b3419a85e..01edbcfe1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8123,7 +8123,6 @@ dependencies = [ name = "serai-bitcoin-processor" version = "0.1.0" dependencies = [ - "async-trait", "bitcoin-serai", "borsh", "ciphersuite", @@ -8349,7 +8348,6 @@ version = "0.1.0" name = "serai-ethereum-processor" version = "0.1.0" dependencies = [ - "async-trait", "borsh", "const-hex", "env_logger", @@ -8514,7 +8512,6 @@ dependencies = [ name = "serai-monero-processor" version = "0.1.0" dependencies = [ - "async-trait", "borsh", "ciphersuite", "dalek-ff-group", @@ -8644,7 +8641,6 @@ dependencies = [ name = "serai-processor-bin" version = "0.1.0" dependencies = [ - "async-trait", "borsh", "ciphersuite", "dkg", @@ -8718,7 +8714,6 @@ dependencies = [ name = "serai-processor-primitives" version = "0.1.0" dependencies = [ - "async-trait", "borsh", "group", "log", @@ -8732,7 +8727,6 @@ dependencies = [ name = "serai-processor-scanner" version = "0.1.0" dependencies = [ - "async-trait", "borsh", "group", "hex", @@ -8762,7 +8756,6 @@ dependencies = [ name = "serai-processor-signers" version = "0.1.0" dependencies = [ - "async-trait", "borsh", "ciphersuite", "frost-schnorrkel", diff --git a/processor/monero/src/primitives/block.rs b/processor/monero/src/primitives/block.rs index ad28b0c17..634a0fbbe 100644 --- a/processor/monero/src/primitives/block.rs +++ b/processor/monero/src/primitives/block.rs @@ -2,13 +2,13 @@ use std::collections::HashMap; use ciphersuite::{Ciphersuite, Ed25519}; -use monero_wallet::{transaction::Transaction, block::Block as MBlock}; +use monero_wallet::{transaction::Transaction, block::Block as MBlock, ViewPairError, GuaranteedViewPair, GuaranteedScanner}; use serai_client::networks::monero::Address; use primitives::{ReceivedOutput, EventualityTracker}; -use crate::{output::Output, transaction::Eventuality}; +use crate::{EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS, output::Output, transaction::Eventuality}; #[derive(Clone, Debug)] pub(crate) struct BlockHeader(pub(crate) MBlock); @@ -37,6 +37,15 @@ impl primitives::Block for Block { } fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec { + let view_pair = match GuaranteedViewPair::new(key.0, additional_key) { + Ok(view_pair) => view_pair, + Err(ViewPairError::TorsionedSpendKey) => unreachable!("dalek_ff_group::EdwardsPoint has torsion"), + }; + let mut scanner = GuaranteedScanner::new(view_pair); + scanner.register_subaddress(EXTERNAL_SUBADDRESS.unwrap()); + scanner.register_subaddress(BRANCH_SUBADDRESS.unwrap()); + scanner.register_subaddress(CHANGE_SUBADDRESS.unwrap()); + scanner.register_subaddress(FORWARDED_SUBADDRESS.unwrap()); todo!("TODO") } diff --git a/processor/monero/src/primitives/mod.rs b/processor/monero/src/primitives/mod.rs index fba52dd96..de0573995 100644 --- a/processor/monero/src/primitives/mod.rs +++ b/processor/monero/src/primitives/mod.rs @@ -1,3 +1,10 @@ +use monero_wallet::address::SubaddressIndex; + pub(crate) mod output; pub(crate) mod transaction; pub(crate) mod block; + +pub(crate) const EXTERNAL_SUBADDRESS: Option = SubaddressIndex::new(1, 0); +pub(crate) const BRANCH_SUBADDRESS: Option = SubaddressIndex::new(2, 0); +pub(crate) const CHANGE_SUBADDRESS: Option = 
SubaddressIndex::new(2, 1); +pub(crate) const FORWARDED_SUBADDRESS: Option = SubaddressIndex::new(2, 2); diff --git a/processor/monero/src/primitives/output.rs b/processor/monero/src/primitives/output.rs index d3eb3be3b..385429c2f 100644 --- a/processor/monero/src/primitives/output.rs +++ b/processor/monero/src/primitives/output.rs @@ -14,6 +14,8 @@ use serai_client::{ use primitives::{OutputType, ReceivedOutput}; +use crate::{EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS}; + #[rustfmt::skip] #[derive( Clone, Copy, PartialEq, Eq, Default, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, @@ -44,7 +46,19 @@ impl ReceivedOutput<::G, Address> for Output { type TransactionId = [u8; 32]; fn kind(&self) -> OutputType { - todo!("TODO") + if self.0.subaddress() == EXTERNAL_SUBADDRESS { + return OutputType::External; + } + if self.0.subaddress() == BRANCH_SUBADDRESS { + return OutputType::Branch; + } + if self.0.subaddress() == CHANGE_SUBADDRESS { + return OutputType::Change; + } + if self.0.subaddress() == FORWARDED_SUBADDRESS { + return OutputType::Forwarded; + } + unreachable!("scanned output to unknown subaddress"); } fn id(&self) -> Self::Id { From 68f46d1437ad4edae7d265c8d83176261378e7f6 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 13 Sep 2024 05:10:37 -0400 Subject: [PATCH 117/179] cargo fmt signers/scanner --- processor/scanner/src/lib.rs | 36 ++-- processor/signers/src/lib.rs | 6 +- processor/signers/src/transaction/mod.rs | 254 ++++++++++++----------- 3 files changed, 151 insertions(+), 145 deletions(-) diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index a5c5c0387..6ac452237 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -145,25 +145,27 @@ pub trait ScannerFeed: 'static + Send + Sync + Clone { getter: &(impl Send + Sync + Get), number: u64, ) -> impl Send + Future> { - async move {let block = match self.unchecked_block_by_number(number).await { - Ok(block) => block, - Err(e) => Err(format!("couldn't fetch block {number}: {e:?}"))?, - }; - - // Check the ID of this block is the expected ID - { - let expected = crate::index::block_id(getter, number); - if block.id() != expected { - panic!( - "finalized chain reorganized from {} to {} at {}", - hex::encode(expected), - hex::encode(block.id()), - number, - ); + async move { + let block = match self.unchecked_block_by_number(number).await { + Ok(block) => block, + Err(e) => Err(format!("couldn't fetch block {number}: {e:?}"))?, + }; + + // Check the ID of this block is the expected ID + { + let expected = crate::index::block_id(getter, number); + if block.id() != expected { + panic!( + "finalized chain reorganized from {} to {} at {}", + hex::encode(expected), + hex::encode(block.id()), + number, + ); + } } - } - Ok(block)} + Ok(block) + } } /// The dust threshold for the specified coin. diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs index c76fbd325..a6714fdf6 100644 --- a/processor/signers/src/lib.rs +++ b/processor/signers/src/lib.rs @@ -65,8 +65,10 @@ pub trait Coordinator: 'static + Send + Sync { ) -> impl Send + Future>; /// Publish a `Batch`. - fn publish_batch(&mut self, batch: Batch) - -> impl Send + Future>; + fn publish_batch( + &mut self, + batch: Batch, + ) -> impl Send + Future>; /// Publish a `SignedBatch`. 
fn publish_signed_batch( diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs index f089e9318..efb202173 100644 --- a/processor/signers/src/transaction/mod.rs +++ b/processor/signers/src/transaction/mod.rs @@ -92,145 +92,147 @@ impl> impl>> ContinuallyRan for TransactionSignerTask { - fn run_iteration(&mut self) -> impl Send + Future> {async{ - let mut iterated = false; - - // Check for new transactions to sign - loop { - let mut txn = self.db.txn(); - let Some(tx) = TransactionsToSign::::try_recv(&mut txn, &self.keys[0].group_key()) else { - break; - }; - iterated = true; - - // Save this to the database as a transaction to sign - self.active_signing_protocols.insert(tx.id()); - ActiveSigningProtocols::set( - &mut txn, - self.session, - &self.active_signing_protocols.iter().copied().collect(), - ); - { - let mut buf = Vec::with_capacity(256); - tx.write(&mut buf).unwrap(); - SerializedSignableTransactions::set(&mut txn, tx.id(), &buf); - } - - let mut machines = Vec::with_capacity(self.keys.len()); - for keys in &self.keys { - machines.push(tx.clone().sign(keys.clone())); - } - for msg in self.attempt_manager.register(VariantSignId::Transaction(tx.id()), machines) { - TransactionSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); - } + fn run_iteration(&mut self) -> impl Send + Future> { + async { + let mut iterated = false; + + // Check for new transactions to sign + loop { + let mut txn = self.db.txn(); + let Some(tx) = TransactionsToSign::::try_recv(&mut txn, &self.keys[0].group_key()) + else { + break; + }; + iterated = true; + + // Save this to the database as a transaction to sign + self.active_signing_protocols.insert(tx.id()); + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + { + let mut buf = Vec::with_capacity(256); + tx.write(&mut buf).unwrap(); + SerializedSignableTransactions::set(&mut txn, tx.id(), &buf); + } - txn.commit(); - } + let mut machines = Vec::with_capacity(self.keys.len()); + for keys in &self.keys { + machines.push(tx.clone().sign(keys.clone())); + } + for msg in self.attempt_manager.register(VariantSignId::Transaction(tx.id()), machines) { + TransactionSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } - // Check for completed Eventualities (meaning we should no longer sign for these transactions) - loop { - let mut txn = self.db.txn(); - let Some(id) = CompletedEventualities::try_recv(&mut txn, &self.keys[0].group_key()) else { - break; - }; - - /* - We may have yet to register this signing protocol. - - While `TransactionsToSign` is populated before `CompletedEventualities`, we could - theoretically have `TransactionsToSign` populated with a new transaction _while iterating - over `CompletedEventualities`_, and then have `CompletedEventualities` populated. In that - edge case, we will see the completion notification before we see the transaction. - - In such a case, we break (dropping the txn, re-queueing the completion notification). On - the task's next iteration, we'll process the transaction from `TransactionsToSign` and be - able to make progress. 
- */ - if !self.active_signing_protocols.remove(&id) { - break; + txn.commit(); } - iterated = true; - - // Since it was, remove this as an active signing protocol - ActiveSigningProtocols::set( - &mut txn, - self.session, - &self.active_signing_protocols.iter().copied().collect(), - ); - // Clean up the database - SerializedSignableTransactions::del(&mut txn, id); - SerializedTransactions::del(&mut txn, id); - - // We retire with a txn so we either successfully flag this Eventuality as completed, and - // won't re-register it (making this retire safe), or we don't flag it, meaning we will - // re-register it, yet that's safe as we have yet to retire it - self.attempt_manager.retire(&mut txn, VariantSignId::Transaction(id)); - - txn.commit(); - } - // Handle any messages sent to us - loop { - let mut txn = self.db.txn(); - let Some(msg) = CoordinatorToTransactionSignerMessages::try_recv(&mut txn, self.session) - else { - break; - }; - iterated = true; - - match self.attempt_manager.handle(msg) { - Response::Messages(msgs) => { - for msg in msgs { - TransactionSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); - } + // Check for completed Eventualities (meaning we should no longer sign for these transactions) + loop { + let mut txn = self.db.txn(); + let Some(id) = CompletedEventualities::try_recv(&mut txn, &self.keys[0].group_key()) else { + break; + }; + + /* + We may have yet to register this signing protocol. + + While `TransactionsToSign` is populated before `CompletedEventualities`, we could + theoretically have `TransactionsToSign` populated with a new transaction _while iterating + over `CompletedEventualities`_, and then have `CompletedEventualities` populated. In that + edge case, we will see the completion notification before we see the transaction. + + In such a case, we break (dropping the txn, re-queueing the completion notification). On + the task's next iteration, we'll process the transaction from `TransactionsToSign` and be + able to make progress. 
+ */ + if !self.active_signing_protocols.remove(&id) { + break; } - Response::Signature { id, signature: signed_tx } => { - let signed_tx: TransactionFor = signed_tx.into(); - - // Save this transaction to the database - { - let mut buf = Vec::with_capacity(256); - signed_tx.write(&mut buf).unwrap(); - SerializedTransactions::set( - &mut txn, - match id { - VariantSignId::Transaction(id) => id, - _ => panic!("TransactionSignerTask signed a non-transaction"), - }, - &buf, - ); - } + iterated = true; + + // Since it was, remove this as an active signing protocol + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + // Clean up the database + SerializedSignableTransactions::del(&mut txn, id); + SerializedTransactions::del(&mut txn, id); + + // We retire with a txn so we either successfully flag this Eventuality as completed, and + // won't re-register it (making this retire safe), or we don't flag it, meaning we will + // re-register it, yet that's safe as we have yet to retire it + self.attempt_manager.retire(&mut txn, VariantSignId::Transaction(id)); + + txn.commit(); + } - match self.publisher.publish(signed_tx).await { - Ok(()) => {} - Err(e) => log::warn!("couldn't broadcast transaction: {e:?}"), + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToTransactionSignerMessages::try_recv(&mut txn, self.session) + else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + TransactionSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature: signed_tx } => { + let signed_tx: TransactionFor = signed_tx.into(); + + // Save this transaction to the database + { + let mut buf = Vec::with_capacity(256); + signed_tx.write(&mut buf).unwrap(); + SerializedTransactions::set( + &mut txn, + match id { + VariantSignId::Transaction(id) => id, + _ => panic!("TransactionSignerTask signed a non-transaction"), + }, + &buf, + ); + } + + match self.publisher.publish(signed_tx).await { + Ok(()) => {} + Err(e) => log::warn!("couldn't broadcast transaction: {e:?}"), + } } } + + txn.commit(); } - txn.commit(); - } + // If it's been five minutes since the last publication, republish the transactions for all + // active signing protocols + if Instant::now().duration_since(self.last_publication) > Duration::from_secs(5 * 60) { + for tx in &self.active_signing_protocols { + let Some(tx_buf) = SerializedTransactions::get(&self.db, *tx) else { continue }; + let mut tx_buf = tx_buf.as_slice(); + let tx = TransactionFor::::read(&mut tx_buf).unwrap(); + assert!(tx_buf.is_empty()); + + self + .publisher + .publish(tx) + .await + .map_err(|e| format!("couldn't re-broadcast transactions: {e:?}"))?; + } - // If it's been five minutes since the last publication, republish the transactions for all - // active signing protocols - if Instant::now().duration_since(self.last_publication) > Duration::from_secs(5 * 60) { - for tx in &self.active_signing_protocols { - let Some(tx_buf) = SerializedTransactions::get(&self.db, *tx) else { continue }; - let mut tx_buf = tx_buf.as_slice(); - let tx = TransactionFor::::read(&mut tx_buf).unwrap(); - assert!(tx_buf.is_empty()); - - self - .publisher - .publish(tx) - .await - .map_err(|e| format!("couldn't re-broadcast transactions: {e:?}"))?; + self.last_publication = Instant::now(); } - self.last_publication = Instant::now(); + Ok(iterated) 
} - - Ok(iterated) } } -} From 558a6d8f2c870323b0488af275b8739288d9392a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 13 Sep 2024 05:11:07 -0400 Subject: [PATCH 118/179] Monero Processor scan, check_for_eventuality_resolutions --- Cargo.lock | 3 + processor/monero/Cargo.toml | 2 + processor/monero/src/lib.rs | 283 ------------------ processor/monero/src/primitives/block.rs | 46 ++- processor/monero/src/primitives/output.rs | 2 +- .../monero/src/primitives/transaction.rs | 2 +- processor/monero/src/rpc.rs | 2 +- 7 files changed, 44 insertions(+), 296 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 01edbcfe1..b08cde037 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5105,6 +5105,7 @@ dependencies = [ "hex", "modular-frost", "monero-address", + "monero-clsag", "monero-rpc", "monero-serai", "monero-simple-request-rpc", @@ -8534,8 +8535,10 @@ dependencies = [ "serai-processor-signers", "serai-processor-utxo-scheduler", "serai-processor-utxo-scheduler-primitives", + "serai-processor-view-keys", "tokio", "zalloc", + "zeroize", ] [[package]] diff --git a/processor/monero/Cargo.toml b/processor/monero/Cargo.toml index 22137b2db..6f9ce40a5 100644 --- a/processor/monero/Cargo.toml +++ b/processor/monero/Cargo.toml @@ -18,6 +18,7 @@ workspace = true [dependencies] rand_core = { version = "0.6", default-features = false } +zeroize = { version = "1", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } @@ -41,6 +42,7 @@ tokio = { version = "1", default-features = false, features = ["rt-multi-thread" serai-db = { path = "../../common/db" } key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } +view-keys = { package = "serai-processor-view-keys", path = "../view-keys" } primitives = { package = "serai-processor-primitives", path = "../primitives" } scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } diff --git a/processor/monero/src/lib.rs b/processor/monero/src/lib.rs index f9b334ef5..46ce16d3b 100644 --- a/processor/monero/src/lib.rs +++ b/processor/monero/src/lib.rs @@ -1,119 +1,4 @@ /* -#![cfg_attr(docsrs, feature(doc_auto_cfg))] -#![doc = include_str!("../README.md")] -#![deny(missing_docs)] - -use std::{time::Duration, collections::HashMap, io}; - -use async_trait::async_trait; - -use zeroize::Zeroizing; - -use rand_core::SeedableRng; -use rand_chacha::ChaCha20Rng; - -use transcript::{Transcript, RecommendedTranscript}; - -use ciphersuite::group::{ff::Field, Group}; -use dalek_ff_group::{Scalar, EdwardsPoint}; -use frost::{curve::Ed25519, ThresholdKeys}; - -use monero_simple_request_rpc::SimpleRequestRpc; -use monero_wallet::{ - ringct::RctType, - transaction::Transaction, - block::Block, - rpc::{FeeRate, RpcError, Rpc}, - address::{Network as MoneroNetwork, SubaddressIndex}, - ViewPair, GuaranteedViewPair, WalletOutput, OutputWithDecoys, GuaranteedScanner, - send::{ - SendError, Change, SignableTransaction as MSignableTransaction, Eventuality, TransactionMachine, - }, -}; -#[cfg(test)] -use monero_wallet::Scanner; - -use tokio::time::sleep; - -pub use serai_client::{ - primitives::{MAX_DATA_LEN, Coin, NetworkId, Amount, Balance}, - networks::monero::Address, -}; - -use crate::{ - Payment, additional_key, - networks::{ - NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, - Transaction as TransactionTrait, SignableTransaction as 
SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, - }, - multisigs::scheduler::utxo::Scheduler, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Output(WalletOutput); - -const EXTERNAL_SUBADDRESS: Option = SubaddressIndex::new(0, 0); -const BRANCH_SUBADDRESS: Option = SubaddressIndex::new(1, 0); -const CHANGE_SUBADDRESS: Option = SubaddressIndex::new(2, 0); -const FORWARD_SUBADDRESS: Option = SubaddressIndex::new(3, 0); - -impl OutputTrait for Output { - // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug. - // While we already are immune, thanks to using featured address, this doesn't hurt and is - // technically more efficient. - type Id = [u8; 32]; - - fn kind(&self) -> OutputType { - match self.0.subaddress() { - EXTERNAL_SUBADDRESS => OutputType::External, - BRANCH_SUBADDRESS => OutputType::Branch, - CHANGE_SUBADDRESS => OutputType::Change, - FORWARD_SUBADDRESS => OutputType::Forwarded, - _ => panic!("unrecognized address was scanned for"), - } - } - - fn id(&self) -> Self::Id { - self.0.key().compress().to_bytes() - } - - fn tx_id(&self) -> [u8; 32] { - self.0.transaction() - } - - fn key(&self) -> EdwardsPoint { - EdwardsPoint(self.0.key() - (EdwardsPoint::generator().0 * self.0.key_offset())) - } - - fn presumed_origin(&self) -> Option
{ - None - } - - fn balance(&self) -> Balance { - Balance { coin: Coin::Monero, amount: Amount(self.0.commitment().amount) } - } - - fn data(&self) -> &[u8] { - let Some(data) = self.0.arbitrary_data().first() else { return &[] }; - // If the data is too large, prune it - // This should cause decoding the instruction to fail, and trigger a refund as appropriate - if data.len() > usize::try_from(MAX_DATA_LEN).unwrap() { - return &[]; - } - data - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - self.0.write(writer)?; - Ok(()) - } - - fn read(reader: &mut R) -> io::Result { - Ok(Output(WalletOutput::read(reader)?)) - } -} - // TODO: Consider ([u8; 32], TransactionPruned) #[async_trait] impl TransactionTrait for Transaction { @@ -227,29 +112,6 @@ impl BlockTrait for Block { } } -#[derive(Clone, Debug)] -pub struct Monero { - rpc: SimpleRequestRpc, -} -// Shim required for testing/debugging purposes due to generic arguments also necessitating trait -// bounds -impl PartialEq for Monero { - fn eq(&self, _: &Self) -> bool { - true - } -} -impl Eq for Monero {} - -#[allow(clippy::needless_pass_by_value)] // Needed to satisfy API expectations -fn map_rpc_err(err: RpcError) -> NetworkError { - if let RpcError::InvalidNode(reason) = &err { - log::error!("Monero RpcError::InvalidNode({reason})"); - } else { - log::debug!("Monero RpcError {err:?}"); - } - NetworkError::ConnectionError -} - enum MakeSignableTransactionResult { Fee(u64), SignableTransaction(MSignableTransaction), @@ -461,20 +323,6 @@ impl Monero { #[async_trait] impl Network for Monero { - type Curve = Ed25519; - - type Transaction = Transaction; - type Block = Block; - - type Output = Output; - type SignableTransaction = SignableTransaction; - type Eventuality = Eventuality; - type TransactionMachine = TransactionMachine; - - type Scheduler = Scheduler; - - type Address = Address; - const NETWORK: NetworkId = NetworkId::Monero; const ID: &'static str = "Monero"; const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 120; @@ -488,9 +336,6 @@ impl Network for Monero { // TODO const COST_TO_AGGREGATE: u64 = 0; - // Monero doesn't require/benefit from tweaking - fn tweak_keys(_: &mut ThresholdKeys) {} - #[cfg(test)] async fn external_address(&self, key: EdwardsPoint) -> Address { Self::address_internal(key, EXTERNAL_SUBADDRESS) @@ -508,121 +353,6 @@ impl Network for Monero { Some(Self::address_internal(key, FORWARD_SUBADDRESS)) } - async fn get_latest_block_number(&self) -> Result { - // Monero defines height as chain length, so subtract 1 for block number - Ok(self.rpc.get_height().await.map_err(map_rpc_err)? - 1) - } - - async fn get_block(&self, number: usize) -> Result { - Ok( - self - .rpc - .get_block(self.rpc.get_block_hash(number).await.map_err(map_rpc_err)?) 
- .await - .map_err(map_rpc_err)?, - ) - } - - async fn get_outputs(&self, block: &Block, key: EdwardsPoint) -> Vec { - let outputs = loop { - match self - .rpc - .get_scannable_block(block.clone()) - .await - .map_err(|e| format!("{e:?}")) - .and_then(|block| Self::scanner(key).scan(block).map_err(|e| format!("{e:?}"))) - { - Ok(outputs) => break outputs, - Err(e) => { - log::error!("couldn't scan block {}: {e:?}", hex::encode(block.id())); - sleep(Duration::from_secs(60)).await; - continue; - } - } - }; - - // Miner transactions are required to explicitly state their timelock, so this does exclude - // those (which have an extended timelock we don't want to deal with) - let raw_outputs = outputs.not_additionally_locked(); - let mut outputs = Vec::with_capacity(raw_outputs.len()); - for output in raw_outputs { - // This should be pointless as we shouldn't be able to scan for any other subaddress - // This just helps ensures nothing invalid makes it through - assert!([EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARD_SUBADDRESS] - .contains(&output.subaddress())); - - outputs.push(Output(output)); - } - - outputs - } - - async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Block, - ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { - let mut res = HashMap::new(); - if eventualities.map.is_empty() { - return res; - } - - async fn check_block( - network: &Monero, - eventualities: &mut EventualitiesTracker, - block: &Block, - res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, - ) { - for hash in &block.transactions { - let tx = { - let mut tx; - while { - tx = network.rpc.get_transaction(*hash).await; - tx.is_err() - } { - log::error!("couldn't get transaction {}: {}", hex::encode(hash), tx.err().unwrap()); - sleep(Duration::from_secs(60)).await; - } - tx.unwrap() - }; - - if let Some((_, eventuality)) = eventualities.map.get(&tx.prefix().extra) { - if eventuality.matches(&tx.clone().into()) { - res.insert( - eventualities.map.remove(&tx.prefix().extra).unwrap().0, - (block.number().unwrap(), tx.id(), tx), - ); - } - } - } - - eventualities.block_number += 1; - assert_eq!(eventualities.block_number, block.number().unwrap()); - } - - for block_num in (eventualities.block_number + 1) .. 
block.number().unwrap() { - let block = { - let mut block; - while { - block = self.get_block(block_num).await; - block.is_err() - } { - log::error!("couldn't get block {}: {}", block_num, block.err().unwrap()); - sleep(Duration::from_secs(60)).await; - } - block.unwrap() - }; - - check_block(self, eventualities, &block, &mut res).await; - } - - // Also check the current block - check_block(self, eventualities, block, &mut res).await; - assert_eq!(eventualities.block_number, block.number().unwrap()); - - res - } - async fn needed_fee( &self, block_number: usize, @@ -687,19 +417,6 @@ impl Network for Monero { } } - async fn confirm_completion( - &self, - eventuality: &Eventuality, - id: &[u8; 32], - ) -> Result, NetworkError> { - let tx = self.rpc.get_transaction(*id).await.map_err(map_rpc_err)?; - if eventuality.matches(&tx.clone().into()) { - Ok(Some(tx)) - } else { - Ok(None) - } - } - #[cfg(test)] async fn get_block_number(&self, id: &[u8; 32]) -> usize { self.rpc.get_block(*id).await.unwrap().number().unwrap() diff --git a/processor/monero/src/primitives/block.rs b/processor/monero/src/primitives/block.rs index 634a0fbbe..62715f8cb 100644 --- a/processor/monero/src/primitives/block.rs +++ b/processor/monero/src/primitives/block.rs @@ -1,14 +1,22 @@ use std::collections::HashMap; +use zeroize::Zeroizing; + use ciphersuite::{Ciphersuite, Ed25519}; -use monero_wallet::{transaction::Transaction, block::Block as MBlock, ViewPairError, GuaranteedViewPair, GuaranteedScanner}; +use monero_wallet::{ + block::Block as MBlock, rpc::ScannableBlock as MScannableBlock, + ViewPairError, GuaranteedViewPair, ScanError, GuaranteedScanner, +}; use serai_client::networks::monero::Address; use primitives::{ReceivedOutput, EventualityTracker}; - -use crate::{EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS, output::Output, transaction::Eventuality}; +use view_keys::view_key; +use crate::{ + EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS, output::Output, + transaction::Eventuality, +}; #[derive(Clone, Debug)] pub(crate) struct BlockHeader(pub(crate) MBlock); @@ -22,7 +30,7 @@ impl primitives::BlockHeader for BlockHeader { } #[derive(Clone, Debug)] -pub(crate) struct Block(pub(crate) MBlock, Vec); +pub(crate) struct Block(pub(crate) MScannableBlock); impl primitives::Block for Block { type Header = BlockHeader; @@ -33,20 +41,26 @@ impl primitives::Block for Block { type Eventuality = Eventuality; fn id(&self) -> [u8; 32] { - self.0.hash() + self.0.block.hash() } fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec { - let view_pair = match GuaranteedViewPair::new(key.0, additional_key) { + let view_pair = match GuaranteedViewPair::new(key.0, Zeroizing::new(*view_key::(0))) { Ok(view_pair) => view_pair, - Err(ViewPairError::TorsionedSpendKey) => unreachable!("dalek_ff_group::EdwardsPoint has torsion"), - }; + Err(ViewPairError::TorsionedSpendKey) => { + unreachable!("dalek_ff_group::EdwardsPoint had torsion") + } + }; let mut scanner = GuaranteedScanner::new(view_pair); scanner.register_subaddress(EXTERNAL_SUBADDRESS.unwrap()); scanner.register_subaddress(BRANCH_SUBADDRESS.unwrap()); scanner.register_subaddress(CHANGE_SUBADDRESS.unwrap()); scanner.register_subaddress(FORWARDED_SUBADDRESS.unwrap()); - todo!("TODO") + match scanner.scan(self.0.clone()) { + Ok(outputs) => outputs.not_additionally_locked().into_iter().map(Output).collect(), + Err(ScanError::UnsupportedProtocol(version)) => panic!("Monero unexpectedly hard-forked (version 
{version})"), + Err(ScanError::InvalidScannableBlock(reason)) => panic!("fetched an invalid scannable block from the RPC: {reason}"), + } } #[allow(clippy::type_complexity)] @@ -57,6 +71,18 @@ impl primitives::Block for Block { >::TransactionId, Self::Eventuality, > { - todo!("TODO") + let mut res = HashMap::new(); + assert_eq!(self.0.block.transactions.len(), self.0.transactions.len()); + for (hash, tx) in self.0.block.transactions.iter().zip(&self.0.transactions) { + if let Some(eventuality) = eventualities.active_eventualities.get(&tx.prefix().extra) { + if eventuality.eventuality.matches(tx) { + res.insert( + *hash, + eventualities.active_eventualities.remove(&tx.prefix().extra).unwrap(), + ); + } + } + } + res } } diff --git a/processor/monero/src/primitives/output.rs b/processor/monero/src/primitives/output.rs index 385429c2f..d66fd983e 100644 --- a/processor/monero/src/primitives/output.rs +++ b/processor/monero/src/primitives/output.rs @@ -33,7 +33,7 @@ impl AsMut<[u8]> for OutputId { } #[derive(Clone, PartialEq, Eq, Debug)] -pub(crate) struct Output(WalletOutput); +pub(crate) struct Output(pub(crate) WalletOutput); impl Output { pub(crate) fn new(output: WalletOutput) -> Self { diff --git a/processor/monero/src/primitives/transaction.rs b/processor/monero/src/primitives/transaction.rs index 1ba494719..f6765cd9b 100644 --- a/processor/monero/src/primitives/transaction.rs +++ b/processor/monero/src/primitives/transaction.rs @@ -83,7 +83,7 @@ impl scheduler::SignableTransaction for SignableTransaction { pub(crate) struct Eventuality { id: [u8; 32], singular_spent_output: Option, - eventuality: MEventuality, + pub(crate) eventuality: MEventuality, } impl primitives::Eventuality for Eventuality { diff --git a/processor/monero/src/rpc.rs b/processor/monero/src/rpc.rs index 0e0739b8b..d826802b1 100644 --- a/processor/monero/src/rpc.rs +++ b/processor/monero/src/rpc.rs @@ -54,7 +54,7 @@ impl ScannerFeed for Rpc { &self, number: u64, ) -> impl Send + Future> { - async move{todo!("TODO")} + async move { todo!("TODO") } } fn unchecked_block_header_by_number( From 81651cb51dce3131a0434dc6c5e846c8f3d10965 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 13 Sep 2024 19:24:45 -0400 Subject: [PATCH 119/179] Monero time_for_block, dust --- processor/monero/src/lib.rs | 61 ------------------------ processor/monero/src/primitives/block.rs | 17 +++---- processor/monero/src/rpc.rs | 41 ++++++++++++++-- 3 files changed, 47 insertions(+), 72 deletions(-) diff --git a/processor/monero/src/lib.rs b/processor/monero/src/lib.rs index 46ce16d3b..1cde14144 100644 --- a/processor/monero/src/lib.rs +++ b/processor/monero/src/lib.rs @@ -54,64 +54,6 @@ impl SignableTransactionTrait for SignableTransaction { } } -#[async_trait] -impl BlockTrait for Block { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - self.hash() - } - - fn parent(&self) -> Self::Id { - self.header.previous - } - - async fn time(&self, rpc: &Monero) -> u64 { - // Constant from Monero - const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: usize = 60; - - // If Monero doesn't have enough blocks to build a window, it doesn't define a network time - if (self.number().unwrap() + 1) < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { - // Use the block number as the time - return u64::try_from(self.number().unwrap()).unwrap(); - } - - let mut timestamps = vec![self.header.timestamp]; - let mut parent = self.parent(); - while timestamps.len() < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { - let mut parent_block; - while { - parent_block = rpc.rpc.get_block(parent).await; - 
parent_block.is_err() - } { - log::error!("couldn't get parent block when trying to get block time: {parent_block:?}"); - sleep(Duration::from_secs(5)).await; - } - let parent_block = parent_block.unwrap(); - timestamps.push(parent_block.header.timestamp); - parent = parent_block.parent(); - - if parent_block.number().unwrap() == 0 { - break; - } - } - timestamps.sort(); - - // Because 60 has two medians, Monero's epee picks the in-between value, calculated by the - // following formula (from the "get_mid" function) - let n = timestamps.len() / 2; - let a = timestamps[n - 1]; - let b = timestamps[n]; - #[rustfmt::skip] // Enables Ctrl+F'ing for everything after the `= ` - let res = (a/2) + (b/2) + ((a - 2*(a/2)) + (b - 2*(b/2)))/2; - // Technically, res may be 1 if all prior blocks had a timestamp by 0, which would break - // monotonicity with our above definition of height as time - // Monero also solely requires the block's time not be less than the median, it doesn't ensure - // it advances the median forward - // Ensure monotonicity despite both these issues by adding the block number to the median time - res + u64::try_from(self.number().unwrap()).unwrap() - } -} - enum MakeSignableTransactionResult { Fee(u64), SignableTransaction(MSignableTransaction), @@ -330,9 +272,6 @@ impl Network for Monero { const MAX_OUTPUTS: usize = 16; - // 0.01 XMR - const DUST: u64 = 10000000000; - // TODO const COST_TO_AGGREGATE: u64 = 0; diff --git a/processor/monero/src/primitives/block.rs b/processor/monero/src/primitives/block.rs index 62715f8cb..130e5ac82 100644 --- a/processor/monero/src/primitives/block.rs +++ b/processor/monero/src/primitives/block.rs @@ -5,8 +5,8 @@ use zeroize::Zeroizing; use ciphersuite::{Ciphersuite, Ed25519}; use monero_wallet::{ - block::Block as MBlock, rpc::ScannableBlock as MScannableBlock, - ViewPairError, GuaranteedViewPair, ScanError, GuaranteedScanner, + block::Block as MBlock, rpc::ScannableBlock as MScannableBlock, ViewPairError, + GuaranteedViewPair, ScanError, GuaranteedScanner, }; use serai_client::networks::monero::Address; @@ -58,8 +58,12 @@ impl primitives::Block for Block { scanner.register_subaddress(FORWARDED_SUBADDRESS.unwrap()); match scanner.scan(self.0.clone()) { Ok(outputs) => outputs.not_additionally_locked().into_iter().map(Output).collect(), - Err(ScanError::UnsupportedProtocol(version)) => panic!("Monero unexpectedly hard-forked (version {version})"), - Err(ScanError::InvalidScannableBlock(reason)) => panic!("fetched an invalid scannable block from the RPC: {reason}"), + Err(ScanError::UnsupportedProtocol(version)) => { + panic!("Monero unexpectedly hard-forked (version {version})") + } + Err(ScanError::InvalidScannableBlock(reason)) => { + panic!("fetched an invalid scannable block from the RPC: {reason}") + } } } @@ -76,10 +80,7 @@ impl primitives::Block for Block { for (hash, tx) in self.0.block.transactions.iter().zip(&self.0.transactions) { if let Some(eventuality) = eventualities.active_eventualities.get(&tx.prefix().extra) { if eventuality.eventuality.matches(tx) { - res.insert( - *hash, - eventualities.active_eventualities.remove(&tx.prefix().extra).unwrap(), - ); + res.insert(*hash, eventualities.active_eventualities.remove(&tx.prefix().extra).unwrap()); } } } diff --git a/processor/monero/src/rpc.rs b/processor/monero/src/rpc.rs index d826802b1..9244b23f3 100644 --- a/processor/monero/src/rpc.rs +++ b/processor/monero/src/rpc.rs @@ -54,7 +54,38 @@ impl ScannerFeed for Rpc { &self, number: u64, ) -> impl Send + Future> { - async move { 
todo!("TODO") } + async move { + // Constant from Monero + const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60; + + // If Monero doesn't have enough blocks to build a window, it doesn't define a network time + if (number + 1) < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { + return Ok(0); + } + + // Fetch all the timestamps within the window + let block_for_time_of = self.rpc.get_block_by_number(number.try_into().unwrap()).await?; + let mut timestamps = vec![block_for_time_of.header.timestamp]; + let mut parent = block_for_time_of.header.previous; + for _ in 1 .. BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { + let parent_block = self.rpc.get_block(parent).await?; + timestamps.push(parent_block.header.timestamp); + parent = parent_block.header.previous; + } + timestamps.sort(); + + // Because there are two timestamps equidistance from the ends, Monero's epee picks the + // in-between value, calculated by the following formula (from the "get_mid" function) + let n = timestamps.len() / 2; + let a = timestamps[n - 1]; + let b = timestamps[n]; + #[rustfmt::skip] // Enables Ctrl+F'ing for everything after the `= ` + let res = (a/2) + (b/2) + ((a - 2*(a/2)) + (b - 2*(b/2)))/2; + + // Monero does check that the new block's time is greater than the median, causing the median + // to be monotonic + Ok(res) + } } fn unchecked_block_header_by_number( @@ -66,17 +97,21 @@ impl ScannerFeed for Rpc { async move { Ok(BlockHeader(self.rpc.get_block_by_number(number.try_into().unwrap()).await?)) } } + #[rustfmt::skip] // It wants to improperly format the `async move` to a single line fn unchecked_block_by_number( &self, number: u64, ) -> impl Send + Future> { - async move { todo!("TODO") } + async move { + Ok(Block(self.rpc.get_scannable_block_by_number(number.try_into().unwrap()).await?)) + } } fn dust(coin: Coin) -> Amount { assert_eq!(coin, Coin::Monero); - todo!("TODO") + // 0.01 XMR + Amount(10_000_000_000) } fn cost_to_aggregate( From 4bfeb9de32595170ce9934de67d1a222f609633b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 13 Sep 2024 23:51:53 -0400 Subject: [PATCH 120/179] Add a database of all Monero outs into the processor Enables synchronous transaction creation (which requires synchronous decoy selection). --- Cargo.lock | 1 + networks/monero/rpc/src/lib.rs | 6 +- processor/monero/Cargo.toml | 1 + processor/monero/src/decoys.rs | 294 ++++++++++++++++++++++++++++++ processor/monero/src/lib.rs | 140 -------------- processor/monero/src/main.rs | 2 + processor/monero/src/rpc.rs | 27 +-- processor/monero/src/scheduler.rs | 142 +++++++++++++++ 8 files changed, 457 insertions(+), 156 deletions(-) create mode 100644 processor/monero/src/decoys.rs diff --git a/Cargo.lock b/Cargo.lock index b08cde037..9e34ea3c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8515,6 +8515,7 @@ version = "0.1.0" dependencies = [ "borsh", "ciphersuite", + "curve25519-dalek", "dalek-ff-group", "dkg", "flexible-transcript", diff --git a/networks/monero/rpc/src/lib.rs b/networks/monero/rpc/src/lib.rs index 4c5055ccc..3c8d337a9 100644 --- a/networks/monero/rpc/src/lib.rs +++ b/networks/monero/rpc/src/lib.rs @@ -249,7 +249,7 @@ fn rpc_point(point: &str) -> Result { /// While no implementors are directly provided, [monero-simple-request-rpc]( /// https://github.com/serai-dex/serai/tree/develop/networks/monero/rpc/simple-request /// ) is recommended. -pub trait Rpc: Sync + Clone + Debug { +pub trait Rpc: Sync + Clone { /// Perform a POST request to the specified route with the specified body. 
/// /// The implementor is left to handle anything such as authentication. @@ -1003,10 +1003,10 @@ pub trait Rpc: Sync + Clone + Debug { /// An implementation is provided for any satisfier of `Rpc`. It is not recommended to use an `Rpc` /// object to satisfy this. This should be satisfied by a local store of the output distribution, /// both for performance and to prevent potential attacks a remote node can perform. -pub trait DecoyRpc: Sync + Clone + Debug { +pub trait DecoyRpc: Sync { /// Get the height the output distribution ends at. /// - /// This is equivalent to the hight of the blockchain it's for. This is intended to be cheaper + /// This is equivalent to the height of the blockchain it's for. This is intended to be cheaper /// than fetching the entire output distribution. fn get_output_distribution_end_height( &self, diff --git a/processor/monero/Cargo.toml b/processor/monero/Cargo.toml index 6f9ce40a5..436f327e5 100644 --- a/processor/monero/Cargo.toml +++ b/processor/monero/Cargo.toml @@ -25,6 +25,7 @@ scale = { package = "parity-scale-codec", version = "3", default-features = fals borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } +curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize"] } dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["std"] } ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ed25519"] } dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ed25519"] } diff --git a/processor/monero/src/decoys.rs b/processor/monero/src/decoys.rs new file mode 100644 index 000000000..000463d00 --- /dev/null +++ b/processor/monero/src/decoys.rs @@ -0,0 +1,294 @@ +use core::{ + future::Future, + ops::{Bound, RangeBounds}, +}; + +use curve25519_dalek::{ + scalar::Scalar, + edwards::{CompressedEdwardsY, EdwardsPoint}, +}; +use monero_wallet::{ + DEFAULT_LOCK_WINDOW, + primitives::Commitment, + transaction::{Timelock, Input, Pruned, Transaction}, + rpc::{OutputInformation, RpcError, Rpc as MRpcTrait, DecoyRpc}, +}; + +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, Db, create_db}; + +use primitives::task::ContinuallyRan; +use scanner::ScannerFeed; + +use crate::Rpc; + +#[derive(BorshSerialize, BorshDeserialize)] +struct EncodableOutputInformation { + height: u64, + timelocked: bool, + key: [u8; 32], + commitment: [u8; 32], +} + +create_db! { + MoneroProcessorDecoys { + NextToIndexBlock: () -> u64, + PriorIndexedBlock: () -> [u8; 32], + DistributionStartBlock: () -> u64, + Distribution: () -> Vec, + Out: (index: u64) -> EncodableOutputInformation, + } +} + +/* + We want to be able to select decoys when planning transactions, but planning transactions is a + synchronous process. We store the decoys to a local database and have our database implement + `DecoyRpc` to achieve synchronous decoy selection. + + This is only needed as the transactions we sign must have decoys decided and agreed upon. With + FCMP++s, we'll be able to sign transactions without the membership proof, letting any signer + prove for membership after the fact (with their local views). Until then, this task remains. 
+*/ +pub(crate) struct DecoysTask { + pub(crate) rpc: Rpc, + pub(crate) current_distribution: Vec, +} + +impl ContinuallyRan for DecoysTask { + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let finalized_block_number = self + .rpc + .rpc + .get_height() + .await + .map_err(|e| format!("couldn't fetch latest block number: {e:?}"))? + .checked_sub(Rpc::::CONFIRMATIONS.try_into().unwrap()) + .ok_or(format!( + "blockchain only just started and doesn't have {} blocks yet", + Rpc::::CONFIRMATIONS + ))?; + + if NextToIndexBlock::get(&self.rpc.db).is_none() { + let distribution = self + .rpc + .rpc + .get_output_distribution(..= finalized_block_number) + .await + .map_err(|e| format!("failed to get output distribution: {e:?}"))?; + if distribution.is_empty() { + Err("distribution was empty".to_string())?; + } + + let distribution_start_block = finalized_block_number - (distribution.len() - 1); + // There may have been a reorg between the time of getting the distribution and the time of + // getting this block. This is an invariant and assumed not to have happened in the split + // second it's possible. + let block = self + .rpc + .rpc + .get_block_by_number(distribution_start_block) + .await + .map_err(|e| format!("failed to get the start block for the distribution: {e:?}"))?; + + let mut txn = self.rpc.db.txn(); + NextToIndexBlock::set(&mut txn, &distribution_start_block.try_into().unwrap()); + PriorIndexedBlock::set(&mut txn, &block.header.previous); + DistributionStartBlock::set(&mut txn, &u64::try_from(distribution_start_block).unwrap()); + txn.commit(); + } + + let next_to_index_block = + usize::try_from(NextToIndexBlock::get(&self.rpc.db).unwrap()).unwrap(); + if next_to_index_block >= finalized_block_number { + return Ok(false); + } + + for b in next_to_index_block ..= finalized_block_number { + // Fetch the block + let block = self + .rpc + .rpc + .get_block_by_number(b) + .await + .map_err(|e| format!("decoys task failed to fetch block: {e:?}"))?; + let prior = PriorIndexedBlock::get(&self.rpc.db).unwrap(); + if block.header.previous != prior { + panic!( + "decoys task detected reorg: expected {}, found {}", + hex::encode(prior), + hex::encode(block.header.previous) + ); + } + + // Fetch the transactions in the block + let transactions = self + .rpc + .rpc + .get_pruned_transactions(&block.transactions) + .await + .map_err(|e| format!("failed to get the pruned transactions within a block: {e:?}"))?; + + fn outputs( + list: &mut Vec, + block_number: u64, + tx: Transaction, + ) { + match tx { + Transaction::V1 { .. 
} => {} + Transaction::V2 { prefix, proofs } => { + for (i, output) in prefix.outputs.into_iter().enumerate() { + list.push(EncodableOutputInformation { + // This is correct per the documentation on OutputInformation, which this maps to + height: block_number, + timelocked: prefix.additional_timelock != Timelock::None, + key: output.key.to_bytes(), + commitment: if matches!( + prefix.inputs.first().expect("Monero transaction had no inputs"), + Input::Gen(_) + ) { + Commitment::new( + Scalar::ONE, + output.amount.expect("miner transaction outputs didn't have amounts set"), + ) + .calculate() + .compress() + .to_bytes() + } else { + proofs + .as_ref() + .expect("non-miner V2 transaction didn't have proofs") + .base + .commitments + .get(i) + .expect("amount of commitments didn't match amount of outputs") + .compress() + .to_bytes() + }, + }); + } + } + } + } + + let block_hash = block.hash(); + + let b = u64::try_from(b).unwrap(); + let mut encodable = Vec::with_capacity(2 * (1 + block.transactions.len())); + outputs(&mut encodable, b, block.miner_transaction.into()); + for transaction in transactions { + outputs(&mut encodable, b, transaction); + } + + let existing_outputs = self.current_distribution.last().copied().unwrap_or(0); + let now_outputs = existing_outputs + u64::try_from(encodable.len()).unwrap(); + self.current_distribution.push(now_outputs); + + let mut txn = self.rpc.db.txn(); + NextToIndexBlock::set(&mut txn, &(b + 1)); + PriorIndexedBlock::set(&mut txn, &block_hash); + // TODO: Don't write the entire 10 MB distribution to the DB every two minutes + Distribution::set(&mut txn, &self.current_distribution); + for (b, out) in (existing_outputs .. now_outputs).zip(encodable) { + Out::set(&mut txn, b, &out); + } + txn.commit(); + } + Ok(true) + } + } +} + +// TODO: Cache the distribution in a static +pub(crate) struct Decoys<'a, G: Get>(&'a G); +impl<'a, G: Sync + Get> DecoyRpc for Decoys<'a, G> { + #[rustfmt::skip] + fn get_output_distribution_end_height( + &self, + ) -> impl Send + Future> { + async move { + Ok(NextToIndexBlock::get(self.0).map_or(0, |b| usize::try_from(b).unwrap() + 1)) + } + } + fn get_output_distribution( + &self, + range: impl Send + RangeBounds, + ) -> impl Send + Future, RpcError>> { + async move { + let from = match range.start_bound() { + Bound::Included(from) => *from, + Bound::Excluded(from) => from.checked_add(1).ok_or_else(|| { + RpcError::InternalError("range's from wasn't representable".to_string()) + })?, + Bound::Unbounded => 0, + }; + let to = match range.end_bound() { + Bound::Included(to) => *to, + Bound::Excluded(to) => to + .checked_sub(1) + .ok_or_else(|| RpcError::InternalError("range's to wasn't representable".to_string()))?, + Bound::Unbounded => { + panic!("requested distribution till latest block, which is non-deterministic") + } + }; + if from > to { + Err(RpcError::InternalError(format!( + "malformed range: inclusive start {from}, inclusive end {to}" + )))?; + } + + let distribution_start_block = usize::try_from( + DistributionStartBlock::get(self.0).expect("never populated the distribution start block"), + ) + .unwrap(); + let len_of_distribution_until_to = + to.checked_sub(distribution_start_block).ok_or_else(|| { + RpcError::InternalError( + "requested distribution until a block when the distribution had yet to start" + .to_string(), + ) + })? 
+ + 1; + let distribution = Distribution::get(self.0).expect("never populated the distribution"); + assert!( + distribution.len() >= len_of_distribution_until_to, + "requested distribution until block we have yet to index" + ); + Ok( + distribution[from.saturating_sub(distribution_start_block) .. len_of_distribution_until_to] + .to_vec(), + ) + } + } + fn get_outs( + &self, + _indexes: &[u64], + ) -> impl Send + Future, RpcError>> { + async move { unimplemented!("get_outs is unused") } + } + fn get_unlocked_outputs( + &self, + indexes: &[u64], + height: usize, + fingerprintable_deterministic: bool, + ) -> impl Send + Future>, RpcError>> { + assert!(fingerprintable_deterministic, "processor wasn't using deterministic output selection"); + async move { + let mut res = vec![]; + for index in indexes { + let out = Out::get(self.0, *index).expect("requested output we didn't index"); + let unlocked = (!out.timelocked) && + ((usize::try_from(out.height).unwrap() + DEFAULT_LOCK_WINDOW) <= height); + res.push(unlocked.then(|| CompressedEdwardsY(out.key).decompress()).flatten().map(|key| { + [ + key, + CompressedEdwardsY(out.commitment) + .decompress() + .expect("output with invalid commitment"), + ] + })); + } + Ok(res) + } + } +} diff --git a/processor/monero/src/lib.rs b/processor/monero/src/lib.rs index 1cde14144..52ebb6cbe 100644 --- a/processor/monero/src/lib.rs +++ b/processor/monero/src/lib.rs @@ -107,146 +107,6 @@ impl Monero { Ok(FeeRate::new(fee.max(MINIMUM_FEE), 10000).unwrap()) } - async fn make_signable_transaction( - &self, - block_number: usize, - plan_id: &[u8; 32], - inputs: &[Output], - payments: &[Payment], - change: &Option
, - calculating_fee: bool, - ) -> Result, NetworkError> { - for payment in payments { - assert_eq!(payment.balance.coin, Coin::Monero); - } - - // TODO2: Use an fee representative of several blocks, cached inside Self - let block_for_fee = self.get_block(block_number).await?; - let fee_rate = self.median_fee(&block_for_fee).await?; - - // Determine the RCT proofs to make based off the hard fork - // TODO: Make a fn for this block which is duplicated with tests - let rct_type = match block_for_fee.header.hardfork_version { - 14 => RctType::ClsagBulletproof, - 15 | 16 => RctType::ClsagBulletproofPlus, - _ => panic!("Monero hard forked and the processor wasn't updated for it"), - }; - - let mut transcript = - RecommendedTranscript::new(b"Serai Processor Monero Transaction Transcript"); - transcript.append_message(b"plan", plan_id); - - // All signers need to select the same decoys - // All signers use the same height and a seeded RNG to make sure they do so. - let mut inputs_actual = Vec::with_capacity(inputs.len()); - for input in inputs { - inputs_actual.push( - OutputWithDecoys::fingerprintable_deterministic_new( - &mut ChaCha20Rng::from_seed(transcript.rng_seed(b"decoys")), - &self.rpc, - // TODO: Have Decoys take RctType - match rct_type { - RctType::ClsagBulletproof => 11, - RctType::ClsagBulletproofPlus => 16, - _ => panic!("selecting decoys for an unsupported RctType"), - }, - block_number + 1, - input.0.clone(), - ) - .await - .map_err(map_rpc_err)?, - ); - } - - // Monero requires at least two outputs - // If we only have one output planned, add a dummy payment - let mut payments = payments.to_vec(); - let outputs = payments.len() + usize::from(u8::from(change.is_some())); - if outputs == 0 { - return Ok(None); - } else if outputs == 1 { - payments.push(Payment { - address: Address::new( - ViewPair::new(EdwardsPoint::generator().0, Zeroizing::new(Scalar::ONE.0)) - .unwrap() - .legacy_address(MoneroNetwork::Mainnet), - ) - .unwrap(), - balance: Balance { coin: Coin::Monero, amount: Amount(0) }, - data: None, - }); - } - - let payments = payments - .into_iter() - .map(|payment| (payment.address.into(), payment.balance.amount.0)) - .collect::>(); - - match MSignableTransaction::new( - rct_type, - // Use the plan ID as the outgoing view key - Zeroizing::new(*plan_id), - inputs_actual, - payments, - Change::fingerprintable(change.as_ref().map(|change| change.clone().into())), - vec![], - fee_rate, - ) { - Ok(signable) => Ok(Some({ - if calculating_fee { - MakeSignableTransactionResult::Fee(signable.necessary_fee()) - } else { - MakeSignableTransactionResult::SignableTransaction(signable) - } - })), - Err(e) => match e { - SendError::UnsupportedRctType => { - panic!("trying to use an RctType unsupported by monero-wallet") - } - SendError::NoInputs | - SendError::InvalidDecoyQuantity | - SendError::NoOutputs | - SendError::TooManyOutputs | - SendError::NoChange | - SendError::TooMuchArbitraryData | - SendError::TooLargeTransaction | - SendError::WrongPrivateKey => { - panic!("created an invalid Monero transaction: {e}"); - } - SendError::MultiplePaymentIds => { - panic!("multiple payment IDs despite not supporting integrated addresses"); - } - SendError::NotEnoughFunds { inputs, outputs, necessary_fee } => { - log::debug!( - "Monero NotEnoughFunds. 
inputs: {:?}, outputs: {:?}, necessary_fee: {necessary_fee:?}", - inputs, - outputs - ); - match necessary_fee { - Some(necessary_fee) => { - // If we're solely calculating the fee, return the fee this TX will cost - if calculating_fee { - Ok(Some(MakeSignableTransactionResult::Fee(necessary_fee))) - } else { - // If we're actually trying to make the TX, return None - Ok(None) - } - } - // We didn't have enough funds to even cover the outputs - None => { - // Ensure we're not misinterpreting this - assert!(outputs > inputs); - Ok(None) - } - } - } - SendError::MaliciousSerialization | SendError::ClsagError(_) | SendError::FrostError(_) => { - panic!("supposedly unreachable (at this time) Monero error: {e}"); - } - }, - } - } - #[cfg(test)] fn test_view_pair() -> ViewPair { ViewPair::new(*EdwardsPoint::generator(), Zeroizing::new(Scalar::ONE.0)).unwrap() diff --git a/processor/monero/src/main.rs b/processor/monero/src/main.rs index eda24b566..5b32e0f1f 100644 --- a/processor/monero/src/main.rs +++ b/processor/monero/src/main.rs @@ -15,6 +15,8 @@ mod key_gen; use crate::key_gen::KeyGenParams; mod rpc; use rpc::Rpc; + +mod decoys; /* mod scheduler; use scheduler::Scheduler; diff --git a/processor/monero/src/rpc.rs b/processor/monero/src/rpc.rs index 9244b23f3..58e6cf8bf 100644 --- a/processor/monero/src/rpc.rs +++ b/processor/monero/src/rpc.rs @@ -5,6 +5,7 @@ use monero_simple_request_rpc::SimpleRequestRpc; use serai_client::primitives::{NetworkId, Coin, Amount}; +use serai_db::Db; use scanner::ScannerFeed; use signers::TransactionPublisher; @@ -14,11 +15,12 @@ use crate::{ }; #[derive(Clone)] -pub(crate) struct Rpc { +pub(crate) struct Rpc { + pub(crate) db: D, pub(crate) rpc: SimpleRequestRpc, } -impl ScannerFeed for Rpc { +impl ScannerFeed for Rpc { const NETWORK: NetworkId = NetworkId::Monero; // Outputs aren't spendable until 10 blocks later due to the 10-block lock // Since we assumed scanned outputs are spendable, that sets a minimum confirmation depth of 10 @@ -37,16 +39,15 @@ impl ScannerFeed for Rpc { &self, ) -> impl Send + Future> { async move { - Ok( - self - .rpc - .get_height() - .await? - .checked_sub(1) - .expect("connected to an invalid Monero RPC") - .try_into() - .unwrap(), - ) + // The decoys task only indexes finalized blocks + crate::decoys::NextToIndexBlock::get(&self.db) + .ok_or_else(|| { + RpcError::InternalError("decoys task hasn't indexed any blocks yet".to_string()) + })? + .checked_sub(1) + .ok_or_else(|| { + RpcError::InternalError("only the genesis block has been indexed".to_string()) + }) } } @@ -127,7 +128,7 @@ impl ScannerFeed for Rpc { } } -impl TransactionPublisher for Rpc { +impl TransactionPublisher for Rpc { type EphemeralError = RpcError; fn publish( diff --git a/processor/monero/src/scheduler.rs b/processor/monero/src/scheduler.rs index 25f17c641..7666ec4f8 100644 --- a/processor/monero/src/scheduler.rs +++ b/processor/monero/src/scheduler.rs @@ -1,3 +1,144 @@ +async fn make_signable_transaction( +block_number: usize, +plan_id: &[u8; 32], +inputs: &[Output], +payments: &[Payment], +change: &Option
, +calculating_fee: bool, +) -> Result, NetworkError> { +for payment in payments { + assert_eq!(payment.balance.coin, Coin::Monero); +} + +// TODO2: Use an fee representative of several blocks, cached inside Self +let block_for_fee = self.get_block(block_number).await?; +let fee_rate = self.median_fee(&block_for_fee).await?; + +// Determine the RCT proofs to make based off the hard fork +// TODO: Make a fn for this block which is duplicated with tests +let rct_type = match block_for_fee.header.hardfork_version { + 14 => RctType::ClsagBulletproof, + 15 | 16 => RctType::ClsagBulletproofPlus, + _ => panic!("Monero hard forked and the processor wasn't updated for it"), +}; + +let mut transcript = + RecommendedTranscript::new(b"Serai Processor Monero Transaction Transcript"); +transcript.append_message(b"plan", plan_id); + +// All signers need to select the same decoys +// All signers use the same height and a seeded RNG to make sure they do so. +let mut inputs_actual = Vec::with_capacity(inputs.len()); +for input in inputs { + inputs_actual.push( + OutputWithDecoys::fingerprintable_deterministic_new( + &mut ChaCha20Rng::from_seed(transcript.rng_seed(b"decoys")), + &self.rpc, + // TODO: Have Decoys take RctType + match rct_type { + RctType::ClsagBulletproof => 11, + RctType::ClsagBulletproofPlus => 16, + _ => panic!("selecting decoys for an unsupported RctType"), + }, + block_number + 1, + input.0.clone(), + ) + .await + .map_err(map_rpc_err)?, + ); +} + +// Monero requires at least two outputs +// If we only have one output planned, add a dummy payment +let mut payments = payments.to_vec(); +let outputs = payments.len() + usize::from(u8::from(change.is_some())); +if outputs == 0 { + return Ok(None); +} else if outputs == 1 { + payments.push(Payment { + address: Address::new( + ViewPair::new(EdwardsPoint::generator().0, Zeroizing::new(Scalar::ONE.0)) + .unwrap() + .legacy_address(MoneroNetwork::Mainnet), + ) + .unwrap(), + balance: Balance { coin: Coin::Monero, amount: Amount(0) }, + data: None, + }); +} + +let payments = payments + .into_iter() + .map(|payment| (payment.address.into(), payment.balance.amount.0)) + .collect::>(); + +match MSignableTransaction::new( + rct_type, + // Use the plan ID as the outgoing view key + Zeroizing::new(*plan_id), + inputs_actual, + payments, + Change::fingerprintable(change.as_ref().map(|change| change.clone().into())), + vec![], + fee_rate, +) { + Ok(signable) => Ok(Some({ + if calculating_fee { + MakeSignableTransactionResult::Fee(signable.necessary_fee()) + } else { + MakeSignableTransactionResult::SignableTransaction(signable) + } + })), + Err(e) => match e { + SendError::UnsupportedRctType => { + panic!("trying to use an RctType unsupported by monero-wallet") + } + SendError::NoInputs | + SendError::InvalidDecoyQuantity | + SendError::NoOutputs | + SendError::TooManyOutputs | + SendError::NoChange | + SendError::TooMuchArbitraryData | + SendError::TooLargeTransaction | + SendError::WrongPrivateKey => { + panic!("created an invalid Monero transaction: {e}"); + } + SendError::MultiplePaymentIds => { + panic!("multiple payment IDs despite not supporting integrated addresses"); + } + SendError::NotEnoughFunds { inputs, outputs, necessary_fee } => { + log::debug!( + "Monero NotEnoughFunds. 
inputs: {:?}, outputs: {:?}, necessary_fee: {necessary_fee:?}",
+        inputs,
+        outputs
+      );
+      match necessary_fee {
+        Some(necessary_fee) => {
+          // If we're solely calculating the fee, return the fee this TX will cost
+          if calculating_fee {
+            Ok(Some(MakeSignableTransactionResult::Fee(necessary_fee)))
+          } else {
+            // If we're actually trying to make the TX, return None
+            Ok(None)
+          }
+        }
+        // We didn't have enough funds to even cover the outputs
+        None => {
+          // Ensure we're not misinterpreting this
+          assert!(outputs > inputs);
+          Ok(None)
+        }
+      }
+    }
+    SendError::MaliciousSerialization | SendError::ClsagError(_) | SendError::FrostError(_) => {
+      panic!("supposedly unreachable (at this time) Monero error: {e}");
+    }
+  },
+}
+}
+
+
+/*
 use ciphersuite::{Ciphersuite, Secp256k1};
 
 use bitcoin_serai::{
@@ -186,3 +327,4 @@ impl TransactionPlanner for Planner {
 }
 
 pub(crate) type Scheduler = utxo_standard_scheduler::Scheduler;
+*/

From 47e9333c1037e548aef1bc3b76333387b829b9d6 Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Sat, 14 Sep 2024 01:09:35 -0400
Subject: [PATCH 121/179] Allow scheduler's creation of transactions to be
 async and error

I don't love this, but it's the only way to select decoys without using a
local database. While the prior commit added such a database, the performance
of it presumably wasn't viable, and while TODOs marked the needed
improvements, it was still messy with an immense scope re: any auditing.

The relevant scheduler functions now take `&self` (intentional, as all
mutations should be via the `&mut impl DbTxn` passed). The calls to `&self`
are expected to be completely deterministic (as usual).
---
 processor/bin/src/lib.rs                      |  25 +-
 processor/bitcoin/src/main.rs                 |   4 +-
 processor/bitcoin/src/scheduler.rs            |  89 ++--
 processor/monero/src/decoys.rs                | 294 ------------
 processor/monero/src/main.rs                  |   1 -
 processor/monero/src/rpc.rs                   |  27 +-
 processor/scanner/src/eventuality/mod.rs      |  44 +-
 processor/scanner/src/lib.rs                  |  28 +-
 .../scheduler/utxo/primitives/src/lib.rs      | 238 +++++-----
 processor/scheduler/utxo/standard/src/lib.rs  | 439 ++++++++++--------
 .../utxo/transaction-chaining/src/lib.rs      | 368 ++++++++-------
 11 files changed, 713 insertions(+), 844 deletions(-)
 delete mode 100644 processor/monero/src/decoys.rs

diff --git a/processor/bin/src/lib.rs b/processor/bin/src/lib.rs
index 67ea61507..7758b1ead 100644
--- a/processor/bin/src/lib.rs
+++ b/processor/bin/src/lib.rs
@@ -15,7 +15,7 @@ use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};
 use primitives::EncodableG;
 
 use ::key_gen::{KeyGenParams, KeyGen};
-use scheduler::SignableTransaction;
+use scheduler::{SignableTransaction, TransactionFor};
 use scanner::{ScannerFeed, Scanner, KeyFor, Scheduler};
 use signers::{TransactionPublisher, Signers};
 
@@ -161,22 +161,23 @@ async fn first_block_after_time(feed: &S, serai_time: u64) -> u6
 pub async fn main_loop<
   S: ScannerFeed,
   K: KeyGenParams>>,
-  Sch: Scheduler<
-    S,
-    SignableTransaction: SignableTransaction,
-  >,
-  P: TransactionPublisher<::Transaction>,
+  Sch: Clone
+    + Scheduler<
+      S,
+      SignableTransaction: SignableTransaction,
+    >,
>(
   mut db: Db,
   feed: S,
-  publisher: P,
+  scheduler: Sch,
+  publisher: impl TransactionPublisher>,
) {
   let mut coordinator = Coordinator::new(db.clone());
 
   let mut key_gen = key_gen::();
-  let mut scanner = Scanner::new::(db.clone(), feed.clone()).await;
+  let mut scanner = Scanner::new(db.clone(), feed.clone(), scheduler.clone()).await;
   let mut signers =
-    
Signers::::new(db.clone(), coordinator.coordinator_send(), publisher); loop { let db_clone = db.clone(); @@ -242,8 +243,10 @@ pub async fn main_loop< if session == Session(0) { assert!(scanner.is_none()); let start_block = first_block_after_time(&feed, serai_time).await; - scanner = - Some(Scanner::initialize::(db_clone, feed.clone(), start_block, key.0).await); + scanner = Some( + Scanner::initialize(db_clone, feed.clone(), scheduler.clone(), start_block, key.0) + .await, + ); } } messages::substrate::CoordinatorMessage::SlashesReported { session } => { diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index 56bfd619a..d029ad8b5 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -22,7 +22,7 @@ use crate::key_gen::KeyGenParams; mod rpc; use rpc::Rpc; mod scheduler; -use scheduler::Scheduler; +use scheduler::{Planner, Scheduler}; // Our custom code for Bitcoin mod db; @@ -57,7 +57,7 @@ async fn main() { tokio::spawn(TxIndexTask(feed.clone()).continually_run(index_task, vec![])); core::mem::forget(index_handle); - bin::main_loop::<_, KeyGenParams, Scheduler<_>, Rpc>(db, feed.clone(), feed).await; + bin::main_loop::<_, KeyGenParams, _>(db, feed.clone(), Scheduler::new(Planner), feed).await; } /* diff --git a/processor/bitcoin/src/scheduler.rs b/processor/bitcoin/src/scheduler.rs index 6e49d23d9..b6554bdae 100644 --- a/processor/bitcoin/src/scheduler.rs +++ b/processor/bitcoin/src/scheduler.rs @@ -1,3 +1,5 @@ +use core::future::Future; + use ciphersuite::{Ciphersuite, Secp256k1}; use bitcoin_serai::{ @@ -89,8 +91,10 @@ fn signable_transaction( .map(|bst| (SignableTransaction { inputs, payments, change, fee_per_vbyte }, bst)) } +#[derive(Clone)] pub(crate) struct Planner; impl TransactionPlanner, EffectedReceivedOutputs>> for Planner { + type EphemeralError = (); type FeeRate = u64; type SignableTransaction = SignableTransaction; @@ -153,50 +157,59 @@ impl TransactionPlanner, EffectedReceivedOutputs>> for Plan } fn plan( + &self, fee_rate: Self::FeeRate, inputs: Vec>>, payments: Vec>>>, change: Option>>, - ) -> PlannedTransaction, Self::SignableTransaction, EffectedReceivedOutputs>> { - let key = inputs.first().unwrap().key(); - for input in &inputs { - assert_eq!(key, input.key()); - } + ) -> impl Send + + Future< + Output = Result< + PlannedTransaction, Self::SignableTransaction, EffectedReceivedOutputs>>, + Self::EphemeralError, + >, + > { + async move { + let key = inputs.first().unwrap().key(); + for input in &inputs { + assert_eq!(key, input.key()); + } - let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id()); - match signable_transaction::(fee_rate, inputs.clone(), payments, change) { - Ok(tx) => PlannedTransaction { - signable: tx.0, - eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output }, - auxilliary: EffectedReceivedOutputs({ - let tx = tx.1.transaction(); - let scanner = scanner(key); - - let mut res = vec![]; - for output in scanner.scan_transaction(tx) { - res.push(Output::new_with_presumed_origin( - key, - tx, - // It shouldn't matter if this is wrong as we should never try to return these - // We still provide an accurate value to ensure a lack of discrepancies - Some(Address::new(inputs[0].output.output().script_pubkey.clone()).unwrap()), - output, - )); - } - res + let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id()); + match signable_transaction::(fee_rate, inputs.clone(), payments, change) { + Ok(tx) => Ok(PlannedTransaction { + signable: tx.0, + eventuality: 
Eventuality { txid: tx.1.txid(), singular_spent_output }, + auxilliary: EffectedReceivedOutputs({ + let tx = tx.1.transaction(); + let scanner = scanner(key); + + let mut res = vec![]; + for output in scanner.scan_transaction(tx) { + res.push(Output::new_with_presumed_origin( + key, + tx, + // It shouldn't matter if this is wrong as we should never try to return these + // We still provide an accurate value to ensure a lack of discrepancies + Some(Address::new(inputs[0].output.output().script_pubkey.clone()).unwrap()), + output, + )); + } + res + }), }), - }, - Err( - TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, - ) => panic!("malformed arguments to plan"), - // No data, we have a minimum fee rate, we checked the amount of inputs/outputs - Err( - TransactionError::TooMuchData | - TransactionError::TooLowFee | - TransactionError::TooLargeTransaction, - ) => unreachable!(), - Err(TransactionError::NotEnoughFunds { .. }) => { - panic!("plan called for a transaction without enough funds") + Err( + TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, + ) => panic!("malformed arguments to plan"), + // No data, we have a minimum fee rate, we checked the amount of inputs/outputs + Err( + TransactionError::TooMuchData | + TransactionError::TooLowFee | + TransactionError::TooLargeTransaction, + ) => unreachable!(), + Err(TransactionError::NotEnoughFunds { .. }) => { + panic!("plan called for a transaction without enough funds") + } } } } diff --git a/processor/monero/src/decoys.rs b/processor/monero/src/decoys.rs deleted file mode 100644 index 000463d00..000000000 --- a/processor/monero/src/decoys.rs +++ /dev/null @@ -1,294 +0,0 @@ -use core::{ - future::Future, - ops::{Bound, RangeBounds}, -}; - -use curve25519_dalek::{ - scalar::Scalar, - edwards::{CompressedEdwardsY, EdwardsPoint}, -}; -use monero_wallet::{ - DEFAULT_LOCK_WINDOW, - primitives::Commitment, - transaction::{Timelock, Input, Pruned, Transaction}, - rpc::{OutputInformation, RpcError, Rpc as MRpcTrait, DecoyRpc}, -}; - -use borsh::{BorshSerialize, BorshDeserialize}; -use serai_db::{Get, DbTxn, Db, create_db}; - -use primitives::task::ContinuallyRan; -use scanner::ScannerFeed; - -use crate::Rpc; - -#[derive(BorshSerialize, BorshDeserialize)] -struct EncodableOutputInformation { - height: u64, - timelocked: bool, - key: [u8; 32], - commitment: [u8; 32], -} - -create_db! { - MoneroProcessorDecoys { - NextToIndexBlock: () -> u64, - PriorIndexedBlock: () -> [u8; 32], - DistributionStartBlock: () -> u64, - Distribution: () -> Vec, - Out: (index: u64) -> EncodableOutputInformation, - } -} - -/* - We want to be able to select decoys when planning transactions, but planning transactions is a - synchronous process. We store the decoys to a local database and have our database implement - `DecoyRpc` to achieve synchronous decoy selection. - - This is only needed as the transactions we sign must have decoys decided and agreed upon. With - FCMP++s, we'll be able to sign transactions without the membership proof, letting any signer - prove for membership after the fact (with their local views). Until then, this task remains. 
-*/ -pub(crate) struct DecoysTask { - pub(crate) rpc: Rpc, - pub(crate) current_distribution: Vec, -} - -impl ContinuallyRan for DecoysTask { - fn run_iteration(&mut self) -> impl Send + Future> { - async move { - let finalized_block_number = self - .rpc - .rpc - .get_height() - .await - .map_err(|e| format!("couldn't fetch latest block number: {e:?}"))? - .checked_sub(Rpc::::CONFIRMATIONS.try_into().unwrap()) - .ok_or(format!( - "blockchain only just started and doesn't have {} blocks yet", - Rpc::::CONFIRMATIONS - ))?; - - if NextToIndexBlock::get(&self.rpc.db).is_none() { - let distribution = self - .rpc - .rpc - .get_output_distribution(..= finalized_block_number) - .await - .map_err(|e| format!("failed to get output distribution: {e:?}"))?; - if distribution.is_empty() { - Err("distribution was empty".to_string())?; - } - - let distribution_start_block = finalized_block_number - (distribution.len() - 1); - // There may have been a reorg between the time of getting the distribution and the time of - // getting this block. This is an invariant and assumed not to have happened in the split - // second it's possible. - let block = self - .rpc - .rpc - .get_block_by_number(distribution_start_block) - .await - .map_err(|e| format!("failed to get the start block for the distribution: {e:?}"))?; - - let mut txn = self.rpc.db.txn(); - NextToIndexBlock::set(&mut txn, &distribution_start_block.try_into().unwrap()); - PriorIndexedBlock::set(&mut txn, &block.header.previous); - DistributionStartBlock::set(&mut txn, &u64::try_from(distribution_start_block).unwrap()); - txn.commit(); - } - - let next_to_index_block = - usize::try_from(NextToIndexBlock::get(&self.rpc.db).unwrap()).unwrap(); - if next_to_index_block >= finalized_block_number { - return Ok(false); - } - - for b in next_to_index_block ..= finalized_block_number { - // Fetch the block - let block = self - .rpc - .rpc - .get_block_by_number(b) - .await - .map_err(|e| format!("decoys task failed to fetch block: {e:?}"))?; - let prior = PriorIndexedBlock::get(&self.rpc.db).unwrap(); - if block.header.previous != prior { - panic!( - "decoys task detected reorg: expected {}, found {}", - hex::encode(prior), - hex::encode(block.header.previous) - ); - } - - // Fetch the transactions in the block - let transactions = self - .rpc - .rpc - .get_pruned_transactions(&block.transactions) - .await - .map_err(|e| format!("failed to get the pruned transactions within a block: {e:?}"))?; - - fn outputs( - list: &mut Vec, - block_number: u64, - tx: Transaction, - ) { - match tx { - Transaction::V1 { .. 
} => {} - Transaction::V2 { prefix, proofs } => { - for (i, output) in prefix.outputs.into_iter().enumerate() { - list.push(EncodableOutputInformation { - // This is correct per the documentation on OutputInformation, which this maps to - height: block_number, - timelocked: prefix.additional_timelock != Timelock::None, - key: output.key.to_bytes(), - commitment: if matches!( - prefix.inputs.first().expect("Monero transaction had no inputs"), - Input::Gen(_) - ) { - Commitment::new( - Scalar::ONE, - output.amount.expect("miner transaction outputs didn't have amounts set"), - ) - .calculate() - .compress() - .to_bytes() - } else { - proofs - .as_ref() - .expect("non-miner V2 transaction didn't have proofs") - .base - .commitments - .get(i) - .expect("amount of commitments didn't match amount of outputs") - .compress() - .to_bytes() - }, - }); - } - } - } - } - - let block_hash = block.hash(); - - let b = u64::try_from(b).unwrap(); - let mut encodable = Vec::with_capacity(2 * (1 + block.transactions.len())); - outputs(&mut encodable, b, block.miner_transaction.into()); - for transaction in transactions { - outputs(&mut encodable, b, transaction); - } - - let existing_outputs = self.current_distribution.last().copied().unwrap_or(0); - let now_outputs = existing_outputs + u64::try_from(encodable.len()).unwrap(); - self.current_distribution.push(now_outputs); - - let mut txn = self.rpc.db.txn(); - NextToIndexBlock::set(&mut txn, &(b + 1)); - PriorIndexedBlock::set(&mut txn, &block_hash); - // TODO: Don't write the entire 10 MB distribution to the DB every two minutes - Distribution::set(&mut txn, &self.current_distribution); - for (b, out) in (existing_outputs .. now_outputs).zip(encodable) { - Out::set(&mut txn, b, &out); - } - txn.commit(); - } - Ok(true) - } - } -} - -// TODO: Cache the distribution in a static -pub(crate) struct Decoys<'a, G: Get>(&'a G); -impl<'a, G: Sync + Get> DecoyRpc for Decoys<'a, G> { - #[rustfmt::skip] - fn get_output_distribution_end_height( - &self, - ) -> impl Send + Future> { - async move { - Ok(NextToIndexBlock::get(self.0).map_or(0, |b| usize::try_from(b).unwrap() + 1)) - } - } - fn get_output_distribution( - &self, - range: impl Send + RangeBounds, - ) -> impl Send + Future, RpcError>> { - async move { - let from = match range.start_bound() { - Bound::Included(from) => *from, - Bound::Excluded(from) => from.checked_add(1).ok_or_else(|| { - RpcError::InternalError("range's from wasn't representable".to_string()) - })?, - Bound::Unbounded => 0, - }; - let to = match range.end_bound() { - Bound::Included(to) => *to, - Bound::Excluded(to) => to - .checked_sub(1) - .ok_or_else(|| RpcError::InternalError("range's to wasn't representable".to_string()))?, - Bound::Unbounded => { - panic!("requested distribution till latest block, which is non-deterministic") - } - }; - if from > to { - Err(RpcError::InternalError(format!( - "malformed range: inclusive start {from}, inclusive end {to}" - )))?; - } - - let distribution_start_block = usize::try_from( - DistributionStartBlock::get(self.0).expect("never populated the distribution start block"), - ) - .unwrap(); - let len_of_distribution_until_to = - to.checked_sub(distribution_start_block).ok_or_else(|| { - RpcError::InternalError( - "requested distribution until a block when the distribution had yet to start" - .to_string(), - ) - })? 
+ - 1; - let distribution = Distribution::get(self.0).expect("never populated the distribution"); - assert!( - distribution.len() >= len_of_distribution_until_to, - "requested distribution until block we have yet to index" - ); - Ok( - distribution[from.saturating_sub(distribution_start_block) .. len_of_distribution_until_to] - .to_vec(), - ) - } - } - fn get_outs( - &self, - _indexes: &[u64], - ) -> impl Send + Future, RpcError>> { - async move { unimplemented!("get_outs is unused") } - } - fn get_unlocked_outputs( - &self, - indexes: &[u64], - height: usize, - fingerprintable_deterministic: bool, - ) -> impl Send + Future>, RpcError>> { - assert!(fingerprintable_deterministic, "processor wasn't using deterministic output selection"); - async move { - let mut res = vec![]; - for index in indexes { - let out = Out::get(self.0, *index).expect("requested output we didn't index"); - let unlocked = (!out.timelocked) && - ((usize::try_from(out.height).unwrap() + DEFAULT_LOCK_WINDOW) <= height); - res.push(unlocked.then(|| CompressedEdwardsY(out.key).decompress()).flatten().map(|key| { - [ - key, - CompressedEdwardsY(out.commitment) - .decompress() - .expect("output with invalid commitment"), - ] - })); - } - Ok(res) - } - } -} diff --git a/processor/monero/src/main.rs b/processor/monero/src/main.rs index 5b32e0f1f..344b6c487 100644 --- a/processor/monero/src/main.rs +++ b/processor/monero/src/main.rs @@ -16,7 +16,6 @@ use crate::key_gen::KeyGenParams; mod rpc; use rpc::Rpc; -mod decoys; /* mod scheduler; use scheduler::Scheduler; diff --git a/processor/monero/src/rpc.rs b/processor/monero/src/rpc.rs index 58e6cf8bf..9244b23f3 100644 --- a/processor/monero/src/rpc.rs +++ b/processor/monero/src/rpc.rs @@ -5,7 +5,6 @@ use monero_simple_request_rpc::SimpleRequestRpc; use serai_client::primitives::{NetworkId, Coin, Amount}; -use serai_db::Db; use scanner::ScannerFeed; use signers::TransactionPublisher; @@ -15,12 +14,11 @@ use crate::{ }; #[derive(Clone)] -pub(crate) struct Rpc { - pub(crate) db: D, +pub(crate) struct Rpc { pub(crate) rpc: SimpleRequestRpc, } -impl ScannerFeed for Rpc { +impl ScannerFeed for Rpc { const NETWORK: NetworkId = NetworkId::Monero; // Outputs aren't spendable until 10 blocks later due to the 10-block lock // Since we assumed scanned outputs are spendable, that sets a minimum confirmation depth of 10 @@ -39,15 +37,16 @@ impl ScannerFeed for Rpc { &self, ) -> impl Send + Future> { async move { - // The decoys task only indexes finalized blocks - crate::decoys::NextToIndexBlock::get(&self.db) - .ok_or_else(|| { - RpcError::InternalError("decoys task hasn't indexed any blocks yet".to_string()) - })? - .checked_sub(1) - .ok_or_else(|| { - RpcError::InternalError("only the genesis block has been indexed".to_string()) - }) + Ok( + self + .rpc + .get_height() + .await? 
+ .checked_sub(1) + .expect("connected to an invalid Monero RPC") + .try_into() + .unwrap(), + ) } } @@ -128,7 +127,7 @@ impl ScannerFeed for Rpc { } } -impl TransactionPublisher for Rpc { +impl TransactionPublisher for Rpc { type EphemeralError = RpcError; fn publish( diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 46a5e13b3..99fea2fbd 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -1,4 +1,4 @@ -use core::{marker::PhantomData, future::Future}; +use core::future::Future; use std::collections::{HashSet, HashMap}; use group::GroupEncoding; @@ -102,11 +102,11 @@ fn intake_eventualities( pub(crate) struct EventualityTask> { db: D, feed: S, - scheduler: PhantomData, + scheduler: Sch, } impl> EventualityTask { - pub(crate) fn new(mut db: D, feed: S, start_block: u64) -> Self { + pub(crate) fn new(mut db: D, feed: S, scheduler: Sch, start_block: u64) -> Self { if EventualityDb::::next_to_check_for_eventualities_block(&db).is_none() { // Initialize the DB let mut txn = db.txn(); @@ -114,7 +114,7 @@ impl> EventualityTask { txn.commit(); } - Self { db, feed, scheduler: PhantomData } + Self { db, feed, scheduler } } #[allow(clippy::type_complexity)] @@ -167,15 +167,19 @@ impl> EventualityTask { { intaked_any = true; - let new_eventualities = Sch::fulfill( - &mut txn, - &block, - &keys_with_stages, - burns - .into_iter() - .filter_map(|burn| Payment::>::try_from(burn).ok()) - .collect(), - ); + let new_eventualities = self + .scheduler + .fulfill( + &mut txn, + &block, + &keys_with_stages, + burns + .into_iter() + .filter_map(|burn| Payment::>::try_from(burn).ok()) + .collect(), + ) + .await + .map_err(|e| format!("failed to queue fulfilling payments: {e:?}"))?; intake_eventualities::(&mut txn, new_eventualities); } txn.commit(); @@ -443,8 +447,11 @@ impl> ContinuallyRan for EventualityTas determined off an earlier block than this (enabling an earlier LifetimeStage to be used after a later one was already used). */ - let new_eventualities = - Sch::update(&mut txn, &block, &keys_with_stages, scheduler_update); + let new_eventualities = self + .scheduler + .update(&mut txn, &block, &keys_with_stages, scheduler_update) + .await + .map_err(|e| format!("failed to update scheduler: {e:?}"))?; // Intake the new Eventualities for key in new_eventualities.keys() { keys @@ -464,8 +471,11 @@ impl> ContinuallyRan for EventualityTas key.key != keys.last().unwrap().key, "key which was forwarding was the last key (which has no key after it to forward to)" ); - let new_eventualities = - Sch::flush_key(&mut txn, &block, key.key, keys.last().unwrap().key); + let new_eventualities = self + .scheduler + .flush_key(&mut txn, &block, key.key, keys.last().unwrap().key) + .await + .map_err(|e| format!("failed to flush key from scheduler: {e:?}"))?; intake_eventualities::(&mut txn, new_eventualities); } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 6ac452237..1b6afaa91 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -256,8 +256,17 @@ impl SchedulerUpdate { } } +/// Eventualities, keyed by the encoding of the key the Eventualities are for. +pub type KeyScopedEventualities = HashMap, Vec>>; + /// The object responsible for accumulating outputs and planning new transactions. pub trait Scheduler: 'static + Send { + /// An error encountered when handling updates/payments. + /// + /// This MUST be an ephemeral error. 
Retrying handling updates/payments MUST eventually + /// resolve without manual intervention/changing the arguments. + type EphemeralError: Debug; + /// The type for a signable transaction. type SignableTransaction: scheduler_primitives::SignableTransaction; @@ -278,11 +287,12 @@ pub trait Scheduler: 'static + Send { /// If the retiring key has any unfulfilled payments associated with it, those MUST be made /// the responsibility of the new key. fn flush_key( + &self, txn: &mut impl DbTxn, block: &BlockFor, retiring_key: KeyFor, new_key: KeyFor, - ) -> HashMap, Vec>>; + ) -> impl Send + Future, Self::EphemeralError>>; /// Retire a key as it'll no longer be used. /// @@ -300,11 +310,12 @@ pub trait Scheduler: 'static + Send { /// The `Vec` used as the key in the returned HashMap should be the encoded key the /// Eventualities are for. fn update( + &self, txn: &mut impl DbTxn, block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], update: SchedulerUpdate, - ) -> HashMap, Vec>>; + ) -> impl Send + Future, Self::EphemeralError>>; /// Fulfill a series of payments, yielding the Eventualities now to be scanned for. /// @@ -339,11 +350,12 @@ pub trait Scheduler: 'static + Send { has an output-to-Serai, the new primary output). */ fn fulfill( + &self, txn: &mut impl DbTxn, block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], payments: Vec>>, - ) -> HashMap, Vec>>; + ) -> impl Send + Future, Self::EphemeralError>>; } /// A representation of a scanner. @@ -358,14 +370,15 @@ impl Scanner { /// This will begin its execution, spawning several asynchronous tasks. /// /// This will return None if the Scanner was never initialized. - pub async fn new>(db: impl Db, feed: S) -> Option { + pub async fn new(db: impl Db, feed: S, scheduler: impl Scheduler) -> Option { let start_block = ScannerGlobalDb::::start_block(&db)?; let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await; let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block); let report_task = report::ReportTask::<_, S>::new(db.clone(), start_block); let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone()); - let eventuality_task = eventuality::EventualityTask::<_, _, Sch>::new(db, feed, start_block); + let eventuality_task = + eventuality::EventualityTask::<_, _, _>::new(db, feed, scheduler, start_block); let (index_task_def, _index_handle) = Task::new(); let (scan_task_def, scan_handle) = Task::new(); @@ -394,9 +407,10 @@ impl Scanner { /// This will begin its execution, spawning several asynchronous tasks. /// /// This passes through to `Scanner::new` if prior called. - pub async fn initialize>( + pub async fn initialize( mut db: impl Db, feed: S, + scheduler: impl Scheduler, start_block: u64, start_key: KeyFor, ) -> Self { @@ -407,7 +421,7 @@ impl Scanner { txn.commit(); } - Self::new::(db, feed).await.unwrap() + Self::new(db, feed, scheduler).await.unwrap() } /// Acknowledge a Batch having been published on Serai. 
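
For orientation, the refactored `Scheduler` trait above is implemented along the lines
sketched below. This sketch is illustrative rather than part of the patch:
`ExampleScheduler` and its associated types are hypothetical, and only `fulfill` is
shown, with `flush_key` and `update` following the same `&self` + `Future` + `Result`
shape.

  impl<S: ScannerFeed> Scheduler<S> for ExampleScheduler {
    type EphemeralError = String;
    type SignableTransaction = ExampleSignableTransaction;

    fn fulfill(
      &self,
      txn: &mut impl DbTxn,
      block: &BlockFor<S>,
      active_keys: &[(KeyFor<S>, LifetimeStage)],
      payments: Vec<Payment<AddressFor<S>>>,
    ) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>> {
      async move {
        // A real implementation plans transactions fulfilling `payments` here, writing
        // its state via `txn` and returning the Eventualities to now scan for. Any
        // transient failure (e.g. an RPC error during decoy selection) is returned as an
        // ephemeral error, and the caller retries this entire call on a later iteration,
        // which is why planning must be deterministic with respect to its arguments.
        // This stub plans nothing and returns no Eventualities.
        let _ = (txn, block, active_keys, payments);
        Ok(KeyScopedEventualities::<S>::new())
      }
    }

    // `flush_key`, `retire_key`, and `update` are elided; they follow the same pattern.
  }
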
diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs
index e48221a1c..00b2d10f5 100644
--- a/processor/scheduler/utxo/primitives/src/lib.rs
+++ b/processor/scheduler/utxo/primitives/src/lib.rs
@@ -2,6 +2,8 @@
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 
+use core::{fmt::Debug, future::Future};
+
 use serai_primitives::{Coin, Amount};
 
 use primitives::{ReceivedOutput, Payment};
@@ -40,8 +42,14 @@ pub struct AmortizePlannedTransaction: 'static + Send + Sync {
+  /// An error encountered when handling planning transactions.
+  ///
+  /// This MUST be an ephemeral error. Retrying planning transactions MUST eventually resolve
+  /// without manual intervention/changing the arguments.
+  type EphemeralError: Debug;
+
   /// The type representing a fee rate to use for transactions.
-  type FeeRate: Clone + Copy;
+  type FeeRate: Send + Clone + Copy;
 
   /// The type representing a signable transaction.
   type SignableTransaction: SignableTransaction;
@@ -82,11 +90,15 @@ pub trait TransactionPlanner: 'static + Send + Sync {
   /// `change` will always be an address belonging to the Serai network. If it is `Some`, a change
   /// output must be created.
   fn plan(
+    &self,
     fee_rate: Self::FeeRate,
     inputs: Vec>,
     payments: Vec>>,
     change: Option>,
-  ) -> PlannedTransaction;
+  ) -> impl Send
+    + Future<
+      Output = Result, Self::EphemeralError>,
+    >;
 
   /// Obtain a PlannedTransaction via amortizing the fee over the payments.
   ///
@@ -98,132 +110,142 @@ pub trait TransactionPlanner: 'static + Send + Sync {
   /// Returns `None` if the fee exceeded the inputs, or `Some` otherwise.
   // TODO: Enum for Change of None, Some, Mandatory
   fn plan_transaction_with_fee_amortization(
+    &self,
     operating_costs: &mut u64,
     fee_rate: Self::FeeRate,
     inputs: Vec>,
     mut payments: Vec>>,
     mut change: Option>,
-  ) -> Option> {
-    // If there's no change output, we can't recoup any operating costs we would amortize
-    // We also don't have any losses if the inputs are written off/the change output is reduced
-    let mut operating_costs_if_no_change = 0;
-    let operating_costs_in_effect =
-      if change.is_none() { &mut operating_costs_if_no_change } else { operating_costs };
+  ) -> impl Send
+    + Future<
+      Output = Result<
+        Option>,
+        Self::EphemeralError,
+      >,
+    > {
+    async move {
+      // If there's no change output, we can't recoup any operating costs we would amortize
+      // We also don't have any losses if the inputs are written off/the change output is reduced
+      let mut operating_costs_if_no_change = 0;
+      let operating_costs_in_effect =
+        if change.is_none() { &mut operating_costs_if_no_change } else { operating_costs };
 
-    // Sanity checks
-    {
-      assert!(!inputs.is_empty());
-      assert!((!payments.is_empty()) || change.is_some());
-      let coin = inputs.first().unwrap().balance().coin;
-      for input in &inputs {
-        assert_eq!(coin, input.balance().coin);
-      }
-      for payment in &payments {
-        assert_eq!(coin, payment.balance().coin);
+      // Sanity checks
+      {
+        assert!(!inputs.is_empty());
+        assert!((!payments.is_empty()) || change.is_some());
+        let coin = inputs.first().unwrap().balance().coin;
+        for input in &inputs {
+          assert_eq!(coin, input.balance().coin);
+        }
+        for payment in &payments {
+          assert_eq!(coin, payment.balance().coin);
+        }
+        assert!(
+          (inputs.iter().map(|input| input.balance().amount.0).sum::() +
+            *operating_costs_in_effect) >=
+            payments.iter().map(|payment| payment.balance().amount.0).sum::(),
+          "attempted to fulfill payments without a sufficient input set"
+        );
       }
-      assert!(
(inputs.iter().map(|input| input.balance().amount.0).sum::() + - *operating_costs_in_effect) >= - payments.iter().map(|payment| payment.balance().amount.0).sum::(), - "attempted to fulfill payments without a sufficient input set" - ); - } - let coin = inputs.first().unwrap().balance().coin; + let coin = inputs.first().unwrap().balance().coin; - // Amortization - { - // Sort payments from high amount to low amount - payments.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse()); + // Amortization + { + // Sort payments from high amount to low amount + payments.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse()); - let mut fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0; - let mut amortized = 0; - while !payments.is_empty() { - // We need to pay the fee, and any accrued operating costs, minus what we've already - // amortized - let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized); + let mut fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0; + let mut amortized = 0; + while !payments.is_empty() { + // We need to pay the fee, and any accrued operating costs, minus what we've already + // amortized + let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized); - /* - Ideally, we wouldn't use a ceil div yet would be accurate about it. Any remainder could - be amortized over the largest outputs, which wouldn't be relevant here as we only work - with the smallest output. The issue is the theoretical edge case where all outputs have - the same value and are of the minimum value. In that case, none would be able to have the - remainder amortized as it'd cause them to need to be dropped. Using a ceil div avoids - this. - */ - let per_payment_fee = adjusted_fee.div_ceil(u64::try_from(payments.len()).unwrap()); - // Pop the last payment if it can't pay the fee, remaining about the dust limit as it does - if payments.last().unwrap().balance().amount.0 <= (per_payment_fee + S::dust(coin).0) { - amortized += payments.pop().unwrap().balance().amount.0; - // Recalculate the fee and try again - fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0; - continue; + /* + Ideally, we wouldn't use a ceil div yet would be accurate about it. Any remainder could + be amortized over the largest outputs, which wouldn't be relevant here as we only work + with the smallest output. The issue is the theoretical edge case where all outputs have + the same value and are of the minimum value. In that case, none would be able to have + the remainder amortized as it'd cause them to need to be dropped. Using a ceil div + avoids this. 
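+
+          As a hypothetical worked example (numbers not from this code): with payments
+          of 50, 30, and 6, an adjusted fee of 9, and a dust limit of 5, the worst-case
+          per-payment share is div_ceil(9, 3) = 3. The smallest payment satisfies
+          6 <= 3 + 5, so it's dropped and the fee is recalculated over the remaining
+          two payments.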
+ */ + let per_payment_fee = adjusted_fee.div_ceil(u64::try_from(payments.len()).unwrap()); + // Pop the last payment if it can't pay the fee, remaining about the dust limit as it does + if payments.last().unwrap().balance().amount.0 <= (per_payment_fee + S::dust(coin).0) { + amortized += payments.pop().unwrap().balance().amount.0; + // Recalculate the fee and try again + fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0; + continue; + } + // Break since all of these payments shouldn't be dropped + break; } - // Break since all of these payments shouldn't be dropped - break; - } - // If we couldn't amortize the fee over the payments, check if we even have enough to pay it - if payments.is_empty() { - // If we don't have a change output, we simply return here - // We no longer have anything to do here, nor any expectations - if change.is_none() { - None?; - } + // If we couldn't amortize the fee over the payments, check if we even have enough to pay it + if payments.is_empty() { + // If we don't have a change output, we simply return here + // We no longer have anything to do here, nor any expectations + if change.is_none() { + return Ok(None); + } - let inputs = inputs.iter().map(|input| input.balance().amount.0).sum::(); - // Checks not just if we can pay for it, yet that the would-be change output is at least - // dust - if inputs < (fee + S::dust(coin).0) { - // Write off these inputs - *operating_costs_in_effect += inputs; - // Yet also claw back the payments we dropped, as we only lost the change - // The dropped payments will be worth less than the inputs + operating_costs we started - // with, so this shouldn't use `saturating_sub` - *operating_costs_in_effect -= amortized; - None?; - } - } else { - // Since we have payments which can pay the fee we ended up with, amortize it - let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized); - let per_payment_base_fee = adjusted_fee / u64::try_from(payments.len()).unwrap(); - let payments_paying_one_atomic_unit_more = - usize::try_from(adjusted_fee % u64::try_from(payments.len()).unwrap()).unwrap(); + let inputs = inputs.iter().map(|input| input.balance().amount.0).sum::(); + // Checks not just if we can pay for it, yet that the would-be change output is at least + // dust + if inputs < (fee + S::dust(coin).0) { + // Write off these inputs + *operating_costs_in_effect += inputs; + // Yet also claw back the payments we dropped, as we only lost the change + // The dropped payments will be worth less than the inputs + operating_costs we started + // with, so this shouldn't use `saturating_sub` + *operating_costs_in_effect -= amortized; + return Ok(None); + } + } else { + // Since we have payments which can pay the fee we ended up with, amortize it + let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized); + let per_payment_base_fee = adjusted_fee / u64::try_from(payments.len()).unwrap(); + let payments_paying_one_atomic_unit_more = + usize::try_from(adjusted_fee % u64::try_from(payments.len()).unwrap()).unwrap(); - for (i, payment) in payments.iter_mut().enumerate() { - let per_payment_fee = - per_payment_base_fee + u64::from(u8::from(i < payments_paying_one_atomic_unit_more)); - payment.balance().amount.0 -= per_payment_fee; - amortized += per_payment_fee; - } - assert!(amortized >= (*operating_costs_in_effect + fee)); + for (i, payment) in payments.iter_mut().enumerate() { + let per_payment_fee = + per_payment_base_fee + u64::from(u8::from(i < 
payments_paying_one_atomic_unit_more)); + payment.balance().amount.0 -= per_payment_fee; + amortized += per_payment_fee; + } + assert!(amortized >= (*operating_costs_in_effect + fee)); - // If the change is less than the dust, drop it - let would_be_change = inputs.iter().map(|input| input.balance().amount.0).sum::() - - payments.iter().map(|payment| payment.balance().amount.0).sum::() - - fee; - if would_be_change < S::dust(coin).0 { - change = None; - *operating_costs_in_effect += would_be_change; + // If the change is less than the dust, drop it + let would_be_change = inputs.iter().map(|input| input.balance().amount.0).sum::() - + payments.iter().map(|payment| payment.balance().amount.0).sum::() - + fee; + if would_be_change < S::dust(coin).0 { + change = None; + *operating_costs_in_effect += would_be_change; + } } + + // Update the amount of operating costs + *operating_costs_in_effect = (*operating_costs_in_effect + fee).saturating_sub(amortized); } - // Update the amount of operating costs - *operating_costs_in_effect = (*operating_costs_in_effect + fee).saturating_sub(amortized); - } + // Because we amortized, or accrued as operating costs, the fee, make the transaction + let effected_payments = payments.iter().map(|payment| payment.balance().amount).collect(); + let has_change = change.is_some(); - // Because we amortized, or accrued as operating costs, the fee, make the transaction - let effected_payments = payments.iter().map(|payment| payment.balance().amount).collect(); - let has_change = change.is_some(); - let PlannedTransaction { signable, eventuality, auxilliary } = - Self::plan(fee_rate, inputs, payments, change); - Some(AmortizePlannedTransaction { - effected_payments, - has_change, - signable, - eventuality, - auxilliary, - }) + let PlannedTransaction { signable, eventuality, auxilliary } = + self.plan(fee_rate, inputs, payments, change).await?; + Ok(Some(AmortizePlannedTransaction { + effected_payments, + has_change, + signable, + eventuality, + auxilliary, + })) + } } /// Create a tree to fulfill a set of payments. diff --git a/processor/scheduler/utxo/standard/src/lib.rs b/processor/scheduler/utxo/standard/src/lib.rs index 3ae855e73..5ff786a73 100644 --- a/processor/scheduler/utxo/standard/src/lib.rs +++ b/processor/scheduler/utxo/standard/src/lib.rs @@ -2,7 +2,7 @@ #![doc = include_str!("../README.md")] #![deny(missing_docs)] -use core::marker::PhantomData; +use core::{marker::PhantomData, future::Future}; use std::collections::HashMap; use group::GroupEncoding; @@ -14,7 +14,7 @@ use serai_db::DbTxn; use primitives::{ReceivedOutput, Payment}; use scanner::{ LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor, - SchedulerUpdate, Scheduler as SchedulerTrait, + SchedulerUpdate, KeyScopedEventualities, Scheduler as SchedulerTrait, }; use scheduler_primitives::*; use utxo_scheduler_primitives::*; @@ -23,16 +23,27 @@ mod db; use db::Db; /// A scheduler of transactions for networks premised on the UTXO model. -pub struct Scheduler>(PhantomData, PhantomData

); +#[allow(non_snake_case)] +#[derive(Clone)] +pub struct Scheduler> { + planner: P, + _S: PhantomData, +} impl> Scheduler { - fn aggregate_inputs( + /// Create a new scheduler. + pub fn new(planner: P) -> Self { + Self { planner, _S: PhantomData } + } + + async fn aggregate_inputs( + &self, txn: &mut impl DbTxn, block: &BlockFor, key_for_change: KeyFor, key: KeyFor, coin: Coin, - ) -> Vec> { + ) -> Result>, >::EphemeralError> { let mut eventualities = vec![]; let mut operating_costs = Db::::operating_costs(txn, coin).0; @@ -41,13 +52,17 @@ impl> Scheduler { while outputs.len() > P::MAX_INPUTS { let to_aggregate = outputs.drain(.. P::MAX_INPUTS).collect::>(); - let Some(planned) = P::plan_transaction_with_fee_amortization( - &mut operating_costs, - P::fee_rate(block, coin), - to_aggregate, - vec![], - Some(key_for_change), - ) else { + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + to_aggregate, + vec![], + Some(key_for_change), + ) + .await? + else { continue; }; @@ -57,7 +72,7 @@ impl> Scheduler { Db::::set_outputs(txn, key, coin, &outputs); Db::::set_operating_costs(txn, coin, Amount(operating_costs)); - eventualities + Ok(eventualities) } fn fulfillable_payments( @@ -140,31 +155,36 @@ impl> Scheduler { } } - fn handle_branch( + async fn handle_branch( + &self, txn: &mut impl DbTxn, block: &BlockFor, eventualities: &mut Vec>, output: OutputFor, tx: TreeTransaction>, - ) -> bool { + ) -> Result>::EphemeralError> { let key = output.key(); let coin = output.balance().coin; let Some(payments) = tx.payments::(coin, &P::branch_address(key), output.balance().amount.0) else { // If this output has become too small to satisfy this branch, drop it - return false; + return Ok(false); }; - let Some(planned) = P::plan_transaction_with_fee_amortization( - // Uses 0 as there's no operating costs to incur/amortize here - &mut 0, - P::fee_rate(block, coin), - vec![output], - payments, - None, - ) else { + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + // Uses 0 as there's no operating costs to incur/amortize here + &mut 0, + P::fee_rate(block, coin), + vec![output], + payments, + None, + ) + .await? + else { // This Branch isn't viable, so drop it (and its children) - return false; + return Ok(false); }; TransactionsToSign::::send(txn, &key, &planned.signable); @@ -172,15 +192,16 @@ impl> Scheduler { Self::queue_branches(txn, key, coin, planned.effected_payments, tx); - true + Ok(true) } - fn step( + async fn step( + &self, txn: &mut impl DbTxn, active_keys: &[(KeyFor, LifetimeStage)], block: &BlockFor, key: KeyFor, - ) -> Vec> { + ) -> Result>, >::EphemeralError> { let mut eventualities = vec![]; let key_for_change = match active_keys[0].1 { @@ -198,7 +219,8 @@ impl> Scheduler { let coin = *coin; // Perform any input aggregation we should - eventualities.append(&mut Self::aggregate_inputs(txn, block, key_for_change, key, coin)); + eventualities + .append(&mut self.aggregate_inputs(txn, block, key_for_change, key, coin).await?); // Fetch the operating costs/outputs let mut operating_costs = Db::::operating_costs(txn, coin).0; @@ -228,15 +250,19 @@ impl> Scheduler { // scanner API) let mut planned_outer = None; for i in 0 .. 
2 { - let Some(planned) = P::plan_transaction_with_fee_amortization( - &mut operating_costs, - P::fee_rate(block, coin), - outputs.clone(), - tree[0] - .payments::(coin, &branch_address, tree[0].value()) - .expect("payments were dropped despite providing an input of the needed value"), - Some(key_for_change), - ) else { + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + outputs.clone(), + tree[0] + .payments::(coin, &branch_address, tree[0].value()) + .expect("payments were dropped despite providing an input of the needed value"), + Some(key_for_change), + ) + .await? + else { // This should trip on the first iteration or not at all assert_eq!(i, 0); // This doesn't have inputs even worth aggregating so drop the entire tree @@ -272,46 +298,53 @@ impl> Scheduler { Self::queue_branches(txn, key, coin, planned.effected_payments, tree.remove(0)); } - eventualities + Ok(eventualities) } - fn flush_outputs( + async fn flush_outputs( + &self, txn: &mut impl DbTxn, - eventualities: &mut HashMap, Vec>>, + eventualities: &mut KeyScopedEventualities, block: &BlockFor, from: KeyFor, to: KeyFor, coin: Coin, - ) { + ) -> Result<(), >::EphemeralError> { let from_bytes = from.to_bytes().as_ref().to_vec(); // Ensure our inputs are aggregated eventualities .entry(from_bytes.clone()) .or_insert(vec![]) - .append(&mut Self::aggregate_inputs(txn, block, to, from, coin)); + .append(&mut self.aggregate_inputs(txn, block, to, from, coin).await?); // Now that our inputs are aggregated, transfer all of them to the new key let mut operating_costs = Db::::operating_costs(txn, coin).0; let outputs = Db::::outputs(txn, from, coin).unwrap(); if outputs.is_empty() { - return; + return Ok(()); } - let planned = P::plan_transaction_with_fee_amortization( - &mut operating_costs, - P::fee_rate(block, coin), - outputs, - vec![], - Some(to), - ); + let planned = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + outputs, + vec![], + Some(to), + ) + .await?; Db::::set_operating_costs(txn, coin, Amount(operating_costs)); - let Some(planned) = planned else { return }; + let Some(planned) = planned else { return Ok(()) }; TransactionsToSign::::send(txn, &from, &planned.signable); eventualities.get_mut(&from_bytes).unwrap().push(planned.eventuality); + + Ok(()) } } impl> SchedulerTrait for Scheduler { + type EphemeralError = P::EphemeralError; type SignableTransaction = P::SignableTransaction; fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { @@ -324,29 +357,32 @@ impl> SchedulerTrait for Schedul } fn flush_key( + &self, txn: &mut impl DbTxn, block: &BlockFor, retiring_key: KeyFor, new_key: KeyFor, - ) -> HashMap, Vec>> { - let mut eventualities = HashMap::new(); - for coin in S::NETWORK.coins() { - // Move the payments to the new key - { - let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); - let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); - - let mut queued = still_queued; - queued.append(&mut new_queued); + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let mut eventualities = HashMap::new(); + for coin in S::NETWORK.coins() { + // Move the payments to the new key + { + let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); + let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); + + let mut queued = still_queued; + queued.append(&mut new_queued); + + 
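+        // Zero out the retiring key's queue and persist the merged queue under the
+        // new key, so no payment is lost across the migration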
Db::::set_queued_payments(txn, retiring_key, *coin, &[]); + Db::::set_queued_payments(txn, new_key, *coin, &queued); + } - Db::::set_queued_payments(txn, retiring_key, *coin, &[]); - Db::::set_queued_payments(txn, new_key, *coin, &queued); + // Move the outputs to the new key + self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin).await?; } - - // Move the outputs to the new key - Self::flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin); + Ok(eventualities) } - eventualities } fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { @@ -359,155 +395,174 @@ impl> SchedulerTrait for Schedul } fn update( + &self, txn: &mut impl DbTxn, block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], update: SchedulerUpdate, - ) -> HashMap, Vec>> { - let mut eventualities = HashMap::new(); - - // Accumulate the new outputs - { - let mut outputs_by_key = HashMap::new(); - for output in update.outputs() { - // If this aligns for a branch, handle it - if let Some(branch) = Db::::take_pending_branch(txn, output.key(), output.balance()) { - if Self::handle_branch( - txn, - block, - eventualities.entry(output.key().to_bytes().as_ref().to_vec()).or_insert(vec![]), - output.clone(), - branch, - ) { - // If we could use it for a branch, we do and move on - // Else, we let it be accumulated by the standard accumulation code - continue; + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let mut eventualities = HashMap::new(); + + // Accumulate the new outputs + { + let mut outputs_by_key = HashMap::new(); + for output in update.outputs() { + // If this aligns for a branch, handle it + if let Some(branch) = Db::::take_pending_branch(txn, output.key(), output.balance()) { + if self + .handle_branch( + txn, + block, + eventualities.entry(output.key().to_bytes().as_ref().to_vec()).or_insert(vec![]), + output.clone(), + branch, + ) + .await? 
+ { + // If we could use it for a branch, we do and move on + // Else, we let it be accumulated by the standard accumulation code + continue; + } } - } - let coin = output.balance().coin; - outputs_by_key - // Index by key and coin - .entry((output.key().to_bytes().as_ref().to_vec(), coin)) - // If we haven't accumulated here prior, read the outputs from the database - .or_insert_with(|| (output.key(), Db::::outputs(txn, output.key(), coin).unwrap())) - .1 - .push(output.clone()); - } - // Write the outputs back to the database - for ((_key_vec, coin), (key, outputs)) in outputs_by_key { - Db::::set_outputs(txn, key, coin, &outputs); + let coin = output.balance().coin; + outputs_by_key + // Index by key and coin + .entry((output.key().to_bytes().as_ref().to_vec(), coin)) + // If we haven't accumulated here prior, read the outputs from the database + .or_insert_with(|| (output.key(), Db::::outputs(txn, output.key(), coin).unwrap())) + .1 + .push(output.clone()); + } + // Write the outputs back to the database + for ((_key_vec, coin), (key, outputs)) in outputs_by_key { + Db::::set_outputs(txn, key, coin, &outputs); + } } - } - // Fulfill the payments we prior couldn't - for (key, _stage) in active_keys { - eventualities - .entry(key.to_bytes().as_ref().to_vec()) - .or_insert(vec![]) - .append(&mut Self::step(txn, active_keys, block, *key)); - } + // Fulfill the payments we prior couldn't + for (key, _stage) in active_keys { + eventualities + .entry(key.to_bytes().as_ref().to_vec()) + .or_insert(vec![]) + .append(&mut self.step(txn, active_keys, block, *key).await?); + } - // If this key has been flushed, forward all outputs - match active_keys[0].1 { - LifetimeStage::ActiveYetNotReporting | - LifetimeStage::Active | - LifetimeStage::UsingNewForChange => {} - LifetimeStage::Forwarding | LifetimeStage::Finishing => { - for coin in S::NETWORK.coins() { - Self::flush_outputs( - txn, - &mut eventualities, - block, - active_keys[0].0, - active_keys[1].0, - *coin, - ); + // If this key has been flushed, forward all outputs + match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => {} + LifetimeStage::Forwarding | LifetimeStage::Finishing => { + for coin in S::NETWORK.coins() { + self + .flush_outputs( + txn, + &mut eventualities, + block, + active_keys[0].0, + active_keys[1].0, + *coin, + ) + .await?; + } } } - } - // Create the transactions for the forwards/burns - { - let mut planned_txs = vec![]; - for forward in update.forwards() { - let key = forward.key(); - - assert_eq!(active_keys.len(), 2); - assert_eq!(active_keys[0].1, LifetimeStage::Forwarding); - assert_eq!(active_keys[1].1, LifetimeStage::Active); - let forward_to_key = active_keys[1].0; - - let Some(plan) = P::plan_transaction_with_fee_amortization( - // This uses 0 for the operating costs as we don't incur any here - // If the output can't pay for itself to be forwarded, we simply drop it - &mut 0, - P::fee_rate(block, forward.balance().coin), - vec![forward.clone()], - vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)], - None, - ) else { - continue; - }; - planned_txs.push((key, plan)); - } - for to_return in update.returns() { - let key = to_return.output().key(); - let out_instruction = - Payment::new(to_return.address().clone(), to_return.output().balance(), None); - let Some(plan) = P::plan_transaction_with_fee_amortization( - // This uses 0 for the operating costs as we don't incur any here - // If the output can't pay 
for itself to be returned, we simply drop it - &mut 0, - P::fee_rate(block, out_instruction.balance().coin), - vec![to_return.output().clone()], - vec![out_instruction], - None, - ) else { - continue; - }; - planned_txs.push((key, plan)); - } + // Create the transactions for the forwards/burns + { + let mut planned_txs = vec![]; + for forward in update.forwards() { + let key = forward.key(); + + assert_eq!(active_keys.len(), 2); + assert_eq!(active_keys[0].1, LifetimeStage::Forwarding); + assert_eq!(active_keys[1].1, LifetimeStage::Active); + let forward_to_key = active_keys[1].0; + + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be forwarded, we simply drop it + &mut 0, + P::fee_rate(block, forward.balance().coin), + vec![forward.clone()], + vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)], + None, + ) + .await? + else { + continue; + }; + planned_txs.push((key, plan)); + } + for to_return in update.returns() { + let key = to_return.output().key(); + let out_instruction = + Payment::new(to_return.address().clone(), to_return.output().balance(), None); + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be returned, we simply drop it + &mut 0, + P::fee_rate(block, out_instruction.balance().coin), + vec![to_return.output().clone()], + vec![out_instruction], + None, + ) + .await? + else { + continue; + }; + planned_txs.push((key, plan)); + } - for (key, planned_tx) in planned_txs { - // Send the transactions off for signing - TransactionsToSign::::send(txn, &key, &planned_tx.signable); + for (key, planned_tx) in planned_txs { + // Send the transactions off for signing + TransactionsToSign::::send(txn, &key, &planned_tx.signable); - // Insert the Eventualities into the result - eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality); - } + // Insert the Eventualities into the result + eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality); + } - eventualities + Ok(eventualities) + } } } fn fulfill( + &self, txn: &mut impl DbTxn, block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], payments: Vec>>, - ) -> HashMap, Vec>> { - // Find the key to filfill these payments with - let fulfillment_key = match active_keys[0].1 { - LifetimeStage::ActiveYetNotReporting => { - panic!("expected to fulfill payments despite not reporting for the oldest key") + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + // Find the key to filfill these payments with + let fulfillment_key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + // Queue the payments for this key + for coin in S::NETWORK.coins() { + let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, *coin).unwrap(); + queued_payments + .extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned()); + Db::::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments); } - LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0, - 
LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, - }; - // Queue the payments for this key - for coin in S::NETWORK.coins() { - let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, *coin).unwrap(); - queued_payments - .extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned()); - Db::::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments); + // Handle the queued payments + Ok(HashMap::from([( + fulfillment_key.to_bytes().as_ref().to_vec(), + self.step(txn, active_keys, block, fulfillment_key).await?, + )])) } - - // Handle the queued payments - HashMap::from([( - fulfillment_key.to_bytes().as_ref().to_vec(), - Self::step(txn, active_keys, block, fulfillment_key), - )]) } } diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index e43f5fecb..cb0a8b154 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -2,7 +2,7 @@ #![doc = include_str!("../README.md")] #![deny(missing_docs)] -use core::marker::PhantomData; +use core::{marker::PhantomData, future::Future}; use std::collections::HashMap; use group::GroupEncoding; @@ -14,7 +14,7 @@ use serai_db::DbTxn; use primitives::{OutputType, ReceivedOutput, Payment}; use scanner::{ LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor, - SchedulerUpdate, Scheduler as SchedulerTrait, + SchedulerUpdate, KeyScopedEventualities, Scheduler as SchedulerTrait, }; use scheduler_primitives::*; use utxo_scheduler_primitives::*; @@ -27,12 +27,19 @@ pub struct EffectedReceivedOutputs(pub Vec>); /// A scheduler of transactions for networks premised on the UTXO model which support /// transaction chaining. -pub struct Scheduler>>( - PhantomData, - PhantomData

, -); +#[allow(non_snake_case)] +#[derive(Clone)] +pub struct Scheduler>> { + planner: P, + _S: PhantomData, +} impl>> Scheduler { + /// Create a new scheduler. + pub fn new(planner: P) -> Self { + Self { planner, _S: PhantomData } + } + fn accumulate_outputs(txn: &mut impl DbTxn, outputs: Vec>, from_scanner: bool) { let mut outputs_by_key = HashMap::new(); for output in outputs { @@ -59,13 +66,14 @@ impl>> Sched } } - fn aggregate_inputs( + async fn aggregate_inputs( + &self, txn: &mut impl DbTxn, block: &BlockFor, key_for_change: KeyFor, key: KeyFor, coin: Coin, - ) -> Vec> { + ) -> Result>, >::EphemeralError> { let mut eventualities = vec![]; let mut operating_costs = Db::::operating_costs(txn, coin).0; @@ -74,13 +82,17 @@ impl>> Sched let to_aggregate = outputs.drain(.. P::MAX_INPUTS).collect::>(); Db::::set_outputs(txn, key, coin, &outputs); - let Some(planned) = P::plan_transaction_with_fee_amortization( - &mut operating_costs, - P::fee_rate(block, coin), - to_aggregate, - vec![], - Some(key_for_change), - ) else { + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + to_aggregate, + vec![], + Some(key_for_change), + ) + .await? + else { continue; }; @@ -93,7 +105,7 @@ impl>> Sched } Db::::set_operating_costs(txn, coin, Amount(operating_costs)); - eventualities + Ok(eventualities) } fn fulfillable_payments( @@ -151,12 +163,13 @@ impl>> Sched } } - fn step( + async fn step( + &self, txn: &mut impl DbTxn, active_keys: &[(KeyFor, LifetimeStage)], block: &BlockFor, key: KeyFor, - ) -> Vec> { + ) -> Result>, >::EphemeralError> { let mut eventualities = vec![]; let key_for_change = match active_keys[0].1 { @@ -174,7 +187,8 @@ impl>> Sched let coin = *coin; // Perform any input aggregation we should - eventualities.append(&mut Self::aggregate_inputs(txn, block, key_for_change, key, coin)); + eventualities + .append(&mut self.aggregate_inputs(txn, block, key_for_change, key, coin).await?); // Fetch the operating costs/outputs let mut operating_costs = Db::::operating_costs(txn, coin).0; @@ -211,15 +225,19 @@ impl>> Sched // scanner API) let mut planned_outer = None; for i in 0 .. 2 { - let Some(planned) = P::plan_transaction_with_fee_amortization( - &mut operating_costs, - P::fee_rate(block, coin), - outputs.clone(), - tree[0] - .payments::(coin, &branch_address, tree[0].value()) - .expect("payments were dropped despite providing an input of the needed value"), - Some(key_for_change), - ) else { + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + outputs.clone(), + tree[0] + .payments::(coin, &branch_address, tree[0].value()) + .expect("payments were dropped despite providing an input of the needed value"), + Some(key_for_change), + ) + .await? 
+ else { // This should trip on the first iteration or not at all assert_eq!(i, 0); // This doesn't have inputs even worth aggregating so drop the entire tree @@ -300,14 +318,18 @@ impl>> Sched }; let branch_output_id = branch_output.id(); - let Some(mut planned) = P::plan_transaction_with_fee_amortization( - // Uses 0 as there's no operating costs to incur/amortize here - &mut 0, - P::fee_rate(block, coin), - vec![branch_output], - payments, - None, - ) else { + let Some(mut planned) = self + .planner + .plan_transaction_with_fee_amortization( + // Uses 0 as there's no operating costs to incur/amortize here + &mut 0, + P::fee_rate(block, coin), + vec![branch_output], + payments, + None, + ) + .await? + else { // This Branch isn't viable, so drop it (and its children) continue; }; @@ -328,49 +350,56 @@ impl>> Sched } } - eventualities + Ok(eventualities) } - fn flush_outputs( + async fn flush_outputs( + &self, txn: &mut impl DbTxn, - eventualities: &mut HashMap, Vec>>, + eventualities: &mut KeyScopedEventualities, block: &BlockFor, from: KeyFor, to: KeyFor, coin: Coin, - ) { + ) -> Result<(), >::EphemeralError> { let from_bytes = from.to_bytes().as_ref().to_vec(); // Ensure our inputs are aggregated eventualities .entry(from_bytes.clone()) .or_insert(vec![]) - .append(&mut Self::aggregate_inputs(txn, block, to, from, coin)); + .append(&mut self.aggregate_inputs(txn, block, to, from, coin).await?); // Now that our inputs are aggregated, transfer all of them to the new key let mut operating_costs = Db::::operating_costs(txn, coin).0; let outputs = Db::::outputs(txn, from, coin).unwrap(); if outputs.is_empty() { - return; + return Ok(()); } - let planned = P::plan_transaction_with_fee_amortization( - &mut operating_costs, - P::fee_rate(block, coin), - outputs, - vec![], - Some(to), - ); + let planned = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + P::fee_rate(block, coin), + outputs, + vec![], + Some(to), + ) + .await?; Db::::set_operating_costs(txn, coin, Amount(operating_costs)); - let Some(planned) = planned else { return }; + let Some(planned) = planned else { return Ok(()) }; TransactionsToSign::::send(txn, &from, &planned.signable); eventualities.get_mut(&from_bytes).unwrap().push(planned.eventuality); Self::accumulate_outputs(txn, planned.auxilliary.0, false); + + Ok(()) } } impl>> SchedulerTrait for Scheduler { + type EphemeralError = P::EphemeralError; type SignableTransaction = P::SignableTransaction; fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { @@ -383,29 +412,32 @@ impl>> Sched } fn flush_key( + &self, txn: &mut impl DbTxn, block: &BlockFor, retiring_key: KeyFor, new_key: KeyFor, - ) -> HashMap, Vec>> { - let mut eventualities = HashMap::new(); - for coin in S::NETWORK.coins() { - // Move the payments to the new key - { - let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); - let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); - - let mut queued = still_queued; - queued.append(&mut new_queued); + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let mut eventualities = HashMap::new(); + for coin in S::NETWORK.coins() { + // Move the payments to the new key + { + let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); + let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); + + let mut queued = still_queued; + queued.append(&mut new_queued); + + Db::::set_queued_payments(txn, retiring_key, *coin, &[]); + Db::::set_queued_payments(txn, 
new_key, *coin, &queued); + } - Db::::set_queued_payments(txn, retiring_key, *coin, &[]); - Db::::set_queued_payments(txn, new_key, *coin, &queued); + // Move the outputs to the new key + self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin).await?; } - - // Move the outputs to the new key - Self::flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin); + Ok(eventualities) } - eventualities } fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { @@ -418,121 +450,137 @@ impl>> Sched } fn update( + &self, txn: &mut impl DbTxn, block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], update: SchedulerUpdate, - ) -> HashMap, Vec>> { - Self::accumulate_outputs(txn, update.outputs().to_vec(), true); - - // Fulfill the payments we prior couldn't - let mut eventualities = HashMap::new(); - for (key, _stage) in active_keys { - assert!(eventualities - .insert(key.to_bytes().as_ref().to_vec(), Self::step(txn, active_keys, block, *key)) - .is_none()); - } + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + Self::accumulate_outputs(txn, update.outputs().to_vec(), true); + + // Fulfill the payments we prior couldn't + let mut eventualities = HashMap::new(); + for (key, _stage) in active_keys { + assert!(eventualities + .insert(key.to_bytes().as_ref().to_vec(), self.step(txn, active_keys, block, *key).await?) + .is_none()); + } - // If this key has been flushed, forward all outputs - match active_keys[0].1 { - LifetimeStage::ActiveYetNotReporting | - LifetimeStage::Active | - LifetimeStage::UsingNewForChange => {} - LifetimeStage::Forwarding | LifetimeStage::Finishing => { - for coin in S::NETWORK.coins() { - Self::flush_outputs( - txn, - &mut eventualities, - block, - active_keys[0].0, - active_keys[1].0, - *coin, - ); + // If this key has been flushed, forward all outputs + match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => {} + LifetimeStage::Forwarding | LifetimeStage::Finishing => { + for coin in S::NETWORK.coins() { + self + .flush_outputs( + txn, + &mut eventualities, + block, + active_keys[0].0, + active_keys[1].0, + *coin, + ) + .await?; + } } } - } - // Create the transactions for the forwards/burns - { - let mut planned_txs = vec![]; - for forward in update.forwards() { - let key = forward.key(); - - assert_eq!(active_keys.len(), 2); - assert_eq!(active_keys[0].1, LifetimeStage::Forwarding); - assert_eq!(active_keys[1].1, LifetimeStage::Active); - let forward_to_key = active_keys[1].0; - - let Some(plan) = P::plan_transaction_with_fee_amortization( - // This uses 0 for the operating costs as we don't incur any here - // If the output can't pay for itself to be forwarded, we simply drop it - &mut 0, - P::fee_rate(block, forward.balance().coin), - vec![forward.clone()], - vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)], - None, - ) else { - continue; - }; - planned_txs.push((key, plan)); - } - for to_return in update.returns() { - let key = to_return.output().key(); - let out_instruction = - Payment::new(to_return.address().clone(), to_return.output().balance(), None); - let Some(plan) = P::plan_transaction_with_fee_amortization( - // This uses 0 for the operating costs as we don't incur any here - // If the output can't pay for itself to be returned, we simply drop it - &mut 0, - P::fee_rate(block, out_instruction.balance().coin), - vec![to_return.output().clone()], - vec![out_instruction], - None, - ) else { - continue; 
- }; - planned_txs.push((key, plan)); - } + // Create the transactions for the forwards/burns + { + let mut planned_txs = vec![]; + for forward in update.forwards() { + let key = forward.key(); + + assert_eq!(active_keys.len(), 2); + assert_eq!(active_keys[0].1, LifetimeStage::Forwarding); + assert_eq!(active_keys[1].1, LifetimeStage::Active); + let forward_to_key = active_keys[1].0; + + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be forwarded, we simply drop it + &mut 0, + P::fee_rate(block, forward.balance().coin), + vec![forward.clone()], + vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)], + None, + ) + .await? + else { + continue; + }; + planned_txs.push((key, plan)); + } + for to_return in update.returns() { + let key = to_return.output().key(); + let out_instruction = + Payment::new(to_return.address().clone(), to_return.output().balance(), None); + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be returned, we simply drop it + &mut 0, + P::fee_rate(block, out_instruction.balance().coin), + vec![to_return.output().clone()], + vec![out_instruction], + None, + ) + .await? + else { + continue; + }; + planned_txs.push((key, plan)); + } - for (key, planned_tx) in planned_txs { - // Send the transactions off for signing - TransactionsToSign::::send(txn, &key, &planned_tx.signable); + for (key, planned_tx) in planned_txs { + // Send the transactions off for signing + TransactionsToSign::::send(txn, &key, &planned_tx.signable); - // Insert the Eventualities into the result - eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality); - } + // Insert the Eventualities into the result + eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality); + } - eventualities + Ok(eventualities) + } } } fn fulfill( + &self, txn: &mut impl DbTxn, block: &BlockFor, active_keys: &[(KeyFor, LifetimeStage)], payments: Vec>>, - ) -> HashMap, Vec>> { - // Find the key to filfill these payments with - let fulfillment_key = match active_keys[0].1 { - LifetimeStage::ActiveYetNotReporting => { - panic!("expected to fulfill payments despite not reporting for the oldest key") + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + // Find the key to filfill these payments with + let fulfillment_key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + // Queue the payments for this key + for coin in S::NETWORK.coins() { + let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, *coin).unwrap(); + queued_payments + .extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned()); + Db::::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments); } - LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0, - LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, - }; - // Queue the payments for this key - for coin in S::NETWORK.coins() { - let mut queued_payments = Db::::queued_payments(txn, 
fulfillment_key, *coin).unwrap(); - queued_payments - .extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned()); - Db::::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments); + // Handle the queued payments + Ok(HashMap::from([( + fulfillment_key.to_bytes().as_ref().to_vec(), + self.step(txn, active_keys, block, fulfillment_key).await?, + )])) } - - // Handle the queued payments - HashMap::from([( - fulfillment_key.to_bytes().as_ref().to_vec(), - Self::step(txn, active_keys, block, fulfillment_key), - )]) } } From 1cc7d816306b22f4a76a0838e5b09047534ac88d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 14 Sep 2024 01:38:31 -0400 Subject: [PATCH 122/179] Stub out Scheduler in the Monero processor --- processor/monero/src/lib.rs | 10 -- processor/monero/src/main.rs | 21 ++-- processor/monero/src/primitives/block.rs | 26 ++-- processor/monero/src/primitives/mod.rs | 37 +++++- processor/monero/src/primitives/output.rs | 9 +- processor/monero/src/scheduler.rs | 147 ++++++++++++++++++---- 6 files changed, 178 insertions(+), 72 deletions(-) diff --git a/processor/monero/src/lib.rs b/processor/monero/src/lib.rs index 52ebb6cbe..0848e08a5 100644 --- a/processor/monero/src/lib.rs +++ b/processor/monero/src/lib.rs @@ -130,8 +130,6 @@ impl Network for Monero { const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 120; const CONFIRMATIONS: usize = 10; - const MAX_OUTPUTS: usize = 16; - // TODO const COST_TO_AGGREGATE: u64 = 0; @@ -318,12 +316,4 @@ impl Network for Monero { self.get_block(block).await.unwrap() } } - -impl UtxoNetwork for Monero { - // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction - // larger than 150kb. This fits within the 100kb mark - // Technically, it can be ~124, yet a small bit of buffer is appreciated - // TODO: Test creating a TX this big - const MAX_INPUTS: usize = 120; -} */ diff --git a/processor/monero/src/main.rs b/processor/monero/src/main.rs index 344b6c487..daba3255b 100644 --- a/processor/monero/src/main.rs +++ b/processor/monero/src/main.rs @@ -6,7 +6,7 @@ static ALLOCATOR: zalloc::ZeroizingAlloc = zalloc::ZeroizingAlloc(std::alloc::System); -use monero_wallet::rpc::Rpc as MRpc; +use monero_simple_request_rpc::SimpleRequestRpc; mod primitives; pub(crate) use crate::primitives::*; @@ -15,18 +15,15 @@ mod key_gen; use crate::key_gen::KeyGenParams; mod rpc; use rpc::Rpc; - -/* mod scheduler; -use scheduler::Scheduler; +use scheduler::{Planner, Scheduler}; #[tokio::main] async fn main() { let db = bin::init(); let feed = Rpc { - db: db.clone(), rpc: loop { - match MRpc::new(bin::url()).await { + match SimpleRequestRpc::new(bin::url()).await { Ok(rpc) => break rpc, Err(e) => { log::error!("couldn't connect to the Monero node: {e:?}"); @@ -36,9 +33,11 @@ async fn main() { }, }; - bin::main_loop::<_, KeyGenParams, Scheduler<_>, Rpc>(db, feed.clone(), feed).await; + bin::main_loop::<_, KeyGenParams, _>( + db, + feed.clone(), + Scheduler::new(Planner(feed.clone())), + feed, + ) + .await; } -*/ - -#[tokio::main] -async fn main() {} diff --git a/processor/monero/src/primitives/block.rs b/processor/monero/src/primitives/block.rs index 130e5ac82..70a559c1d 100644 --- a/processor/monero/src/primitives/block.rs +++ b/processor/monero/src/primitives/block.rs @@ -1,21 +1,17 @@ use std::collections::HashMap; -use zeroize::Zeroizing; - use ciphersuite::{Ciphersuite, Ed25519}; use monero_wallet::{ - block::Block as MBlock, rpc::ScannableBlock as MScannableBlock, ViewPairError, - GuaranteedViewPair, 
ScanError, GuaranteedScanner, + block::Block as MBlock, rpc::ScannableBlock as MScannableBlock, ScanError, GuaranteedScanner, }; use serai_client::networks::monero::Address; use primitives::{ReceivedOutput, EventualityTracker}; -use view_keys::view_key; use crate::{ - EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS, output::Output, - transaction::Eventuality, + EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS, view_pair, + output::Output, transaction::Eventuality, }; #[derive(Clone, Debug)] @@ -45,17 +41,11 @@ impl primitives::Block for Block { } fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec { - let view_pair = match GuaranteedViewPair::new(key.0, Zeroizing::new(*view_key::(0))) { - Ok(view_pair) => view_pair, - Err(ViewPairError::TorsionedSpendKey) => { - unreachable!("dalek_ff_group::EdwardsPoint had torsion") - } - }; - let mut scanner = GuaranteedScanner::new(view_pair); - scanner.register_subaddress(EXTERNAL_SUBADDRESS.unwrap()); - scanner.register_subaddress(BRANCH_SUBADDRESS.unwrap()); - scanner.register_subaddress(CHANGE_SUBADDRESS.unwrap()); - scanner.register_subaddress(FORWARDED_SUBADDRESS.unwrap()); + let mut scanner = GuaranteedScanner::new(view_pair(key)); + scanner.register_subaddress(EXTERNAL_SUBADDRESS); + scanner.register_subaddress(BRANCH_SUBADDRESS); + scanner.register_subaddress(CHANGE_SUBADDRESS); + scanner.register_subaddress(FORWARDED_SUBADDRESS); match scanner.scan(self.0.clone()) { Ok(outputs) => outputs.not_additionally_locked().into_iter().map(Output).collect(), Err(ScanError::UnsupportedProtocol(version)) => { diff --git a/processor/monero/src/primitives/mod.rs b/processor/monero/src/primitives/mod.rs index de0573995..317cae280 100644 --- a/processor/monero/src/primitives/mod.rs +++ b/processor/monero/src/primitives/mod.rs @@ -1,10 +1,37 @@ -use monero_wallet::address::SubaddressIndex; +use zeroize::Zeroizing; + +use ciphersuite::{Ciphersuite, Ed25519}; + +use monero_wallet::{address::SubaddressIndex, ViewPairError, GuaranteedViewPair}; + +use view_keys::view_key; pub(crate) mod output; pub(crate) mod transaction; pub(crate) mod block; -pub(crate) const EXTERNAL_SUBADDRESS: Option = SubaddressIndex::new(1, 0); -pub(crate) const BRANCH_SUBADDRESS: Option = SubaddressIndex::new(2, 0); -pub(crate) const CHANGE_SUBADDRESS: Option = SubaddressIndex::new(2, 1); -pub(crate) const FORWARDED_SUBADDRESS: Option = SubaddressIndex::new(2, 2); +pub(crate) const EXTERNAL_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(1, 0) { + Some(index) => index, + None => panic!("SubaddressIndex for EXTERNAL_SUBADDRESS was None"), +}; +pub(crate) const BRANCH_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(2, 0) { + Some(index) => index, + None => panic!("SubaddressIndex for BRANCH_SUBADDRESS was None"), +}; +pub(crate) const CHANGE_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(2, 1) { + Some(index) => index, + None => panic!("SubaddressIndex for CHANGE_SUBADDRESS was None"), +}; +pub(crate) const FORWARDED_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(2, 2) { + Some(index) => index, + None => panic!("SubaddressIndex for FORWARDED_SUBADDRESS was None"), +}; + +pub(crate) fn view_pair(key: ::G) -> GuaranteedViewPair { + match GuaranteedViewPair::new(key.0, Zeroizing::new(*view_key::(0))) { + Ok(view_pair) => view_pair, + Err(ViewPairError::TorsionedSpendKey) => { + unreachable!("dalek_ff_group::EdwardsPoint had torsion") + } + } +} diff --git 
a/processor/monero/src/primitives/output.rs b/processor/monero/src/primitives/output.rs index d66fd983e..fea042c83 100644 --- a/processor/monero/src/primitives/output.rs +++ b/processor/monero/src/primitives/output.rs @@ -46,16 +46,17 @@ impl ReceivedOutput<::G, Address> for Output { type TransactionId = [u8; 32]; fn kind(&self) -> OutputType { - if self.0.subaddress() == EXTERNAL_SUBADDRESS { + let subaddress = self.0.subaddress().unwrap(); + if subaddress == EXTERNAL_SUBADDRESS { return OutputType::External; } - if self.0.subaddress() == BRANCH_SUBADDRESS { + if subaddress == BRANCH_SUBADDRESS { return OutputType::Branch; } - if self.0.subaddress() == CHANGE_SUBADDRESS { + if subaddress == CHANGE_SUBADDRESS { return OutputType::Change; } - if self.0.subaddress() == FORWARDED_SUBADDRESS { + if subaddress == FORWARDED_SUBADDRESS { return OutputType::Forwarded; } unreachable!("scanned output to unknown subaddress"); diff --git a/processor/monero/src/scheduler.rs b/processor/monero/src/scheduler.rs index 7666ec4f8..ef52c4131 100644 --- a/processor/monero/src/scheduler.rs +++ b/processor/monero/src/scheduler.rs @@ -1,3 +1,4 @@ +/* async fn make_signable_transaction( block_number: usize, plan_id: &[u8; 32], @@ -136,10 +137,106 @@ match MSignableTransaction::new( }, } } +*/ + +use core::future::Future; + +use ciphersuite::{Ciphersuite, Ed25519}; + +use monero_wallet::rpc::{FeeRate, RpcError}; + +use serai_client::{ + primitives::{Coin, Amount}, + networks::monero::Address, +}; + +use primitives::{OutputType, ReceivedOutput, Payment}; +use scanner::{KeyFor, AddressFor, OutputFor, BlockFor}; +use utxo_scheduler::{PlannedTransaction, TransactionPlanner}; + +use monero_wallet::address::Network; + +use crate::{ + EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS, view_pair, + output::Output, + transaction::{SignableTransaction, Eventuality}, + rpc::Rpc, +}; + +fn address_from_serai_key(key: ::G, kind: OutputType) -> Address { + view_pair(key) + .address( + Network::Mainnet, + Some(match kind { + OutputType::External => EXTERNAL_SUBADDRESS, + OutputType::Branch => BRANCH_SUBADDRESS, + OutputType::Change => CHANGE_SUBADDRESS, + OutputType::Forwarded => FORWARDED_SUBADDRESS, + }), + None, + ) + .try_into() + .expect("created address which wasn't representable") +} +#[derive(Clone)] +pub(crate) struct Planner(pub(crate) Rpc); +impl TransactionPlanner for Planner { + type EphemeralError = RpcError; + + type FeeRate = FeeRate; + + type SignableTransaction = SignableTransaction; + + // wallet2 will not create a transaction larger than 100 KB, and Monero won't relay a transaction + // larger than 150 KB. This fits within the 100 KB mark to fit in and not poke the bear. 
+ // Technically, it can be ~124, yet a small bit of buffer is appreciated + // TODO: Test creating a TX this big + const MAX_INPUTS: usize = 120; + const MAX_OUTPUTS: usize = 16; + + fn fee_rate(block: &BlockFor, coin: Coin) -> Self::FeeRate { + assert_eq!(coin, Coin::Monero); + // TODO + todo!("TODO") + } + + fn branch_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Branch) + } + fn change_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Change) + } + fn forwarding_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Forwarded) + } + + fn calculate_fee( + fee_rate: Self::FeeRate, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> Amount { + todo!("TODO") + } + + fn plan( + &self, + fee_rate: Self::FeeRate, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> impl Send + + Future, RpcError>> + { + async move { todo!("TODO") } + } +} + +pub(crate) type Scheduler = utxo_standard_scheduler::Scheduler; /* -use ciphersuite::{Ciphersuite, Secp256k1}; +use ciphersuite::{Ciphersuite, Ed25519}; use bitcoin_serai::{ bitcoin::ScriptBuf, @@ -163,8 +260,8 @@ use crate::{ rpc::Rpc, }; -fn address_from_serai_key(key: ::G, kind: OutputType) -> Address { - let offset = ::G::GENERATOR * offsets_for_key(key)[&kind]; +fn address_from_serai_key(key: ::G, kind: OutputType) -> Address { + let offset = ::G::GENERATOR * offsets_for_key(key)[&kind]; Address::new( p2tr_script_buf(key + offset) .expect("creating address from Serai key which wasn't properly tweaked"), @@ -174,17 +271,17 @@ fn address_from_serai_key(key: ::G, kind: OutputType) fn signable_transaction( fee_per_vbyte: u64, - inputs: Vec>>, - payments: Vec>>>, - change: Option>>, + inputs: Vec>, + payments: Vec>>, + change: Option>, ) -> Result<(SignableTransaction, BSignableTransaction), TransactionError> { assert!( inputs.len() < - , ()>>::MAX_INPUTS + >::MAX_INPUTS ); assert!( (payments.len() + usize::from(u8::from(change.is_some()))) < - , ()>>::MAX_OUTPUTS + >::MAX_OUTPUTS ); let inputs = inputs.into_iter().map(|input| input.output).collect::>(); @@ -194,7 +291,7 @@ fn signable_transaction( .map(|payment| { (payment.address().clone(), { let balance = payment.balance(); - assert_eq!(balance.coin, Coin::Bitcoin); + assert_eq!(balance.coin, Coin::Monero); balance.amount.0 }) }) @@ -206,14 +303,14 @@ fn signable_transaction( */ payments.push(( // The generator is even so this is valid - Address::new(p2tr_script_buf(::G::GENERATOR).unwrap()).unwrap(), + Address::new(p2tr_script_buf(::G::GENERATOR).unwrap()).unwrap(), // This uses the minimum output value allowed, as defined as a constant in bitcoin-serai // TODO: Add a test for this comparing to bitcoin's `minimal_non_dust` bitcoin_serai::wallet::DUST, )); let change = change - .map(, ()>>::change_address); + .map(>::change_address); BSignableTransaction::new( inputs.clone(), @@ -231,12 +328,14 @@ fn signable_transaction( pub(crate) struct Planner; impl TransactionPlanner for Planner { + type EphemeralError = RpcError; + type FeeRate = u64; type SignableTransaction = SignableTransaction; /* - Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT). + Monero has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT). A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes. 
While our inputs are entirely SegWit, such fine tuning is not necessary and could create issues in @@ -255,27 +354,27 @@ impl TransactionPlanner for Planner { // to unstick any transactions which had too low of a fee. const MAX_OUTPUTS: usize = 519; - fn fee_rate(block: &BlockFor>, coin: Coin) -> Self::FeeRate { - assert_eq!(coin, Coin::Bitcoin); + fn fee_rate(block: &BlockFor, coin: Coin) -> Self::FeeRate { + assert_eq!(coin, Coin::Monero); // TODO 1 } - fn branch_address(key: KeyFor>) -> AddressFor> { + fn branch_address(key: KeyFor) -> AddressFor { address_from_serai_key(key, OutputType::Branch) } - fn change_address(key: KeyFor>) -> AddressFor> { + fn change_address(key: KeyFor) -> AddressFor { address_from_serai_key(key, OutputType::Change) } - fn forwarding_address(key: KeyFor>) -> AddressFor> { + fn forwarding_address(key: KeyFor) -> AddressFor { address_from_serai_key(key, OutputType::Forwarded) } fn calculate_fee( fee_rate: Self::FeeRate, - inputs: Vec>>, - payments: Vec>>>, - change: Option>>, + inputs: Vec>, + payments: Vec>>, + change: Option>, ) -> Amount { match signable_transaction::(fee_rate, inputs, payments, change) { Ok(tx) => Amount(tx.1.needed_fee()), @@ -294,10 +393,10 @@ impl TransactionPlanner for Planner { fn plan( fee_rate: Self::FeeRate, - inputs: Vec>>, - payments: Vec>>>, - change: Option>>, - ) -> PlannedTransaction, Self::SignableTransaction, ()> { + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> PlannedTransaction { let key = inputs.first().unwrap().key(); for input in &inputs { assert_eq!(key, input.key()); From ae9835b5bc2f6d4b17d0d7785244e16b7661e0df Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 14 Sep 2024 04:19:44 -0400 Subject: [PATCH 123/179] Tighten documentation on Block::number --- networks/monero/src/block.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/networks/monero/src/block.rs b/networks/monero/src/block.rs index 62a77f8b7..15a8d1fc5 100644 --- a/networks/monero/src/block.rs +++ b/networks/monero/src/block.rs @@ -79,10 +79,13 @@ pub struct Block { } impl Block { - /// The zero-index position of this block within the blockchain. + /// The zero-indexed position of this block within the blockchain. /// /// This information comes from the Block's miner transaction. If the miner transaction isn't - /// structed as expected, this will return None. + /// structed as expected, this will return None. This will return Some for any Block which would + /// pass the consensus rules. + // https://github.com/monero-project/monero/blob/a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623 + // /src/cryptonote_core/blockchain.cpp#L1365-L1382 pub fn number(&self) -> Option { match &self.miner_transaction { Transaction::V1 { prefix, .. } | Transaction::V2 { prefix, .. } => { From 3bd8383aa68c192c0e93010dd8294de32f4debeb Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 14 Sep 2024 04:23:42 -0400 Subject: [PATCH 124/179] Change dummy payment ID behavior on 2-output, no change This reduces the ability to fingerprint from any observer of the blockchain to just one of the two recipients. 
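To make the changed rule concrete, here is a minimal sketch (a hypothetical helper, not the code in this patch) of the predicate change in send/tx.rs below: previously a dummy payment ID was only attached when one of the two outputs was change; now it is attached to any two-output transaction lacking a real payment ID, matching wallet2.

fn pushes_dummy_payment_id(payments: usize, one_output_is_change: bool, wallet2_like: bool) -> bool {
  if wallet2_like {
    // New behavior: every two-output transaction without a real payment ID
    // carries an encrypted dummy payment ID, as wallet2's transactions do. At
    // most one recipient (who expects a specific dummy) can tell the difference.
    payments == 2
  } else {
    // Old behavior: only when one of the two outputs was change. A two-output
    // transaction without change then lacked any payment ID, fingerprinting the
    // sender as non-wallet2 software to every observer of the blockchain.
    (payments == 2) && one_output_is_change
  }
}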
--- networks/monero/wallet/src/send/mod.rs | 9 +++++---- networks/monero/wallet/src/send/tx.rs | 16 ++++++++++++---- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/networks/monero/wallet/src/send/mod.rs b/networks/monero/wallet/src/send/mod.rs index 87d98d69f..3bd883dfe 100644 --- a/networks/monero/wallet/src/send/mod.rs +++ b/networks/monero/wallet/src/send/mod.rs @@ -100,10 +100,11 @@ impl Change { /// /// 1) The change in the TX is shunted to the fee (making it fingerprintable). /// - /// 2) If there are two outputs in the TX, Monero would create a payment ID for the non-change - /// output so an observer can't tell apart TXs with a payment ID from TXs without a payment - /// ID. monero-wallet will simply not create a payment ID in this case, revealing it's a - /// monero-wallet TX without change. + /// 2) In two-output transactions, where the payment address doesn't have a payment ID, wallet2 + /// includes an encrypted dummy payment ID for the non-change output in order to not allow + /// differentiating if transactions send to addresses with payment IDs or not. monero-wallet + /// includes a dummy payment ID which at least one recipient will identify as not the expected + /// dummy payment ID, revealing to the recipient(s) the sender is using non-wallet2 software. pub fn fingerprintable(address: Option) -> Change { if let Some(address) = address { Change(Some(ChangeEnum::AddressOnly(address))) diff --git a/networks/monero/wallet/src/send/tx.rs b/networks/monero/wallet/src/send/tx.rs index 659622115..0ebd47f10 100644 --- a/networks/monero/wallet/src/send/tx.rs +++ b/networks/monero/wallet/src/send/tx.rs @@ -76,10 +76,18 @@ impl SignableTransaction { PaymentId::Encrypted(id).write(&mut id_vec).unwrap(); extra.push_nonce(id_vec); } else { - // If there's no payment ID, we push a dummy (as wallet2 does) if there's only one payment - if (self.payments.len() == 2) && - self.payments.iter().any(|payment| matches!(payment, InternalPayment::Change(_))) - { + /* + If there's no payment ID, we push a dummy (as wallet2 does) to the first payment. + + This does cause a random payment ID for the other recipient (a documented fingerprint). + Functionally, random payment IDs should be fine as wallet2 will trigger this same behavior + (a random payment ID being seen by the recipient) with a batch send if one of the recipient + addresses has a payment ID. + + The alternative would be to not include any payment ID, fingerprinting to the entire + blockchain this is non-standard wallet software (instead of just a single recipient). + */ + if self.payments.len() == 2 { let (_, payment_id_xor) = self .payments .iter() From b537c294c3908461e04f8977209ecbc996a64cb9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 14 Sep 2024 04:24:48 -0400 Subject: [PATCH 125/179] Monero Planner Finishes the Monero processor. 
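One detail worth highlighting before the diff: all signers must select identical decoys for each input, so the planner seeds a deterministic RNG from a public, per-transaction value (a spent output's ID). The following is a simplified sketch of that determinism under assumed names; the actual selection uses OutputWithDecoys::fingerprintable_deterministic_new, which samples by output age rather than uniformly.

use rand_core::{RngCore, SeedableRng};
use rand_chacha::ChaCha20Rng;

// Every signer derives the same RNG from the same public seed, so they all
// sample an identical decoy set without any extra round of communication.
fn sample_decoy_indices(seed: [u8; 32], decoys: usize, outputs_on_chain: u64) -> Vec<u64> {
  let mut rng = ChaCha20Rng::from_seed(seed);
  // Uniform sampling is a stand-in here; real decoy selection is weighted.
  (0 .. decoys).map(|_| rng.next_u64() % outputs_on_chain).collect()
}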
--- Cargo.lock | 1 + processor/bitcoin/src/main.rs | 9 - .../bitcoin/src/primitives/transaction.rs | 25 +- processor/bitcoin/src/scheduler.rs | 57 +- processor/monero/Cargo.toml | 1 + processor/monero/src/lib.rs | 319 ----------- processor/monero/src/main.rs | 146 +++++ processor/monero/src/primitives/output.rs | 7 - .../monero/src/primitives/transaction.rs | 8 +- processor/monero/src/scheduler.rs | 513 ++++++------------ .../scheduler/utxo/primitives/src/lib.rs | 29 +- processor/scheduler/utxo/standard/src/lib.rs | 12 +- .../utxo/transaction-chaining/src/lib.rs | 12 +- 13 files changed, 395 insertions(+), 744 deletions(-) delete mode 100644 processor/monero/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 9e34ea3c3..c3e39a09b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8525,6 +8525,7 @@ dependencies = [ "monero-simple-request-rpc", "monero-wallet", "parity-scale-codec", + "rand_chacha", "rand_core", "serai-client", "serai-db", diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index d029ad8b5..f260c47cb 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -223,15 +223,6 @@ impl Network for Bitcoin { self.rpc.get_block_number(id).await.unwrap() } - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - _: &EmptyClaim, - ) -> bool { - self.rpc.get_transaction(&eventuality.0).await.is_ok() - } - #[cfg(test)] async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction { self.rpc.get_transaction(&id.0).await.unwrap() diff --git a/processor/bitcoin/src/primitives/transaction.rs b/processor/bitcoin/src/primitives/transaction.rs index 5fca0b91b..8e7a26f6c 100644 --- a/processor/bitcoin/src/primitives/transaction.rs +++ b/processor/bitcoin/src/primitives/transaction.rs @@ -49,7 +49,7 @@ impl scheduler::Transaction for Transaction { #[derive(Clone, Debug)] pub(crate) struct SignableTransaction { pub(crate) inputs: Vec, - pub(crate) payments: Vec<(Address, u64)>, + pub(crate) payments: Vec<(ScriptBuf, u64)>, pub(crate) change: Option
<Address>
, pub(crate) fee_per_vbyte: u64, } @@ -58,12 +58,7 @@ impl SignableTransaction { fn signable(self) -> Result { BSignableTransaction::new( self.inputs, - &self - .payments - .iter() - .cloned() - .map(|(address, amount)| (ScriptBuf::from(address), amount)) - .collect::>(), + &self.payments, self.change.map(ScriptBuf::from), None, self.fee_per_vbyte, @@ -108,11 +103,19 @@ impl scheduler::SignableTransaction for SignableTransaction { inputs }; - let payments = <_>::deserialize_reader(reader)?; + let payments = Vec::<(Vec, u64)>::deserialize_reader(reader)?; let change = <_>::deserialize_reader(reader)?; let fee_per_vbyte = <_>::deserialize_reader(reader)?; - Ok(Self { inputs, payments, change, fee_per_vbyte }) + Ok(Self { + inputs, + payments: payments + .into_iter() + .map(|(address, amount)| (ScriptBuf::from_bytes(address), amount)) + .collect(), + change, + fee_per_vbyte, + }) } fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?; @@ -120,7 +123,9 @@ impl scheduler::SignableTransaction for SignableTransaction { input.write(writer)?; } - self.payments.serialize(writer)?; + for payment in &self.payments { + (payment.0.as_script().as_bytes(), payment.1).serialize(writer)?; + } self.change.serialize(writer)?; self.fee_per_vbyte.serialize(writer)?; diff --git a/processor/bitcoin/src/scheduler.rs b/processor/bitcoin/src/scheduler.rs index b6554bdae..08dc508c1 100644 --- a/processor/bitcoin/src/scheduler.rs +++ b/processor/bitcoin/src/scheduler.rs @@ -35,7 +35,7 @@ fn address_from_serai_key(key: ::G, kind: OutputType) } fn signable_transaction( - fee_per_vbyte: u64, + _reference_block: &BlockFor>, inputs: Vec>>, payments: Vec>>>, change: Option>>, @@ -49,12 +49,15 @@ fn signable_transaction( , EffectedReceivedOutputs>>>::MAX_OUTPUTS ); + // TODO + let fee_per_vbyte = 1; + let inputs = inputs.into_iter().map(|input| input.output).collect::>(); let mut payments = payments .into_iter() .map(|payment| { - (payment.address().clone(), { + (ScriptBuf::from(payment.address().clone()), { let balance = payment.balance(); assert_eq!(balance.coin, Coin::Bitcoin); balance.amount.0 @@ -68,7 +71,7 @@ fn signable_transaction( */ payments.push(( // The generator is even so this is valid - Address::new(p2tr_script_buf(::G::GENERATOR).unwrap()).unwrap(), + p2tr_script_buf(::G::GENERATOR).unwrap(), // This uses the minimum output value allowed, as defined as a constant in bitcoin-serai // TODO: Add a test for this comparing to bitcoin's `minimal_non_dust` bitcoin_serai::wallet::DUST, @@ -79,11 +82,7 @@ fn signable_transaction( BSignableTransaction::new( inputs.clone(), - &payments - .iter() - .cloned() - .map(|(address, amount)| (ScriptBuf::from(address), amount)) - .collect::>(), + &payments, change.clone().map(ScriptBuf::from), None, fee_per_vbyte, @@ -95,7 +94,6 @@ fn signable_transaction( pub(crate) struct Planner; impl TransactionPlanner, EffectedReceivedOutputs>> for Planner { type EphemeralError = (); - type FeeRate = u64; type SignableTransaction = SignableTransaction; @@ -119,12 +117,6 @@ impl TransactionPlanner, EffectedReceivedOutputs>> for Plan // to unstick any transactions which had too low of a fee. 
const MAX_OUTPUTS: usize = 519; - fn fee_rate(block: &BlockFor>, coin: Coin) -> Self::FeeRate { - assert_eq!(coin, Coin::Bitcoin); - // TODO - 1 - } - fn branch_address(key: KeyFor>) -> AddressFor> { address_from_serai_key(key, OutputType::Branch) } @@ -136,29 +128,32 @@ impl TransactionPlanner, EffectedReceivedOutputs>> for Plan } fn calculate_fee( - fee_rate: Self::FeeRate, + &self, + reference_block: &BlockFor>, inputs: Vec>>, payments: Vec>>>, change: Option>>, - ) -> Amount { - match signable_transaction::(fee_rate, inputs, payments, change) { - Ok(tx) => Amount(tx.1.needed_fee()), - Err( - TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, - ) => panic!("malformed arguments to calculate_fee"), - // No data, we have a minimum fee rate, we checked the amount of inputs/outputs - Err( - TransactionError::TooMuchData | - TransactionError::TooLowFee | - TransactionError::TooLargeTransaction, - ) => unreachable!(), - Err(TransactionError::NotEnoughFunds { fee, .. }) => Amount(fee), + ) -> impl Send + Future> { + async move { + Ok(match signable_transaction::(reference_block, inputs, payments, change) { + Ok(tx) => Amount(tx.1.needed_fee()), + Err( + TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, + ) => panic!("malformed arguments to calculate_fee"), + // No data, we have a minimum fee rate, we checked the amount of inputs/outputs + Err( + TransactionError::TooMuchData | + TransactionError::TooLowFee | + TransactionError::TooLargeTransaction, + ) => unreachable!(), + Err(TransactionError::NotEnoughFunds { fee, .. }) => Amount(fee), + }) } } fn plan( &self, - fee_rate: Self::FeeRate, + reference_block: &BlockFor>, inputs: Vec>>, payments: Vec>>>, change: Option>>, @@ -176,7 +171,7 @@ impl TransactionPlanner, EffectedReceivedOutputs>> for Plan } let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id()); - match signable_transaction::(fee_rate, inputs.clone(), payments, change) { + match signable_transaction::(reference_block, inputs.clone(), payments, change) { Ok(tx) => Ok(PlannedTransaction { signable: tx.0, eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output }, diff --git a/processor/monero/Cargo.toml b/processor/monero/Cargo.toml index 436f327e5..cc895edaa 100644 --- a/processor/monero/Cargo.toml +++ b/processor/monero/Cargo.toml @@ -18,6 +18,7 @@ workspace = true [dependencies] rand_core = { version = "0.6", default-features = false } +rand_chacha = { version = "0.3", default-features = false, features = ["std"] } zeroize = { version = "1", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"] } diff --git a/processor/monero/src/lib.rs b/processor/monero/src/lib.rs deleted file mode 100644 index 0848e08a5..000000000 --- a/processor/monero/src/lib.rs +++ /dev/null @@ -1,319 +0,0 @@ -/* -// TODO: Consider ([u8; 32], TransactionPruned) -#[async_trait] -impl TransactionTrait for Transaction { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - self.hash() - } - - #[cfg(test)] - async fn fee(&self, _: &Monero) -> u64 { - match self { - Transaction::V1 { .. } => panic!("v1 TX in test-only function"), - Transaction::V2 { ref proofs, .. 
} => proofs.as_ref().unwrap().base.fee, - } - } -} - -impl EventualityTrait for Eventuality { - type Claim = [u8; 32]; - type Completion = Transaction; - - // Use the TX extra to look up potential matches - // While anyone can forge this, a transaction with distinct outputs won't actually match - // Extra includess the one time keys which are derived from the plan ID, so a collision here is a - // hash collision - fn lookup(&self) -> Vec { - self.extra() - } - - fn read(reader: &mut R) -> io::Result { - Eventuality::read(reader) - } - fn serialize(&self) -> Vec { - self.serialize() - } - - fn claim(tx: &Transaction) -> [u8; 32] { - tx.id() - } - fn serialize_completion(completion: &Transaction) -> Vec { - completion.serialize() - } - fn read_completion(reader: &mut R) -> io::Result { - Transaction::read(reader) - } -} - -#[derive(Clone, Debug)] -pub struct SignableTransaction(MSignableTransaction); -impl SignableTransactionTrait for SignableTransaction { - fn fee(&self) -> u64 { - self.0.necessary_fee() - } -} - -enum MakeSignableTransactionResult { - Fee(u64), - SignableTransaction(MSignableTransaction), -} - -impl Monero { - pub async fn new(url: String) -> Monero { - let mut res = SimpleRequestRpc::new(url.clone()).await; - while let Err(e) = res { - log::error!("couldn't connect to Monero node: {e:?}"); - tokio::time::sleep(Duration::from_secs(5)).await; - res = SimpleRequestRpc::new(url.clone()).await; - } - Monero { rpc: res.unwrap() } - } - - fn view_pair(spend: EdwardsPoint) -> GuaranteedViewPair { - GuaranteedViewPair::new(spend.0, Zeroizing::new(additional_key::(0).0)).unwrap() - } - - fn address_internal(spend: EdwardsPoint, subaddress: Option) -> Address { - Address::new(Self::view_pair(spend).address(MoneroNetwork::Mainnet, subaddress, None)).unwrap() - } - - fn scanner(spend: EdwardsPoint) -> GuaranteedScanner { - let mut scanner = GuaranteedScanner::new(Self::view_pair(spend)); - debug_assert!(EXTERNAL_SUBADDRESS.is_none()); - scanner.register_subaddress(BRANCH_SUBADDRESS.unwrap()); - scanner.register_subaddress(CHANGE_SUBADDRESS.unwrap()); - scanner.register_subaddress(FORWARD_SUBADDRESS.unwrap()); - scanner - } - - async fn median_fee(&self, block: &Block) -> Result { - let mut fees = vec![]; - for tx_hash in &block.transactions { - let tx = - self.rpc.get_transaction(*tx_hash).await.map_err(|_| NetworkError::ConnectionError)?; - // Only consider fees from RCT transactions, else the fee property read wouldn't be accurate - let fee = match &tx { - Transaction::V2 { proofs: Some(proofs), .. 
} => proofs.base.fee, - _ => continue, - }; - fees.push(fee / u64::try_from(tx.weight()).unwrap()); - } - fees.sort(); - let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); - - // TODO: Set a sane minimum fee - const MINIMUM_FEE: u64 = 1_500_000; - Ok(FeeRate::new(fee.max(MINIMUM_FEE), 10000).unwrap()) - } - - #[cfg(test)] - fn test_view_pair() -> ViewPair { - ViewPair::new(*EdwardsPoint::generator(), Zeroizing::new(Scalar::ONE.0)).unwrap() - } - - #[cfg(test)] - fn test_scanner() -> Scanner { - Scanner::new(Self::test_view_pair()) - } - - #[cfg(test)] - fn test_address() -> Address { - Address::new(Self::test_view_pair().legacy_address(MoneroNetwork::Mainnet)).unwrap() - } -} - -#[async_trait] -impl Network for Monero { - const NETWORK: NetworkId = NetworkId::Monero; - const ID: &'static str = "Monero"; - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 120; - const CONFIRMATIONS: usize = 10; - - // TODO - const COST_TO_AGGREGATE: u64 = 0; - - #[cfg(test)] - async fn external_address(&self, key: EdwardsPoint) -> Address { - Self::address_internal(key, EXTERNAL_SUBADDRESS) - } - - fn branch_address(key: EdwardsPoint) -> Option
{
-    Some(Self::address_internal(key, BRANCH_SUBADDRESS))
-  }
-
-  fn change_address(key: EdwardsPoint) -> Option<Address> {
-    Some(Self::address_internal(key, CHANGE_SUBADDRESS))
-  }
-
-  fn forward_address(key: EdwardsPoint) -> Option<Address> {
-    Some(Self::address_internal(key, FORWARD_SUBADDRESS))
-  }
-
-  async fn needed_fee(
-    &self,
-    block_number: usize,
-    inputs: &[Output],
-    payments: &[Payment],
-    change: &Option<Address>,
-  ) -> Result<Option<u64>, NetworkError> {
-    let res = self
-      .make_signable_transaction(block_number, &[0; 32], inputs, payments, change, true)
-      .await?;
-    let Some(res) = res else { return Ok(None) };
-    let MakeSignableTransactionResult::Fee(fee) = res else {
-      panic!("told make_signable_transaction calculating_fee and got transaction")
-    };
-    Ok(Some(fee))
-  }
-
-  async fn signable_transaction(
-    &self,
-    block_number: usize,
-    plan_id: &[u8; 32],
-    _key: EdwardsPoint,
-    inputs: &[Output],
-    payments: &[Payment],
-    change: &Option<Address>
, - (): &(), - ) -> Result, NetworkError> { - let res = self - .make_signable_transaction(block_number, plan_id, inputs, payments, change, false) - .await?; - let Some(res) = res else { return Ok(None) }; - let MakeSignableTransactionResult::SignableTransaction(signable) = res else { - panic!("told make_signable_transaction not calculating_fee and got fee") - }; - - let signable = SignableTransaction(signable); - let eventuality = signable.0.clone().into(); - Ok(Some((signable, eventuality))) - } - - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: SignableTransaction, - ) -> Result { - match transaction.0.clone().multisig(keys) { - Ok(machine) => Ok(machine), - Err(e) => panic!("failed to create a multisig machine for TX: {e}"), - } - } - - async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { - match self.rpc.publish_transaction(tx).await { - Ok(()) => Ok(()), - Err(RpcError::ConnectionError(e)) => { - log::debug!("Monero ConnectionError: {e}"); - Err(NetworkError::ConnectionError)? - } - // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs - // invalid transaction - Err(e) => panic!("failed to publish TX {}: {e}", hex::encode(tx.hash())), - } - } - - #[cfg(test)] - async fn get_block_number(&self, id: &[u8; 32]) -> usize { - self.rpc.get_block(*id).await.unwrap().number().unwrap() - } - - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - claim: &[u8; 32], - ) -> bool { - return eventuality.matches(&self.rpc.get_pruned_transaction(*claim).await.unwrap()); - } - - #[cfg(test)] - async fn get_transaction_by_eventuality( - &self, - block: usize, - eventuality: &Eventuality, - ) -> Transaction { - let block = self.rpc.get_block_by_number(block).await.unwrap(); - for tx in &block.transactions { - let tx = self.rpc.get_transaction(*tx).await.unwrap(); - if eventuality.matches(&tx.clone().into()) { - return tx; - } - } - panic!("block didn't have a transaction for this eventuality") - } - - #[cfg(test)] - async fn mine_block(&self) { - // https://github.com/serai-dex/serai/issues/198 - sleep(std::time::Duration::from_millis(100)).await; - self.rpc.generate_blocks(&Self::test_address().into(), 1).await.unwrap(); - } - - #[cfg(test)] - async fn test_send(&self, address: Address) -> Block { - use zeroize::Zeroizing; - use rand_core::{RngCore, OsRng}; - use monero_wallet::rpc::FeePriority; - - let new_block = self.get_latest_block_number().await.unwrap() + 1; - for _ in 0 .. 
80 { - self.mine_block().await; - } - - let new_block = self.rpc.get_block_by_number(new_block).await.unwrap(); - let mut outputs = Self::test_scanner() - .scan(self.rpc.get_scannable_block(new_block.clone()).await.unwrap()) - .unwrap() - .ignore_additional_timelock(); - let output = outputs.swap_remove(0); - - let amount = output.commitment().amount; - // The dust should always be sufficient for the fee - let fee = Monero::DUST; - - let rct_type = match new_block.header.hardfork_version { - 14 => RctType::ClsagBulletproof, - 15 | 16 => RctType::ClsagBulletproofPlus, - _ => panic!("Monero hard forked and the processor wasn't updated for it"), - }; - - let output = OutputWithDecoys::fingerprintable_deterministic_new( - &mut OsRng, - &self.rpc, - match rct_type { - RctType::ClsagBulletproof => 11, - RctType::ClsagBulletproofPlus => 16, - _ => panic!("selecting decoys for an unsupported RctType"), - }, - self.rpc.get_height().await.unwrap(), - output, - ) - .await - .unwrap(); - - let mut outgoing_view_key = Zeroizing::new([0; 32]); - OsRng.fill_bytes(outgoing_view_key.as_mut()); - let tx = MSignableTransaction::new( - rct_type, - outgoing_view_key, - vec![output], - vec![(address.into(), amount - fee)], - Change::fingerprintable(Some(Self::test_address().into())), - vec![], - self.rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(), - ) - .unwrap() - .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE.0)) - .unwrap(); - - let block = self.get_latest_block_number().await.unwrap() + 1; - self.rpc.publish_transaction(&tx).await.unwrap(); - for _ in 0 .. 10 { - self.mine_block().await; - } - self.get_block(block).await.unwrap() - } -} -*/ diff --git a/processor/monero/src/main.rs b/processor/monero/src/main.rs index daba3255b..d36118d01 100644 --- a/processor/monero/src/main.rs +++ b/processor/monero/src/main.rs @@ -41,3 +41,149 @@ async fn main() { ) .await; } + +/* +#[async_trait] +impl TransactionTrait for Transaction { + #[cfg(test)] + async fn fee(&self, _: &Monero) -> u64 { + match self { + Transaction::V1 { .. } => panic!("v1 TX in test-only function"), + Transaction::V2 { ref proofs, .. } => proofs.as_ref().unwrap().base.fee, + } + } +} + +impl Monero { + async fn median_fee(&self, block: &Block) -> Result { + let mut fees = vec![]; + for tx_hash in &block.transactions { + let tx = + self.rpc.get_transaction(*tx_hash).await.map_err(|_| NetworkError::ConnectionError)?; + // Only consider fees from RCT transactions, else the fee property read wouldn't be accurate + let fee = match &tx { + Transaction::V2 { proofs: Some(proofs), .. 
} => proofs.base.fee, + _ => continue, + }; + fees.push(fee / u64::try_from(tx.weight()).unwrap()); + } + fees.sort(); + let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); + + // TODO: Set a sane minimum fee + const MINIMUM_FEE: u64 = 1_500_000; + Ok(FeeRate::new(fee.max(MINIMUM_FEE), 10000).unwrap()) + } + + #[cfg(test)] + fn test_view_pair() -> ViewPair { + ViewPair::new(*EdwardsPoint::generator(), Zeroizing::new(Scalar::ONE.0)).unwrap() + } + + #[cfg(test)] + fn test_scanner() -> Scanner { + Scanner::new(Self::test_view_pair()) + } + + #[cfg(test)] + fn test_address() -> Address { + Address::new(Self::test_view_pair().legacy_address(MoneroNetwork::Mainnet)).unwrap() + } +} + +#[async_trait] +impl Network for Monero { + #[cfg(test)] + async fn get_block_number(&self, id: &[u8; 32]) -> usize { + self.rpc.get_block(*id).await.unwrap().number().unwrap() + } + + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Eventuality, + ) -> Transaction { + let block = self.rpc.get_block_by_number(block).await.unwrap(); + for tx in &block.transactions { + let tx = self.rpc.get_transaction(*tx).await.unwrap(); + if eventuality.matches(&tx.clone().into()) { + return tx; + } + } + panic!("block didn't have a transaction for this eventuality") + } + + #[cfg(test)] + async fn mine_block(&self) { + // https://github.com/serai-dex/serai/issues/198 + sleep(std::time::Duration::from_millis(100)).await; + self.rpc.generate_blocks(&Self::test_address().into(), 1).await.unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, address: Address) -> Block { + use zeroize::Zeroizing; + use rand_core::{RngCore, OsRng}; + use monero_wallet::rpc::FeePriority; + + let new_block = self.get_latest_block_number().await.unwrap() + 1; + for _ in 0 .. 80 { + self.mine_block().await; + } + + let new_block = self.rpc.get_block_by_number(new_block).await.unwrap(); + let mut outputs = Self::test_scanner() + .scan(self.rpc.get_scannable_block(new_block.clone()).await.unwrap()) + .unwrap() + .ignore_additional_timelock(); + let output = outputs.swap_remove(0); + + let amount = output.commitment().amount; + // The dust should always be sufficient for the fee + let fee = Monero::DUST; + + let rct_type = match new_block.header.hardfork_version { + 14 => RctType::ClsagBulletproof, + 15 | 16 => RctType::ClsagBulletproofPlus, + _ => panic!("Monero hard forked and the processor wasn't updated for it"), + }; + + let output = OutputWithDecoys::fingerprintable_deterministic_new( + &mut OsRng, + &self.rpc, + match rct_type { + RctType::ClsagBulletproof => 11, + RctType::ClsagBulletproofPlus => 16, + _ => panic!("selecting decoys for an unsupported RctType"), + }, + self.rpc.get_height().await.unwrap(), + output, + ) + .await + .unwrap(); + + let mut outgoing_view_key = Zeroizing::new([0; 32]); + OsRng.fill_bytes(outgoing_view_key.as_mut()); + let tx = MSignableTransaction::new( + rct_type, + outgoing_view_key, + vec![output], + vec![(address.into(), amount - fee)], + Change::fingerprintable(Some(Self::test_address().into())), + vec![], + self.rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(), + ) + .unwrap() + .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE.0)) + .unwrap(); + + let block = self.get_latest_block_number().await.unwrap() + 1; + self.rpc.publish_transaction(&tx).await.unwrap(); + for _ in 0 .. 
10 { + self.mine_block().await; + } + self.get_block(block).await.unwrap() + } +} +*/ diff --git a/processor/monero/src/primitives/output.rs b/processor/monero/src/primitives/output.rs index fea042c83..201e75c99 100644 --- a/processor/monero/src/primitives/output.rs +++ b/processor/monero/src/primitives/output.rs @@ -34,13 +34,6 @@ impl AsMut<[u8]> for OutputId { #[derive(Clone, PartialEq, Eq, Debug)] pub(crate) struct Output(pub(crate) WalletOutput); - -impl Output { - pub(crate) fn new(output: WalletOutput) -> Self { - Self(output) - } -} - impl ReceivedOutput<::G, Address> for Output { type Id = OutputId; type TransactionId = [u8; 32]; diff --git a/processor/monero/src/primitives/transaction.rs b/processor/monero/src/primitives/transaction.rs index f6765cd9b..eeeef81dc 100644 --- a/processor/monero/src/primitives/transaction.rs +++ b/processor/monero/src/primitives/transaction.rs @@ -34,8 +34,8 @@ impl scheduler::Transaction for Transaction { #[derive(Clone, Debug)] pub(crate) struct SignableTransaction { - id: [u8; 32], - signable: MSignableTransaction, + pub(crate) id: [u8; 32], + pub(crate) signable: MSignableTransaction, } #[derive(Clone)] @@ -81,8 +81,8 @@ impl scheduler::SignableTransaction for SignableTransaction { #[derive(Clone, PartialEq, Eq, Debug)] pub(crate) struct Eventuality { - id: [u8; 32], - singular_spent_output: Option, + pub(crate) id: [u8; 32], + pub(crate) singular_spent_output: Option, pub(crate) eventuality: MEventuality, } diff --git a/processor/monero/src/scheduler.rs b/processor/monero/src/scheduler.rs index ef52c4131..667840f6b 100644 --- a/processor/monero/src/scheduler.rs +++ b/processor/monero/src/scheduler.rs @@ -1,146 +1,9 @@ -/* -async fn make_signable_transaction( -block_number: usize, -plan_id: &[u8; 32], -inputs: &[Output], -payments: &[Payment], -change: &Option
, -calculating_fee: bool, -) -> Result, NetworkError> { -for payment in payments { - assert_eq!(payment.balance.coin, Coin::Monero); -} - -// TODO2: Use an fee representative of several blocks, cached inside Self -let block_for_fee = self.get_block(block_number).await?; -let fee_rate = self.median_fee(&block_for_fee).await?; - -// Determine the RCT proofs to make based off the hard fork -// TODO: Make a fn for this block which is duplicated with tests -let rct_type = match block_for_fee.header.hardfork_version { - 14 => RctType::ClsagBulletproof, - 15 | 16 => RctType::ClsagBulletproofPlus, - _ => panic!("Monero hard forked and the processor wasn't updated for it"), -}; - -let mut transcript = - RecommendedTranscript::new(b"Serai Processor Monero Transaction Transcript"); -transcript.append_message(b"plan", plan_id); - -// All signers need to select the same decoys -// All signers use the same height and a seeded RNG to make sure they do so. -let mut inputs_actual = Vec::with_capacity(inputs.len()); -for input in inputs { - inputs_actual.push( - OutputWithDecoys::fingerprintable_deterministic_new( - &mut ChaCha20Rng::from_seed(transcript.rng_seed(b"decoys")), - &self.rpc, - // TODO: Have Decoys take RctType - match rct_type { - RctType::ClsagBulletproof => 11, - RctType::ClsagBulletproofPlus => 16, - _ => panic!("selecting decoys for an unsupported RctType"), - }, - block_number + 1, - input.0.clone(), - ) - .await - .map_err(map_rpc_err)?, - ); -} - -// Monero requires at least two outputs -// If we only have one output planned, add a dummy payment -let mut payments = payments.to_vec(); -let outputs = payments.len() + usize::from(u8::from(change.is_some())); -if outputs == 0 { - return Ok(None); -} else if outputs == 1 { - payments.push(Payment { - address: Address::new( - ViewPair::new(EdwardsPoint::generator().0, Zeroizing::new(Scalar::ONE.0)) - .unwrap() - .legacy_address(MoneroNetwork::Mainnet), - ) - .unwrap(), - balance: Balance { coin: Coin::Monero, amount: Amount(0) }, - data: None, - }); -} - -let payments = payments - .into_iter() - .map(|payment| (payment.address.into(), payment.balance.amount.0)) - .collect::>(); - -match MSignableTransaction::new( - rct_type, - // Use the plan ID as the outgoing view key - Zeroizing::new(*plan_id), - inputs_actual, - payments, - Change::fingerprintable(change.as_ref().map(|change| change.clone().into())), - vec![], - fee_rate, -) { - Ok(signable) => Ok(Some({ - if calculating_fee { - MakeSignableTransactionResult::Fee(signable.necessary_fee()) - } else { - MakeSignableTransactionResult::SignableTransaction(signable) - } - })), - Err(e) => match e { - SendError::UnsupportedRctType => { - panic!("trying to use an RctType unsupported by monero-wallet") - } - SendError::NoInputs | - SendError::InvalidDecoyQuantity | - SendError::NoOutputs | - SendError::TooManyOutputs | - SendError::NoChange | - SendError::TooMuchArbitraryData | - SendError::TooLargeTransaction | - SendError::WrongPrivateKey => { - panic!("created an invalid Monero transaction: {e}"); - } - SendError::MultiplePaymentIds => { - panic!("multiple payment IDs despite not supporting integrated addresses"); - } - SendError::NotEnoughFunds { inputs, outputs, necessary_fee } => { - log::debug!( - "Monero NotEnoughFunds. 
inputs: {:?}, outputs: {:?}, necessary_fee: {necessary_fee:?}", - inputs, - outputs - ); - match necessary_fee { - Some(necessary_fee) => { - // If we're solely calculating the fee, return the fee this TX will cost - if calculating_fee { - Ok(Some(MakeSignableTransactionResult::Fee(necessary_fee))) - } else { - // If we're actually trying to make the TX, return None - Ok(None) - } - } - // We didn't have enough funds to even cover the outputs - None => { - // Ensure we're not misinterpreting this - assert!(outputs > inputs); - Ok(None) - } - } - } - SendError::MaliciousSerialization | SendError::ClsagError(_) | SendError::FrostError(_) => { - panic!("supposedly unreachable (at this time) Monero error: {e}"); - } - }, -} -} -*/ - use core::future::Future; +use zeroize::Zeroizing; +use rand_core::SeedableRng; +use rand_chacha::ChaCha20Rng; + use ciphersuite::{Ciphersuite, Ed25519}; use monero_wallet::rpc::{FeeRate, RpcError}; @@ -154,11 +17,17 @@ use primitives::{OutputType, ReceivedOutput, Payment}; use scanner::{KeyFor, AddressFor, OutputFor, BlockFor}; use utxo_scheduler::{PlannedTransaction, TransactionPlanner}; -use monero_wallet::address::Network; +use monero_wallet::{ + ringct::RctType, + address::{Network, AddressType, MoneroAddress}, + OutputWithDecoys, + send::{ + Change, SendError, SignableTransaction as MSignableTransaction, Eventuality as MEventuality, + }, +}; use crate::{ EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS, view_pair, - output::Output, transaction::{SignableTransaction, Eventuality}, rpc::Rpc, }; @@ -179,186 +48,116 @@ fn address_from_serai_key(key: ::G, kind: OutputType) -> .expect("created address which wasn't representable") } -#[derive(Clone)] -pub(crate) struct Planner(pub(crate) Rpc); -impl TransactionPlanner for Planner { - type EphemeralError = RpcError; - - type FeeRate = FeeRate; - - type SignableTransaction = SignableTransaction; - - // wallet2 will not create a transaction larger than 100 KB, and Monero won't relay a transaction - // larger than 150 KB. This fits within the 100 KB mark to fit in and not poke the bear. 
- // Technically, it can be ~124, yet a small bit of buffer is appreciated - // TODO: Test creating a TX this big - const MAX_INPUTS: usize = 120; - const MAX_OUTPUTS: usize = 16; - - fn fee_rate(block: &BlockFor, coin: Coin) -> Self::FeeRate { - assert_eq!(coin, Coin::Monero); - // TODO - todo!("TODO") - } - - fn branch_address(key: KeyFor) -> AddressFor { - address_from_serai_key(key, OutputType::Branch) - } - fn change_address(key: KeyFor) -> AddressFor { - address_from_serai_key(key, OutputType::Change) - } - fn forwarding_address(key: KeyFor) -> AddressFor { - address_from_serai_key(key, OutputType::Forwarded) - } - - fn calculate_fee( - fee_rate: Self::FeeRate, - inputs: Vec>, - payments: Vec>>, - change: Option>, - ) -> Amount { - todo!("TODO") - } - - fn plan( - &self, - fee_rate: Self::FeeRate, - inputs: Vec>, - payments: Vec>>, - change: Option>, - ) -> impl Send - + Future, RpcError>> - { - async move { todo!("TODO") } - } -} - -pub(crate) type Scheduler = utxo_standard_scheduler::Scheduler; - -/* -use ciphersuite::{Ciphersuite, Ed25519}; - -use bitcoin_serai::{ - bitcoin::ScriptBuf, - wallet::{TransactionError, SignableTransaction as BSignableTransaction, p2tr_script_buf}, -}; - -use serai_client::{ - primitives::{Coin, Amount}, - networks::bitcoin::Address, -}; - -use serai_db::Db; -use primitives::{OutputType, ReceivedOutput, Payment}; -use scanner::{KeyFor, AddressFor, OutputFor, BlockFor}; -use utxo_scheduler::{PlannedTransaction, TransactionPlanner}; - -use crate::{ - scan::{offsets_for_key, scanner}, - output::Output, - transaction::{SignableTransaction, Eventuality}, - rpc::Rpc, -}; - -fn address_from_serai_key(key: ::G, kind: OutputType) -> Address { - let offset = ::G::GENERATOR * offsets_for_key(key)[&kind]; - Address::new( - p2tr_script_buf(key + offset) - .expect("creating address from Serai key which wasn't properly tweaked"), - ) - .expect("couldn't create Serai-representable address for P2TR script") -} - -fn signable_transaction( - fee_per_vbyte: u64, +async fn signable_transaction( + rpc: &Rpc, + reference_block: &BlockFor, inputs: Vec>, payments: Vec>>, change: Option>, -) -> Result<(SignableTransaction, BSignableTransaction), TransactionError> { - assert!( - inputs.len() < - >::MAX_INPUTS - ); +) -> Result, RpcError> { + assert!(inputs.len() < >::MAX_INPUTS); assert!( (payments.len() + usize::from(u8::from(change.is_some()))) < >::MAX_OUTPUTS ); - let inputs = inputs.into_iter().map(|input| input.output).collect::>(); + // TODO: Set a sane minimum fee + const MINIMUM_FEE: u64 = 1_500_000; + // TODO: Set a fee rate based on the reference block + let fee_rate = FeeRate::new(MINIMUM_FEE, 10000).unwrap(); + + // Determine the RCT proofs to make based off the hard fork + let rct_type = match reference_block.0.block.header.hardfork_version { + 14 => RctType::ClsagBulletproof, + 15 | 16 => RctType::ClsagBulletproofPlus, + _ => panic!("Monero hard forked and the processor wasn't updated for it"), + }; + + // We need a unique ID to distinguish this transaction from another transaction with an identical + // set of payments (as our Eventualities only match over the payments). 
The output's ID is + // guaranteed to be unique, making it satisfactory + let id = inputs.first().unwrap().id().0; + + let mut inputs_actual = Vec::with_capacity(inputs.len()); + for input in inputs { + inputs_actual.push( + OutputWithDecoys::fingerprintable_deterministic_new( + // We need a deterministic RNG here with *some* seed + // The unique ID means we don't pick some static seed + // It is a public value, yet that's fine as this is assumed fully transparent + // It is a reused value (with later code), but that's not an issue. Just an oddity + &mut ChaCha20Rng::from_seed(id), + &rpc.rpc, + // TODO: Have Decoys take RctType + match rct_type { + RctType::ClsagBulletproof => 11, + RctType::ClsagBulletproofPlus => 16, + _ => panic!("selecting decoys for an unsupported RctType"), + }, + reference_block.0.block.number().unwrap() + 1, + input.0.clone(), + ) + .await?, + ); + } + let inputs = inputs_actual; let mut payments = payments .into_iter() .map(|payment| { - (payment.address().clone(), { + (MoneroAddress::from(*payment.address()), { let balance = payment.balance(); assert_eq!(balance.coin, Coin::Monero); balance.amount.0 }) }) .collect::>(); - /* - Push a payment to a key with a known private key which anyone can spend. If this transaction - gets stuck, this lets anyone create a child transaction spending this output, raising the fee, - getting the transaction unstuck (via CPFP). - */ - payments.push(( - // The generator is even so this is valid - Address::new(p2tr_script_buf(::G::GENERATOR).unwrap()).unwrap(), - // This uses the minimum output value allowed, as defined as a constant in bitcoin-serai - // TODO: Add a test for this comparing to bitcoin's `minimal_non_dust` - bitcoin_serai::wallet::DUST, - )); - - let change = change - .map(>::change_address); + if (payments.len() + usize::from(u8::from(change.is_some()))) == 1 { + // Monero requires at least two outputs, so add a dummy payment + payments.push(( + MoneroAddress::new( + Network::Mainnet, + AddressType::Legacy, + ::generator().0, + ::generator().0, + ), + 0, + )); + } - BSignableTransaction::new( - inputs.clone(), - &payments - .iter() - .cloned() - .map(|(address, amount)| (ScriptBuf::from(address), amount)) - .collect::>(), - change.clone().map(ScriptBuf::from), - None, - fee_per_vbyte, + let change = if let Some(change) = change { + Change::guaranteed(view_pair(change), Some(CHANGE_SUBADDRESS)) + } else { + Change::fingerprintable(None) + }; + + Ok( + MSignableTransaction::new( + rct_type, + Zeroizing::new(id), + inputs, + payments, + change, + vec![], + fee_rate, + ) + .map(|signable| (SignableTransaction { id, signable: signable.clone() }, signable)), ) - .map(|bst| (SignableTransaction { inputs, payments, change, fee_per_vbyte }, bst)) } -pub(crate) struct Planner; +#[derive(Clone)] +pub(crate) struct Planner(pub(crate) Rpc); impl TransactionPlanner for Planner { type EphemeralError = RpcError; - type FeeRate = u64; - type SignableTransaction = SignableTransaction; - /* - Monero has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT). - - A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes. While - our inputs are entirely SegWit, such fine tuning is not necessary and could create issues in - the future (if the size decreases or we misevaluate it). It also offers a minimal amount of - benefit when we are able to logarithmically accumulate inputs/fulfill payments. 
- - For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and - 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 - bytes. - - 100,000 / 192 = 520 - 520 * 192 leaves 160 bytes of overhead for the transaction structure itself. - */ - const MAX_INPUTS: usize = 520; - // We always reserve one output to create an anyone-can-spend output enabling anyone to use CPFP - // to unstick any transactions which had too low of a fee. - const MAX_OUTPUTS: usize = 519; - - fn fee_rate(block: &BlockFor, coin: Coin) -> Self::FeeRate { - assert_eq!(coin, Coin::Monero); - // TODO - 1 - } + // wallet2 will not create a transaction larger than 100 KB, and Monero won't relay a transaction + // larger than 150 KB. This fits within the 100 KB mark to fit in and not poke the bear. + // Technically, it can be ~124, yet a small bit of buffer is appreciated + // TODO: Test creating a TX this big + const MAX_INPUTS: usize = 120; + const MAX_OUTPUTS: usize = 16; fn branch_address(key: KeyFor) -> AddressFor { address_from_serai_key(key, OutputType::Branch) @@ -371,59 +170,101 @@ impl TransactionPlanner for Planner { } fn calculate_fee( - fee_rate: Self::FeeRate, + &self, + reference_block: &BlockFor, inputs: Vec>, payments: Vec>>, change: Option>, - ) -> Amount { - match signable_transaction::(fee_rate, inputs, payments, change) { - Ok(tx) => Amount(tx.1.needed_fee()), - Err( - TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, - ) => panic!("malformed arguments to calculate_fee"), - // No data, we have a minimum fee rate, we checked the amount of inputs/outputs - Err( - TransactionError::TooMuchData | - TransactionError::TooLowFee | - TransactionError::TooLargeTransaction, - ) => unreachable!(), - Err(TransactionError::NotEnoughFunds { fee, .. }) => Amount(fee), + ) -> impl Send + Future> { + async move { + Ok(match signable_transaction(&self.0, reference_block, inputs, payments, change).await? { + Ok(tx) => Amount(tx.1.necessary_fee()), + Err(SendError::NotEnoughFunds { necessary_fee, .. 
}) => { + Amount(necessary_fee.expect("outputs value exceeded inputs value")) + } + Err(SendError::UnsupportedRctType) => { + panic!("tried to use an RctType monero-wallet doesn't support") + } + Err(SendError::NoInputs | SendError::NoOutputs | SendError::TooManyOutputs) => { + panic!("malformed plan passed to calculate_fee") + } + Err(SendError::InvalidDecoyQuantity) => panic!("selected the wrong amount of decoys"), + Err(SendError::NoChange) => { + panic!("didn't add a dummy payment to satisfy the 2-output minimum") + } + Err(SendError::MultiplePaymentIds) => { + panic!("included multiple payment IDs despite not supporting addresses with payment IDs") + } + Err(SendError::TooMuchArbitraryData) => { + panic!("included too much arbitrary data despite not including any") + } + Err(SendError::TooLargeTransaction) => { + panic!("too large transaction despite MAX_INPUTS/MAX_OUTPUTS") + } + Err( + SendError::WrongPrivateKey | + SendError::MaliciousSerialization | + SendError::ClsagError(_) | + SendError::FrostError(_), + ) => unreachable!("signing/serialization error when not signing/serializing"), + }) } } fn plan( - fee_rate: Self::FeeRate, + &self, + reference_block: &BlockFor, inputs: Vec>, payments: Vec>>, change: Option>, - ) -> PlannedTransaction { - let key = inputs.first().unwrap().key(); - for input in &inputs { - assert_eq!(key, input.key()); - } - + ) -> impl Send + + Future, RpcError>> + { let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id()); - match signable_transaction::(fee_rate, inputs.clone(), payments, change) { - Ok(tx) => PlannedTransaction { - signable: tx.0, - eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output }, - auxilliary: (), - }, - Err( - TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, - ) => panic!("malformed arguments to plan"), - // No data, we have a minimum fee rate, we checked the amount of inputs/outputs - Err( - TransactionError::TooMuchData | - TransactionError::TooLowFee | - TransactionError::TooLargeTransaction, - ) => unreachable!(), - Err(TransactionError::NotEnoughFunds { .. }) => { - panic!("plan called for a transaction without enough funds") - } + + async move { + Ok(match signable_transaction(&self.0, reference_block, inputs, payments, change).await? { + Ok(tx) => { + let id = tx.0.id; + PlannedTransaction { + signable: tx.0, + eventuality: Eventuality { + id, + singular_spent_output, + eventuality: MEventuality::from(tx.1), + }, + auxilliary: (), + } + } + Err(SendError::NotEnoughFunds { .. 
}) => panic!("failed to successfully amortize the fee"), + Err(SendError::UnsupportedRctType) => { + panic!("tried to use an RctType monero-wallet doesn't support") + } + Err(SendError::NoInputs | SendError::NoOutputs | SendError::TooManyOutputs) => { + panic!("malformed plan passed to calculate_fee") + } + Err(SendError::InvalidDecoyQuantity) => panic!("selected the wrong amount of decoys"), + Err(SendError::NoChange) => { + panic!("didn't add a dummy payment to satisfy the 2-output minimum") + } + Err(SendError::MultiplePaymentIds) => { + panic!("included multiple payment IDs despite not supporting addresses with payment IDs") + } + Err(SendError::TooMuchArbitraryData) => { + panic!("included too much arbitrary data despite not including any") + } + Err(SendError::TooLargeTransaction) => { + panic!("too large transaction despite MAX_INPUTS/MAX_OUTPUTS") + } + Err( + SendError::WrongPrivateKey | + SendError::MaliciousSerialization | + SendError::ClsagError(_) | + SendError::FrostError(_), + ) => unreachable!("signing/serialization error when not signing/serializing"), + }) } } } pub(crate) type Scheduler = utxo_standard_scheduler::Scheduler; -*/ diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs index 00b2d10f5..c01baf021 100644 --- a/processor/scheduler/utxo/primitives/src/lib.rs +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -4,7 +4,7 @@ use core::{fmt::Debug, future::Future}; -use serai_primitives::{Coin, Amount}; +use serai_primitives::Amount; use primitives::{ReceivedOutput, Payment}; use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor}; @@ -48,9 +48,6 @@ pub trait TransactionPlanner: 'static + Send + Sync { /// resolve manual intervention/changing the arguments. type EphemeralError: Debug; - /// The type representing a fee rate to use for transactions. - type FeeRate: Send + Clone + Copy; - /// The type representing a signable transaction. type SignableTransaction: SignableTransaction; @@ -59,11 +56,6 @@ pub trait TransactionPlanner: 'static + Send + Sync { /// The maximum amount of outputs allowed in a transaction, including the change output. const MAX_OUTPUTS: usize; - /// Obtain the fee rate to pay. - /// - /// This must be constant to the block and coin. - fn fee_rate(block: &BlockFor, coin: Coin) -> Self::FeeRate; - /// The branch address for this key of Serai's. fn branch_address(key: KeyFor) -> AddressFor; /// The change address for this key of Serai's. @@ -76,11 +68,12 @@ pub trait TransactionPlanner: 'static + Send + Sync { /// The fee rate, inputs, and payments, will all be for the same coin. The returned fee is /// denominated in this coin. fn calculate_fee( - fee_rate: Self::FeeRate, + &self, + reference_block: &BlockFor, inputs: Vec>, payments: Vec>>, change: Option>, - ) -> Amount; + ) -> impl Send + Future>; /// Plan a transaction. /// @@ -91,7 +84,7 @@ pub trait TransactionPlanner: 'static + Send + Sync { /// output must be created. 
fn plan( &self, - fee_rate: Self::FeeRate, + reference_block: &BlockFor, inputs: Vec>, payments: Vec>>, change: Option>, @@ -112,7 +105,7 @@ pub trait TransactionPlanner: 'static + Send + Sync { fn plan_transaction_with_fee_amortization( &self, operating_costs: &mut u64, - fee_rate: Self::FeeRate, + reference_block: &BlockFor, inputs: Vec>, mut payments: Vec>>, mut change: Option>, @@ -156,7 +149,8 @@ pub trait TransactionPlanner: 'static + Send + Sync { // Sort payments from high amount to low amount payments.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse()); - let mut fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0; + let mut fee = + self.calculate_fee(reference_block, inputs.clone(), payments.clone(), change).await?.0; let mut amortized = 0; while !payments.is_empty() { // We need to pay the fee, and any accrued operating costs, minus what we've already @@ -176,7 +170,10 @@ pub trait TransactionPlanner: 'static + Send + Sync { if payments.last().unwrap().balance().amount.0 <= (per_payment_fee + S::dust(coin).0) { amortized += payments.pop().unwrap().balance().amount.0; // Recalculate the fee and try again - fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0; + fee = self + .calculate_fee(reference_block, inputs.clone(), payments.clone(), change) + .await? + .0; continue; } // Break since all of these payments shouldn't be dropped @@ -237,7 +234,7 @@ pub trait TransactionPlanner: 'static + Send + Sync { let has_change = change.is_some(); let PlannedTransaction { signable, eventuality, auxilliary } = - self.plan(fee_rate, inputs, payments, change).await?; + self.plan(reference_block, inputs, payments, change).await?; Ok(Some(AmortizePlannedTransaction { effected_payments, has_change, diff --git a/processor/scheduler/utxo/standard/src/lib.rs b/processor/scheduler/utxo/standard/src/lib.rs index 5ff786a73..208ae8a09 100644 --- a/processor/scheduler/utxo/standard/src/lib.rs +++ b/processor/scheduler/utxo/standard/src/lib.rs @@ -56,7 +56,7 @@ impl> Scheduler { .planner .plan_transaction_with_fee_amortization( &mut operating_costs, - P::fee_rate(block, coin), + block, to_aggregate, vec![], Some(key_for_change), @@ -176,7 +176,7 @@ impl> Scheduler { .plan_transaction_with_fee_amortization( // Uses 0 as there's no operating costs to incur/amortize here &mut 0, - P::fee_rate(block, coin), + block, vec![output], payments, None, @@ -254,7 +254,7 @@ impl> Scheduler { .planner .plan_transaction_with_fee_amortization( &mut operating_costs, - P::fee_rate(block, coin), + block, outputs.clone(), tree[0] .payments::(coin, &branch_address, tree[0].value()) @@ -327,7 +327,7 @@ impl> Scheduler { .planner .plan_transaction_with_fee_amortization( &mut operating_costs, - P::fee_rate(block, coin), + block, outputs, vec![], Some(to), @@ -487,7 +487,7 @@ impl> SchedulerTrait for Schedul // This uses 0 for the operating costs as we don't incur any here // If the output can't pay for itself to be forwarded, we simply drop it &mut 0, - P::fee_rate(block, forward.balance().coin), + block, vec![forward.clone()], vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)], None, @@ -508,7 +508,7 @@ impl> SchedulerTrait for Schedul // This uses 0 for the operating costs as we don't incur any here // If the output can't pay for itself to be returned, we simply drop it &mut 0, - P::fee_rate(block, out_instruction.balance().coin), + block, vec![to_return.output().clone()], vec![out_instruction], None, diff --git 
a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index cb0a8b154..961c6fcb2 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -86,7 +86,7 @@ impl>> Sched .planner .plan_transaction_with_fee_amortization( &mut operating_costs, - P::fee_rate(block, coin), + block, to_aggregate, vec![], Some(key_for_change), @@ -229,7 +229,7 @@ impl>> Sched .planner .plan_transaction_with_fee_amortization( &mut operating_costs, - P::fee_rate(block, coin), + block, outputs.clone(), tree[0] .payments::(coin, &branch_address, tree[0].value()) @@ -323,7 +323,7 @@ impl>> Sched .plan_transaction_with_fee_amortization( // Uses 0 as there's no operating costs to incur/amortize here &mut 0, - P::fee_rate(block, coin), + block, vec![branch_output], payments, None, @@ -379,7 +379,7 @@ impl>> Sched .planner .plan_transaction_with_fee_amortization( &mut operating_costs, - P::fee_rate(block, coin), + block, outputs, vec![], Some(to), @@ -505,7 +505,7 @@ impl>> Sched // This uses 0 for the operating costs as we don't incur any here // If the output can't pay for itself to be forwarded, we simply drop it &mut 0, - P::fee_rate(block, forward.balance().coin), + block, vec![forward.clone()], vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)], None, @@ -526,7 +526,7 @@ impl>> Sched // This uses 0 for the operating costs as we don't incur any here // If the output can't pay for itself to be returned, we simply drop it &mut 0, - P::fee_rate(block, out_instruction.balance().coin), + block, vec![to_return.output().clone()], vec![out_instruction], None, From 01679d39ce9d99c8eac4e3a1fa7519574f3dbbe2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 14 Sep 2024 05:20:02 -0400 Subject: [PATCH 126/179] Smart Contract Scheduler --- .github/workflows/tests.yml | 1 + Cargo.lock | 33 ++- Cargo.toml | 1 + deny.toml | 1 + processor/ethereum/Cargo.toml | 29 ++- processor/scheduler/smart-contract/Cargo.toml | 34 +++ processor/scheduler/smart-contract/LICENSE | 15 ++ processor/scheduler/smart-contract/README.md | 3 + processor/scheduler/smart-contract/src/lib.rs | 136 ++++++++++++ processor/scheduler/utxo/standard/src/lib.rs | 2 +- .../utxo/transaction-chaining/src/lib.rs | 2 +- .../src/multisigs/scheduler/smart_contract.rs | 208 ------------------ 12 files changed, 240 insertions(+), 225 deletions(-) create mode 100644 processor/scheduler/smart-contract/Cargo.toml create mode 100644 processor/scheduler/smart-contract/LICENSE create mode 100644 processor/scheduler/smart-contract/README.md create mode 100644 processor/scheduler/smart-contract/src/lib.rs delete mode 100644 processor/src/multisigs/scheduler/smart_contract.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 8bf4084da..e1c54349c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -48,6 +48,7 @@ jobs: -p serai-processor-utxo-scheduler-primitives \ -p serai-processor-utxo-scheduler \ -p serai-processor-transaction-chaining-scheduler \ + -p serai-processor-smart-contract-scheduler \ -p serai-processor-signers \ -p serai-processor-bin \ -p serai-bitcoin-processor \ diff --git a/Cargo.lock b/Cargo.lock index c3e39a09b..147cc295b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8350,18 +8350,25 @@ name = "serai-ethereum-processor" version = "0.1.0" dependencies = [ "borsh", - "const-hex", - "env_logger", + "ciphersuite", + "dkg", "ethereum-serai", + 
"flexible-transcript", "hex", "k256", "log", + "modular-frost", "parity-scale-codec", + "rand_core", + "serai-client", "serai-db", - "serai-env", - "serai-message-queue", - "serai-processor-messages", - "serde_json", + "serai-processor-bin", + "serai-processor-key-gen", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-signers", + "serai-processor-smart-contract-scheduler", "tokio", "zalloc", ] @@ -8781,6 +8788,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "serai-processor-smart-contract-scheduler" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "parity-scale-codec", + "serai-db", + "serai-primitives", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", +] + [[package]] name = "serai-processor-tests" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index b35b3318f..adaa63db5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,6 +81,7 @@ members = [ "processor/scheduler/utxo/primitives", "processor/scheduler/utxo/standard", "processor/scheduler/utxo/transaction-chaining", + "processor/scheduler/smart-contract", "processor/signers", "processor/bin", diff --git a/deny.toml b/deny.toml index ef195411e..0e013f5e7 100644 --- a/deny.toml +++ b/deny.toml @@ -55,6 +55,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler-primitives" }, { allow = ["AGPL-3.0"], name = "serai-processor-standard-scheduler" }, { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" }, + { allow = ["AGPL-3.0"], name = "serai-processor-smart-contract-scheduler" }, { allow = ["AGPL-3.0"], name = "serai-processor-signers" }, { allow = ["AGPL-3.0"], name = "serai-bitcoin-processor" }, diff --git a/processor/ethereum/Cargo.toml b/processor/ethereum/Cargo.toml index ea65d570d..ede9c71b1 100644 --- a/processor/ethereum/Cargo.toml +++ b/processor/ethereum/Cargo.toml @@ -17,27 +17,38 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -const-hex = { version = "1", default-features = false } +rand_core = { version = "0.6", default-features = false } + hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -serde_json = { version = "1", default-features = false, features = ["std"] } + +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } k256 = { version = "^0.13.1", default-features = false, features = ["std"] } ethereum-serai = { path = "../../networks/ethereum", default-features = false, optional = true } +serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] } + +zalloc = { path = "../../common/zalloc" } log = { version = "0.4", default-features = false, features = ["std"] } -env_logger = { version = "0.10", default-features = false, features = ["humantime"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", 
"macros"] } -zalloc = { path = "../../common/zalloc" } serai-db = { path = "../../common/db" } -serai-env = { path = "../../common/env" } -messages = { package = "serai-processor-messages", path = "../messages" } +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +smart-contract-scheduler = { package = "serai-processor-smart-contract-scheduler", path = "../scheduler/smart-contract" } +signers = { package = "serai-processor-signers", path = "../signers" } -message-queue = { package = "serai-message-queue", path = "../../message-queue" } +bin = { package = "serai-processor-bin", path = "../bin" } [features] -parity-db = ["serai-db/parity-db"] -rocksdb = ["serai-db/rocksdb"] +parity-db = ["bin/parity-db"] +rocksdb = ["bin/rocksdb"] diff --git a/processor/scheduler/smart-contract/Cargo.toml b/processor/scheduler/smart-contract/Cargo.toml new file mode 100644 index 000000000..69ce9840a --- /dev/null +++ b/processor/scheduler/smart-contract/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "serai-processor-smart-contract-scheduler" +version = "0.1.0" +description = "Scheduler for a smart contract representing the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/smart-contract" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale", "borsh"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-primitives = { path = "../../../substrate/primitives", default-features = false, features = ["std"] } + +serai-db = { path = "../../../common/db" } + +primitives = { package = "serai-processor-primitives", path = "../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../scanner" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../primitives" } diff --git a/processor/scheduler/smart-contract/LICENSE b/processor/scheduler/smart-contract/LICENSE new file mode 100644 index 000000000..e091b1498 --- /dev/null +++ b/processor/scheduler/smart-contract/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
diff --git a/processor/scheduler/smart-contract/README.md b/processor/scheduler/smart-contract/README.md new file mode 100644 index 000000000..0be94d20f --- /dev/null +++ b/processor/scheduler/smart-contract/README.md @@ -0,0 +1,3 @@ +# Smart Contract Scheduler + +A scheduler for a smart contract representing the Serai processor. diff --git a/processor/scheduler/smart-contract/src/lib.rs b/processor/scheduler/smart-contract/src/lib.rs new file mode 100644 index 000000000..091ffe6ab --- /dev/null +++ b/processor/scheduler/smart-contract/src/lib.rs @@ -0,0 +1,136 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{marker::PhantomData, future::Future}; +use std::collections::HashMap; + +use group::GroupEncoding; + +use serai_db::{Get, DbTxn, create_db}; + +use primitives::{ReceivedOutput, Payment}; +use scanner::{ + LifetimeStage, ScannerFeed, KeyFor, AddressFor, EventualityFor, BlockFor, SchedulerUpdate, + KeyScopedEventualities, Scheduler as SchedulerTrait, +}; +use scheduler_primitives::*; + +create_db! { + SmartContractScheduler { + NextNonce: () -> u64, + } +} + +/// A smart contract. +pub trait SmartContract: 'static + Send { + /// The type representing a signable transaction. + type SignableTransaction: SignableTransaction; + + /// Rotate from the retiring key to the new key. + fn rotate( + nonce: u64, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> (Self::SignableTransaction, EventualityFor); + /// Fulfill the set of payments, dropping any not worth handling. + fn fulfill( + starting_nonce: u64, + payments: Vec>>, + ) -> Vec<(Self::SignableTransaction, EventualityFor)>; +} + +/// A scheduler for a smart contract representing the Serai processor. +#[allow(non_snake_case)] +#[derive(Clone, Default)] +pub struct Scheduler> { + _S: PhantomData, + _SC: PhantomData, +} + +fn fulfill_payments>( + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, +) -> KeyScopedEventualities { + let key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + let mut nonce = NextNonce::get(txn).unwrap_or(0); + let mut eventualities = Vec::with_capacity(1); + for (signable, eventuality) in SC::fulfill(nonce, payments) { + TransactionsToSign::::send(txn, &key, &signable); + nonce += 1; + eventualities.push(eventuality); + } + NextNonce::set(txn, &nonce); + HashMap::from([(key.to_bytes().as_ref().to_vec(), eventualities)]) +} + +impl> SchedulerTrait for Scheduler { + type EphemeralError = (); + type SignableTransaction = SC::SignableTransaction; + + fn activate_key(_txn: &mut impl DbTxn, _key: KeyFor) {} + + fn flush_key( + &self, + txn: &mut impl DbTxn, + _block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let nonce = NextNonce::get(txn).unwrap_or(0); + let (signable, eventuality) = SC::rotate(nonce, retiring_key, new_key); + NextNonce::set(txn, &(nonce + 1)); + TransactionsToSign::::send(txn, &retiring_key, &signable); + Ok(HashMap::from([(retiring_key.to_bytes().as_ref().to_vec(), vec![eventuality])])) + } + } + + fn retire_key(_txn: &mut impl DbTxn, _key: KeyFor) {} + + fn update( + &self, + txn: &mut impl DbTxn, + _block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + update: SchedulerUpdate, + ) -> impl Send + Future, 
Self::EphemeralError>> { + async move { + // We ignore the outputs as we don't need to know our current state as it never suffers + // partial availability + + // We shouldn't have any forwards though + assert!(update.forwards().is_empty()); + + // Create the transactions for the returns + Ok(fulfill_payments::( + txn, + active_keys, + update + .returns() + .iter() + .map(|to_return| { + Payment::new(to_return.address().clone(), to_return.output().balance(), None) + }) + .collect::>(), + )) + } + } + + fn fulfill( + &self, + txn: &mut impl DbTxn, + _block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { Ok(fulfill_payments::(txn, active_keys, payments)) } + } +} diff --git a/processor/scheduler/utxo/standard/src/lib.rs b/processor/scheduler/utxo/standard/src/lib.rs index 208ae8a09..dc2ccb064 100644 --- a/processor/scheduler/utxo/standard/src/lib.rs +++ b/processor/scheduler/utxo/standard/src/lib.rs @@ -470,7 +470,7 @@ impl> SchedulerTrait for Schedul } } - // Create the transactions for the forwards/burns + // Create the transactions for the forwards/returns { let mut planned_txs = vec![]; for forward in update.forwards() { diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs index 961c6fcb2..93bdf1f39 100644 --- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -488,7 +488,7 @@ impl>> Sched } } - // Create the transactions for the forwards/burns + // Create the transactions for the forwards/returns { let mut planned_txs = vec![]; for forward in update.forwards() { diff --git a/processor/src/multisigs/scheduler/smart_contract.rs b/processor/src/multisigs/scheduler/smart_contract.rs deleted file mode 100644 index 3da8acf48..000000000 --- a/processor/src/multisigs/scheduler/smart_contract.rs +++ /dev/null @@ -1,208 +0,0 @@ -use std::{io, collections::HashSet}; - -use ciphersuite::{group::GroupEncoding, Ciphersuite}; - -use serai_client::primitives::{NetworkId, Coin, Balance}; - -use crate::{ - Get, DbTxn, Db, Payment, Plan, create_db, - networks::{Output, Network}, - multisigs::scheduler::{SchedulerAddendum, Scheduler as SchedulerTrait}, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Scheduler { - key: ::G, - coins: HashSet, - rotated: bool, -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum Addendum { - Nonce(u64), - RotateTo { nonce: u64, new_key: ::G }, -} - -impl SchedulerAddendum for Addendum { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - match kind[0] { - 0 => { - let mut nonce = [0; 8]; - reader.read_exact(&mut nonce)?; - Ok(Addendum::Nonce(u64::from_le_bytes(nonce))) - } - 1 => { - let mut nonce = [0; 8]; - reader.read_exact(&mut nonce)?; - let nonce = u64::from_le_bytes(nonce); - - let new_key = N::Curve::read_G(reader)?; - Ok(Addendum::RotateTo { nonce, new_key }) - } - _ => Err(io::Error::other("reading unknown Addendum type"))?, - } - } - fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - Addendum::Nonce(nonce) => { - writer.write_all(&[0])?; - writer.write_all(&nonce.to_le_bytes()) - } - Addendum::RotateTo { nonce, new_key } => { - writer.write_all(&[1])?; - writer.write_all(&nonce.to_le_bytes())?; - writer.write_all(new_key.to_bytes().as_ref()) - } - } - } -} - -create_db! 
{ - SchedulerDb { - LastNonce: () -> u64, - RotatedTo: (key: &[u8]) -> Vec, - } -} - -impl> SchedulerTrait for Scheduler { - type Addendum = Addendum; - - /// Check if this Scheduler is empty. - fn empty(&self) -> bool { - self.rotated - } - - /// Create a new Scheduler. - fn new( - _txn: &mut D::Transaction<'_>, - key: ::G, - network: NetworkId, - ) -> Self { - assert!(N::branch_address(key).is_none()); - assert!(N::change_address(key).is_none()); - assert!(N::forward_address(key).is_none()); - - Scheduler { key, coins: network.coins().iter().copied().collect(), rotated: false } - } - - /// Load a Scheduler from the DB. - fn from_db( - db: &D, - key: ::G, - network: NetworkId, - ) -> io::Result { - Ok(Scheduler { - key, - coins: network.coins().iter().copied().collect(), - rotated: RotatedTo::get(db, key.to_bytes().as_ref()).is_some(), - }) - } - - fn can_use_branch(&self, _balance: Balance) -> bool { - false - } - - fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - payments: Vec>, - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec> { - for utxo in utxos { - assert!(self.coins.contains(&utxo.balance().coin)); - } - - let mut nonce = LastNonce::get(txn).unwrap_or(1); - let mut plans = vec![]; - for chunk in payments.as_slice().chunks(N::MAX_OUTPUTS) { - // Once we rotate, all further payments should be scheduled via the new multisig - assert!(!self.rotated); - plans.push(Plan { - key: self.key, - inputs: vec![], - payments: chunk.to_vec(), - change: None, - scheduler_addendum: Addendum::Nonce(nonce), - }); - nonce += 1; - } - - // If we're supposed to rotate to the new key, create an empty Plan which will signify the key - // update - if force_spend && (!self.rotated) { - plans.push(Plan { - key: self.key, - inputs: vec![], - payments: vec![], - change: None, - scheduler_addendum: Addendum::RotateTo { nonce, new_key: key_for_any_change }, - }); - nonce += 1; - self.rotated = true; - RotatedTo::set( - txn, - self.key.to_bytes().as_ref(), - &key_for_any_change.to_bytes().as_ref().to_vec(), - ); - } - - LastNonce::set(txn, &nonce); - - plans - } - - fn consume_payments(&mut self, _txn: &mut D::Transaction<'_>) -> Vec> { - vec![] - } - - fn created_output( - &mut self, - _txn: &mut D::Transaction<'_>, - _expected: u64, - _actual: Option, - ) { - panic!("Smart Contract Scheduler created a Branch output") - } - - /// Refund a specific output. - fn refund_plan( - &mut self, - txn: &mut D::Transaction<'_>, - output: N::Output, - refund_to: N::Address, - ) -> Plan { - let current_key = RotatedTo::get(txn, self.key.to_bytes().as_ref()) - .and_then(|key_bytes| ::read_G(&mut key_bytes.as_slice()).ok()) - .unwrap_or(self.key); - - let nonce = LastNonce::get(txn).map_or(1, |nonce| nonce + 1); - LastNonce::set(txn, &(nonce + 1)); - Plan { - key: current_key, - inputs: vec![], - payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], - change: None, - scheduler_addendum: Addendum::Nonce(nonce), - } - } - - fn shim_forward_plan(_output: N::Output, _to: ::G) -> Option> { - None - } - - /// Forward a specific output to the new multisig. - /// - /// Returns None if no forwarding is necessary. 
- fn forward_plan( - &mut self, - _txn: &mut D::Transaction<'_>, - _output: N::Output, - _to: ::G, - ) -> Option> { - None - } -} From da3c75827c8bacfa7315a17e9e7bed1811d76d54 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 14 Sep 2024 07:54:18 -0400 Subject: [PATCH 127/179] Outline the Ethereum processor This was only half-finished to begin with, unfortunately... --- Cargo.lock | 8 +- networks/ethereum/src/crypto.rs | 15 +- processor/ethereum/Cargo.toml | 5 +- processor/ethereum/src/key_gen.rs | 25 + processor/ethereum/src/lib.rs | 467 +----------------- processor/ethereum/src/main.rs | 65 +++ processor/ethereum/src/primitives/block.rs | 71 +++ processor/ethereum/src/primitives/mod.rs | 3 + processor/ethereum/src/primitives/output.rs | 123 +++++ .../ethereum/src/primitives/transaction.rs | 117 +++++ processor/ethereum/src/publisher.rs | 60 +++ processor/ethereum/src/rpc.rs | 135 +++++ processor/ethereum/src/scheduler.rs | 90 ++++ processor/monero/Cargo.toml | 5 - processor/scheduler/smart-contract/Cargo.toml | 2 - processor/scheduler/smart-contract/src/lib.rs | 88 ++-- substrate/client/Cargo.toml | 1 + substrate/client/src/networks/ethereum.rs | 51 ++ substrate/client/src/networks/mod.rs | 3 + 19 files changed, 810 insertions(+), 524 deletions(-) create mode 100644 processor/ethereum/src/key_gen.rs create mode 100644 processor/ethereum/src/main.rs create mode 100644 processor/ethereum/src/primitives/block.rs create mode 100644 processor/ethereum/src/primitives/mod.rs create mode 100644 processor/ethereum/src/primitives/output.rs create mode 100644 processor/ethereum/src/primitives/transaction.rs create mode 100644 processor/ethereum/src/publisher.rs create mode 100644 processor/ethereum/src/rpc.rs create mode 100644 processor/ethereum/src/scheduler.rs create mode 100644 substrate/client/src/networks/ethereum.rs diff --git a/Cargo.lock b/Cargo.lock index 147cc295b..e98a8f34c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8351,9 +8351,9 @@ version = "0.1.0" dependencies = [ "borsh", "ciphersuite", + "const-hex", "dkg", "ethereum-serai", - "flexible-transcript", "hex", "k256", "log", @@ -8362,6 +8362,7 @@ dependencies = [ "rand_core", "serai-client", "serai-db", + "serai-env", "serai-processor-bin", "serai-processor-key-gen", "serai-processor-primitives", @@ -8522,11 +8523,8 @@ version = "0.1.0" dependencies = [ "borsh", "ciphersuite", - "curve25519-dalek", "dalek-ff-group", "dkg", - "flexible-transcript", - "hex", "log", "modular-frost", "monero-simple-request-rpc", @@ -8535,7 +8533,6 @@ dependencies = [ "rand_chacha", "rand_core", "serai-client", - "serai-db", "serai-processor-bin", "serai-processor-key-gen", "serai-processor-primitives", @@ -8796,7 +8793,6 @@ dependencies = [ "group", "parity-scale-codec", "serai-db", - "serai-primitives", "serai-processor-primitives", "serai-processor-scanner", "serai-processor-scheduler-primitives", diff --git a/networks/ethereum/src/crypto.rs b/networks/ethereum/src/crypto.rs index 6ea6a0b04..326343d86 100644 --- a/networks/ethereum/src/crypto.rs +++ b/networks/ethereum/src/crypto.rs @@ -1,10 +1,12 @@ use group::ff::PrimeField; use k256::{ - elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint}, - ProjectivePoint, Scalar, U256 as KU256, + elliptic_curve::{ + ops::Reduce, + point::{AffineCoordinates, DecompressPoint}, + sec1::ToEncodedPoint, + }, + AffinePoint, ProjectivePoint, Scalar, U256 as KU256, }; -#[cfg(test)] -use k256::{elliptic_curve::point::DecompressPoint, AffinePoint}; use frost::{ algorithm::{Hram, 
SchnorrSignature}, @@ -99,12 +101,11 @@ impl PublicKey { self.A } - pub(crate) fn eth_repr(&self) -> [u8; 32] { + pub fn eth_repr(&self) -> [u8; 32] { self.px.to_repr().into() } - #[cfg(test)] - pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option { + pub fn from_eth_repr(repr: [u8; 32]) -> Option { #[allow(non_snake_case)] let A = Option::::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into(); Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px }) diff --git a/processor/ethereum/Cargo.toml b/processor/ethereum/Cargo.toml index ede9c71b1..12f56d72c 100644 --- a/processor/ethereum/Cargo.toml +++ b/processor/ethereum/Cargo.toml @@ -19,11 +19,11 @@ workspace = true [dependencies] rand_core = { version = "0.6", default-features = false } +const-hex = { version = "1", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } @@ -31,12 +31,13 @@ frost = { package = "modular-frost", path = "../../crypto/frost", default-featur k256 = { version = "^0.13.1", default-features = false, features = ["std"] } ethereum-serai = { path = "../../networks/ethereum", default-features = false, optional = true } -serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] } +serai-client = { path = "../../substrate/client", default-features = false, features = ["ethereum"] } zalloc = { path = "../../common/zalloc" } log = { version = "0.4", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } +serai-env = { path = "../../common/env" } serai-db = { path = "../../common/db" } key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } diff --git a/processor/ethereum/src/key_gen.rs b/processor/ethereum/src/key_gen.rs new file mode 100644 index 000000000..73b7c1e16 --- /dev/null +++ b/processor/ethereum/src/key_gen.rs @@ -0,0 +1,25 @@ +use ciphersuite::{Ciphersuite, Secp256k1}; +use dkg::ThresholdKeys; + +use ethereum_serai::crypto::PublicKey; + +pub(crate) struct KeyGenParams; +impl key_gen::KeyGenParams for KeyGenParams { + const ID: &'static str = "Ethereum"; + + type ExternalNetworkCiphersuite = Secp256k1; + + fn tweak_keys(keys: &mut ThresholdKeys) { + while PublicKey::new(keys.group_key()).is_none() { + *keys = keys.offset(::F::ONE); + } + } + + fn encode_key(key: ::G) -> Vec { + PublicKey::new(key).unwrap().eth_repr().to_vec() + } + + fn decode_key(key: &[u8]) -> Option<::G> { + PublicKey::from_eth_repr(key.try_into().ok()?).map(|key| key.point()) + } +} diff --git a/processor/ethereum/src/lib.rs b/processor/ethereum/src/lib.rs index 99d042038..a8f55c791 100644 --- a/processor/ethereum/src/lib.rs +++ b/processor/ethereum/src/lib.rs @@ -1,3 +1,4 @@ +/* #![cfg_attr(docsrs, feature(doc_auto_cfg))] #![doc = 
include_str!("../README.md")] #![deny(missing_docs)] @@ -59,240 +60,6 @@ use crate::{ }, }; -#[cfg(not(test))] -const DAI: [u8; 20] = - match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") { - Ok(res) => res, - Err(_) => panic!("invalid non-test DAI hex address"), - }; -#[cfg(test)] // TODO -const DAI: [u8; 20] = - match const_hex::const_decode_to_array(b"0000000000000000000000000000000000000000") { - Ok(res) => res, - Err(_) => panic!("invalid test DAI hex address"), - }; - -fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { - match coin { - EthereumCoin::Ether => Some(Coin::Ether), - EthereumCoin::Erc20(token) => { - if *token == DAI { - return Some(Coin::Dai); - } - None - } - } -} - -fn amount_to_serai_amount(coin: Coin, amount: U256) -> Amount { - assert_eq!(coin.network(), NetworkId::Ethereum); - assert_eq!(coin.decimals(), 8); - // Remove 10 decimals so we go from 18 decimals to 8 decimals - let divisor = U256::from(10_000_000_000u64); - // This is valid up to 184b, which is assumed for the coins allowed - Amount(u64::try_from(amount / divisor).unwrap()) -} - -fn balance_to_ethereum_amount(balance: Balance) -> U256 { - assert_eq!(balance.coin.network(), NetworkId::Ethereum); - assert_eq!(balance.coin.decimals(), 8); - // Restore 10 decimals so we go from 8 decimals to 18 decimals - let factor = U256::from(10_000_000_000u64); - U256::from(balance.amount.0) * factor -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Address(pub [u8; 20]); -impl TryFrom> for Address { - type Error = (); - fn try_from(bytes: Vec) -> Result { - if bytes.len() != 20 { - Err(())?; - } - let mut res = [0; 20]; - res.copy_from_slice(&bytes); - Ok(Address(res)) - } -} -impl TryInto> for Address { - type Error = (); - fn try_into(self) -> Result, ()> { - Ok(self.0.to_vec()) - } -} - -impl fmt::Display for Address { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ethereum_serai::alloy::primitives::Address::from(self.0).fmt(f) - } -} - -impl SignableTransaction for RouterCommand { - fn fee(&self) -> u64 { - // Return a fee of 0 as we'll handle amortization on our end - 0 - } -} - -#[async_trait] -impl TransactionTrait> for Transaction { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - self.hash.0 - } - - #[cfg(test)] - async fn fee(&self, _network: &Ethereum) -> u64 { - // Return a fee of 0 as we'll handle amortization on our end - 0 - } -} - -// We use 32-block Epochs to represent blocks. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Epoch { - // The hash of the block which ended the prior Epoch. - prior_end_hash: [u8; 32], - // The first block number within this Epoch. - start: u64, - // The hash of the last block within this Epoch. - end_hash: [u8; 32], - // The monotonic time for this Epoch. - time: u64, -} - -impl Epoch { - fn end(&self) -> u64 { - self.start + 31 - } -} - -#[async_trait] -impl Block> for Epoch { - type Id = [u8; 32]; - fn id(&self) -> [u8; 32] { - self.end_hash - } - fn parent(&self) -> [u8; 32] { - self.prior_end_hash - } - async fn time(&self, _: &Ethereum) -> u64 { - self.time - } -} - -impl Output> for EthereumInInstruction { - type Id = [u8; 32]; - - fn kind(&self) -> OutputType { - OutputType::External - } - - fn id(&self) -> Self::Id { - let mut id = [0; 40]; - id[.. 
32].copy_from_slice(&self.id.0); - id[32 ..].copy_from_slice(&self.id.1.to_le_bytes()); - *ethereum_serai::alloy::primitives::keccak256(id) - } - fn tx_id(&self) -> [u8; 32] { - self.id.0 - } - fn key(&self) -> ::G { - self.key_at_end_of_block - } - - fn presumed_origin(&self) -> Option
{ - Some(Address(self.from)) - } - - fn balance(&self) -> Balance { - let coin = coin_to_serai_coin(&self.coin).unwrap_or_else(|| { - panic!( - "requesting coin for an EthereumInInstruction with a coin {}", - "we don't handle. this never should have been yielded" - ) - }); - Balance { coin, amount: amount_to_serai_amount(coin, self.amount) } - } - fn data(&self) -> &[u8] { - &self.data - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - EthereumInInstruction::write(self, writer) - } - fn read(reader: &mut R) -> io::Result { - EthereumInInstruction::read(reader) - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Claim { - signature: [u8; 64], -} -impl AsRef<[u8]> for Claim { - fn as_ref(&self) -> &[u8] { - &self.signature - } -} -impl AsMut<[u8]> for Claim { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.signature - } -} -impl Default for Claim { - fn default() -> Self { - Self { signature: [0; 64] } - } -} -impl From<&Signature> for Claim { - fn from(sig: &Signature) -> Self { - Self { signature: sig.to_bytes() } - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Eventuality(PublicKey, RouterCommand); -impl EventualityTrait for Eventuality { - type Claim = Claim; - type Completion = SignedRouterCommand; - - fn lookup(&self) -> Vec { - match self.1 { - RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { - nonce.as_le_bytes().to_vec() - } - } - } - - fn read(reader: &mut R) -> io::Result { - let point = Secp256k1::read_G(reader)?; - let command = RouterCommand::read(reader)?; - Ok(Eventuality( - PublicKey::new(point).ok_or(io::Error::other("unusable key within Eventuality"))?, - command, - )) - } - fn serialize(&self) -> Vec { - let mut res = vec![]; - res.extend(self.0.point().to_bytes().as_slice()); - self.1.write(&mut res).unwrap(); - res - } - - fn claim(completion: &Self::Completion) -> Self::Claim { - Claim::from(completion.signature()) - } - fn serialize_completion(completion: &Self::Completion) -> Vec { - let mut res = vec![]; - completion.write(&mut res).unwrap(); - res - } - fn read_completion(reader: &mut R) -> io::Result { - SignedRouterCommand::read(reader) - } -} - #[derive(Clone)] pub struct Ethereum { // This DB is solely used to access the first key generated, as needed to determine the Router's @@ -305,20 +72,6 @@ pub struct Ethereum { deployer: Deployer, router: Arc>>, } -impl PartialEq for Ethereum { - fn eq(&self, _other: &Ethereum) -> bool { - true - } -} -impl fmt::Debug for Ethereum { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("Ethereum") - .field("deployer", &self.deployer) - .field("router", &self.router) - .finish_non_exhaustive() - } -} impl Ethereum { pub async fn new(db: D, daemon_url: String, relayer_url: String) -> Self { let provider = Arc::new(RootProvider::new( @@ -384,110 +137,10 @@ impl Ethereum { #[async_trait] impl Network for Ethereum { - type Curve = Secp256k1; - - type Transaction = Transaction; - type Block = Epoch; - - type Output = EthereumInInstruction; - type SignableTransaction = RouterCommand; - type Eventuality = Eventuality; - type TransactionMachine = RouterCommandMachine; - - type Scheduler = Scheduler; - - type Address = Address; - - const NETWORK: NetworkId = NetworkId::Ethereum; - const ID: &'static str = "Ethereum"; - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 32 * 12; - const CONFIRMATIONS: usize = 1; - const DUST: u64 = 0; // TODO const COST_TO_AGGREGATE: u64 = 0; - // TODO: usize::max, with a merkle tree in the router - 
const MAX_OUTPUTS: usize = 256; - - fn tweak_keys(keys: &mut ThresholdKeys) { - while PublicKey::new(keys.group_key()).is_none() { - *keys = keys.offset(::F::ONE); - } - } - - #[cfg(test)] - async fn external_address(&self, _key: ::G) -> Address { - Address(self.router().await.as_ref().unwrap().address()) - } - - fn branch_address(_key: ::G) -> Option
{ - None - } - - fn change_address(_key: ::G) -> Option
{ - None - } - - fn forward_address(_key: ::G) -> Option
{ - None - } - - async fn get_latest_block_number(&self) -> Result { - let actual_number = self - .provider - .get_block(BlockNumberOrTag::Finalized.into(), BlockTransactionsKind::Hashes) - .await - .map_err(|_| NetworkError::ConnectionError)? - .ok_or(NetworkError::ConnectionError)? - .header - .number; - // Error if there hasn't been a full epoch yet - if actual_number < 32 { - Err(NetworkError::ConnectionError)? - } - // If this is 33, the division will return 1, yet 1 is the epoch in progress - let latest_full_epoch = (actual_number / 32).saturating_sub(1); - Ok(latest_full_epoch.try_into().unwrap()) - } - - async fn get_block(&self, number: usize) -> Result { - let latest_finalized = self.get_latest_block_number().await?; - if number > latest_finalized { - Err(NetworkError::ConnectionError)? - } - - let start = number * 32; - let prior_end_hash = if start == 0 { - [0; 32] - } else { - self - .provider - .get_block(u64::try_from(start - 1).unwrap().into(), BlockTransactionsKind::Hashes) - .await - .ok() - .flatten() - .ok_or(NetworkError::ConnectionError)? - .header - .hash - .into() - }; - - let end_header = self - .provider - .get_block(u64::try_from(start + 31).unwrap().into(), BlockTransactionsKind::Hashes) - .await - .ok() - .flatten() - .ok_or(NetworkError::ConnectionError)? - .header; - - let end_hash = end_header.hash.into(); - let time = end_header.timestamp; - - Ok(Epoch { prior_end_hash, start: start.try_into().unwrap(), end_hash, time }) - } - async fn get_outputs( &self, block: &Self::Block, @@ -627,97 +280,6 @@ impl Network for Ethereum { res } - async fn needed_fee( - &self, - _block_number: usize, - inputs: &[Self::Output], - _payments: &[Payment], - _change: &Option, - ) -> Result, NetworkError> { - assert_eq!(inputs.len(), 0); - // Claim no fee is needed so we can perform amortization ourselves - Ok(Some(0)) - } - - async fn signable_transaction( - &self, - _block_number: usize, - _plan_id: &[u8; 32], - key: ::G, - inputs: &[Self::Output], - payments: &[Payment], - change: &Option, - scheduler_addendum: &>::Addendum, - ) -> Result, NetworkError> { - assert_eq!(inputs.len(), 0); - assert!(change.is_none()); - let chain_id = self.provider.get_chain_id().await.map_err(|_| NetworkError::ConnectionError)?; - - // TODO: Perform fee amortization (in scheduler? - // TODO: Make this function internal and have needed_fee properly return None as expected? - // TODO: signable_transaction is written as cannot return None if needed_fee returns Some - // TODO: Why can this return None at all if it isn't allowed to return None? - - let command = match scheduler_addendum { - Addendum::Nonce(nonce) => RouterCommand::Execute { - chain_id: U256::try_from(chain_id).unwrap(), - nonce: U256::try_from(*nonce).unwrap(), - outs: payments - .iter() - .filter_map(|payment| { - Some(OutInstruction { - target: if let Some(data) = payment.data.as_ref() { - // This introspects the Call serialization format, expecting the first 20 bytes to - // be the address - // This avoids wasting the 20-bytes allocated within address - let full_data = [payment.address.0.as_slice(), data].concat(); - let mut reader = full_data.as_slice(); - - let mut calls = vec![]; - while !reader.is_empty() { - calls.push(Call::read(&mut reader).ok()?) 
- } - // The above must have executed at least once since reader contains the address - assert_eq!(calls[0].to, payment.address.0); - - OutInstructionTarget::Calls(calls) - } else { - OutInstructionTarget::Direct(payment.address.0) - }, - value: { - assert_eq!(payment.balance.coin, Coin::Ether); // TODO - balance_to_ethereum_amount(payment.balance) - }, - }) - }) - .collect(), - }, - Addendum::RotateTo { nonce, new_key } => { - assert!(payments.is_empty()); - RouterCommand::UpdateSeraiKey { - chain_id: U256::try_from(chain_id).unwrap(), - nonce: U256::try_from(*nonce).unwrap(), - key: PublicKey::new(*new_key).expect("new key wasn't a valid ETH public key"), - } - } - }; - Ok(Some(( - command.clone(), - Eventuality(PublicKey::new(key).expect("key wasn't a valid ETH public key"), command), - ))) - } - - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: Self::SignableTransaction, - ) -> Result { - Ok( - RouterCommandMachine::new(keys, transaction) - .expect("keys weren't usable to sign router commands"), - ) - } - async fn publish_completion( &self, completion: &::Completion, @@ -725,32 +287,6 @@ impl Network for Ethereum { // Publish this to the dedicated TX server for a solver to actually publish #[cfg(not(test))] { - let mut msg = vec![]; - match completion.command() { - RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { - msg.extend(&u32::try_from(nonce).unwrap().to_le_bytes()); - } - } - completion.write(&mut msg).unwrap(); - - let Ok(mut socket) = TcpStream::connect(&self.relayer_url).await else { - log::warn!("couldn't connect to the relayer server"); - Err(NetworkError::ConnectionError)? - }; - let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { - log::warn!("couldn't send the message's len to the relayer server"); - Err(NetworkError::ConnectionError)? - }; - let Ok(()) = socket.write_all(&msg).await else { - log::warn!("couldn't write the message to the relayer server"); - Err(NetworkError::ConnectionError)? 
- }; - if socket.read_u8().await.ok() != Some(1) { - log::warn!("didn't get the ack from the relayer server"); - Err(NetworkError::ConnectionError)?; - } - - Ok(()) } // Publish this using a dummy account we fund with magic RPC commands @@ -938,3 +474,4 @@ impl Network for Ethereum { self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap() } } +*/ diff --git a/processor/ethereum/src/main.rs b/processor/ethereum/src/main.rs new file mode 100644 index 000000000..e4ec37013 --- /dev/null +++ b/processor/ethereum/src/main.rs @@ -0,0 +1,65 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc = + zalloc::ZeroizingAlloc(std::alloc::System); + +use std::sync::Arc; + +use ethereum_serai::alloy::{ + primitives::U256, + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, +}; + +use serai_env as env; + +mod primitives; +pub(crate) use crate::primitives::*; + +mod key_gen; +use crate::key_gen::KeyGenParams; +mod rpc; +use rpc::Rpc; +mod scheduler; +use scheduler::{SmartContract, Scheduler}; +mod publisher; +use publisher::TransactionPublisher; + +#[tokio::main] +async fn main() { + let db = bin::init(); + let feed = { + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(bin::url()), true), + )); + Rpc { provider } + }; + let chain_id = loop { + match feed.provider.get_chain_id().await { + Ok(chain_id) => break U256::try_from(chain_id).unwrap(), + Err(e) => { + log::error!("couldn't connect to the Ethereum node for the chain ID: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } + } + }; + + bin::main_loop::<_, KeyGenParams, _>( + db, + feed.clone(), + Scheduler::new(SmartContract { chain_id }), + TransactionPublisher::new({ + let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") + .expect("ethereum relayer hostname wasn't specified") + .to_string(); + let relayer_port = + env::var("ETHEREUM_RELAYER_PORT").expect("ethereum relayer port wasn't specified"); + relayer_hostname + ":" + &relayer_port + }), + ) + .await; +} diff --git a/processor/ethereum/src/primitives/block.rs b/processor/ethereum/src/primitives/block.rs new file mode 100644 index 000000000..e947e8513 --- /dev/null +++ b/processor/ethereum/src/primitives/block.rs @@ -0,0 +1,71 @@ +use std::collections::HashMap; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use serai_client::networks::ethereum::Address; + +use primitives::{ReceivedOutput, EventualityTracker}; +use crate::{output::Output, transaction::Eventuality}; + +// We interpret 32-block Epochs as singular blocks. +// There's no reason for further accuracy when these will all finalize at the same time. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub(crate) struct Epoch { + // The hash of the block which ended the prior Epoch. + pub(crate) prior_end_hash: [u8; 32], + // The first block number within this Epoch. + pub(crate) start: u64, + // The hash of the last block within this Epoch. + pub(crate) end_hash: [u8; 32], + // The monotonic time for this Epoch. + pub(crate) time: u64, +} + +impl Epoch { + // The block number of the last block within this epoch. 
+  fn end(&self) -> u64 {
+    self.start + 31
+  }
+}
+
+impl primitives::BlockHeader for Epoch {
+  fn id(&self) -> [u8; 32] {
+    self.end_hash
+  }
+  fn parent(&self) -> [u8; 32] {
+    self.prior_end_hash
+  }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub(crate) struct FullEpoch {
+  epoch: Epoch,
+}
+
+impl primitives::Block for FullEpoch {
+  type Header = Epoch;
+
+  type Key = <Secp256k1 as Ciphersuite>::G;
+  type Address = Address;
+  type Output = Output;
+  type Eventuality = Eventuality;
+
+  fn id(&self) -> [u8; 32] {
+    self.epoch.end_hash
+  }
+
+  fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec<Self::Output> {
+    todo!("TODO")
+  }
+
+  #[allow(clippy::type_complexity)]
+  fn check_for_eventuality_resolutions(
+    &self,
+    eventualities: &mut EventualityTracker<Self::Eventuality>,
+  ) -> HashMap<
+    <Self::Output as ReceivedOutput<Self::Key, Self::Address>>::TransactionId,
+    Self::Eventuality,
+  > {
+    todo!("TODO")
+  }
+}
diff --git a/processor/ethereum/src/primitives/mod.rs b/processor/ethereum/src/primitives/mod.rs
new file mode 100644
index 000000000..fba52dd96
--- /dev/null
+++ b/processor/ethereum/src/primitives/mod.rs
@@ -0,0 +1,3 @@
+pub(crate) mod output;
+pub(crate) mod transaction;
+pub(crate) mod block;
diff --git a/processor/ethereum/src/primitives/output.rs b/processor/ethereum/src/primitives/output.rs
new file mode 100644
index 000000000..fcafae753
--- /dev/null
+++ b/processor/ethereum/src/primitives/output.rs
@@ -0,0 +1,123 @@
+use std::io;
+
+use ciphersuite::{Ciphersuite, Secp256k1};
+
+use ethereum_serai::{
+  alloy::primitives::U256,
+  router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction},
+};
+
+use scale::{Encode, Decode};
+use borsh::{BorshSerialize, BorshDeserialize};
+
+use serai_client::{
+  primitives::{NetworkId, Coin, Amount, Balance},
+  networks::ethereum::Address,
+};
+
+use primitives::{OutputType, ReceivedOutput};
+
+#[cfg(not(test))]
+const DAI: [u8; 20] =
+  match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") {
+    Ok(res) => res,
+    Err(_) => panic!("invalid non-test DAI hex address"),
+  };
+#[cfg(test)] // TODO
+const DAI: [u8; 20] =
+  match const_hex::const_decode_to_array(b"0000000000000000000000000000000000000000") {
+    Ok(res) => res,
+    Err(_) => panic!("invalid test DAI hex address"),
+  };
+
+fn coin_to_serai_coin(coin: &EthereumCoin) -> Option<Coin> {
+  match coin {
+    EthereumCoin::Ether => Some(Coin::Ether),
+    EthereumCoin::Erc20(token) => {
+      if *token == DAI {
+        return Some(Coin::Dai);
+      }
+      None
+    }
+  }
+}
+
+fn amount_to_serai_amount(coin: Coin, amount: U256) -> Amount {
+  assert_eq!(coin.network(), NetworkId::Ethereum);
+  assert_eq!(coin.decimals(), 8);
+  // Remove 10 decimals so we go from 18 decimals to 8 decimals
+  let divisor = U256::from(10_000_000_000u64);
+  // This is valid up to 184b, which is assumed for the coins allowed
+  Amount(u64::try_from(amount / divisor).unwrap())
+}
+
+#[derive(
+  Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize,
+)]
+pub(crate) struct OutputId(pub(crate) [u8; 40]);
+impl Default for OutputId {
+  fn default() -> Self {
+    Self([0; 40])
+  }
+}
+impl AsRef<[u8]> for OutputId {
+  fn as_ref(&self) -> &[u8] {
+    self.0.as_ref()
+  }
+}
+impl AsMut<[u8]> for OutputId {
+  fn as_mut(&mut self) -> &mut [u8] {
+    self.0.as_mut()
+  }
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub(crate) struct Output(pub(crate) EthereumInInstruction);
+impl ReceivedOutput<<Secp256k1 as Ciphersuite>::G, Address> for Output {
+  type Id = OutputId;
+  type TransactionId = [u8; 32];
+
+  // We only scan external outputs as we don't have branch/change/forwards
+  fn kind(&self) -> OutputType {
OutputType::External + } + + fn id(&self) -> Self::Id { + let mut id = [0; 40]; + id[.. 32].copy_from_slice(&self.0.id.0); + id[32 ..].copy_from_slice(&self.0.id.1.to_le_bytes()); + OutputId(id) + } + + fn transaction_id(&self) -> Self::TransactionId { + self.0.id.0 + } + + fn key(&self) -> ::G { + self.0.key_at_end_of_block + } + + fn presumed_origin(&self) -> Option
{ + Some(Address::from(self.0.from)) + } + + fn balance(&self) -> Balance { + let coin = coin_to_serai_coin(&self.0.coin).unwrap_or_else(|| { + panic!( + "mapping coin from an EthereumInInstruction with coin {}, which we don't handle.", + "this never should have been yielded" + ) + }); + Balance { coin, amount: amount_to_serai_amount(coin, self.0.amount) } + } + fn data(&self) -> &[u8] { + &self.0.data + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + self.0.write(writer) + } + fn read(reader: &mut R) -> io::Result { + EthereumInInstruction::read(reader).map(Self) + } +} diff --git a/processor/ethereum/src/primitives/transaction.rs b/processor/ethereum/src/primitives/transaction.rs new file mode 100644 index 000000000..908358ecb --- /dev/null +++ b/processor/ethereum/src/primitives/transaction.rs @@ -0,0 +1,117 @@ +use std::io; + +use rand_core::{RngCore, CryptoRng}; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; +use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; + +use ethereum_serai::{crypto::PublicKey, machine::*}; + +use crate::output::OutputId; + +#[derive(Clone, Debug)] +pub(crate) struct Transaction(pub(crate) SignedRouterCommand); + +impl From for Transaction { + fn from(signed_router_command: SignedRouterCommand) -> Self { + Self(signed_router_command) + } +} + +impl scheduler::Transaction for Transaction { + fn read(reader: &mut impl io::Read) -> io::Result { + SignedRouterCommand::read(reader).map(Self) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.0.write(writer) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct SignableTransaction(pub(crate) RouterCommand); + +#[derive(Clone)] +pub(crate) struct ClonableTransctionMachine(RouterCommand, ThresholdKeys); +impl PreprocessMachine for ClonableTransctionMachine { + type Preprocess = ::Preprocess; + type Signature = ::Signature; + type SignMachine = ::SignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + // TODO: Use a proper error here, not an Option + RouterCommandMachine::new(self.1.clone(), self.0.clone()).unwrap().preprocess(rng) + } +} + +impl scheduler::SignableTransaction for SignableTransaction { + type Transaction = Transaction; + type Ciphersuite = Secp256k1; + type PreprocessMachine = ClonableTransctionMachine; + + fn read(reader: &mut impl io::Read) -> io::Result { + RouterCommand::read(reader).map(Self) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.0.write(writer) + } + + fn id(&self) -> [u8; 32] { + let mut res = [0; 32]; + // TODO: Add getter for the nonce + match self.0 { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { + res[.. 8].copy_from_slice(&nonce.as_le_bytes()); + } + } + res + } + + fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine { + ClonableTransctionMachine(self.0, keys) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Eventuality(pub(crate) PublicKey, pub(crate) RouterCommand); + +impl primitives::Eventuality for Eventuality { + type OutputId = OutputId; + + fn id(&self) -> [u8; 32] { + let mut res = [0; 32]; + match self.1 { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { + res[.. 8].copy_from_slice(&nonce.as_le_bytes()); + } + } + res + } + + fn lookup(&self) -> Vec { + match self.1 { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. 
} => { + nonce.as_le_bytes().to_vec() + } + } + } + + fn singular_spent_output(&self) -> Option { + None + } + + fn read(reader: &mut impl io::Read) -> io::Result { + let point = Secp256k1::read_G(reader)?; + let command = RouterCommand::read(reader)?; + Ok(Eventuality( + PublicKey::new(point).ok_or(io::Error::other("unusable key within Eventuality"))?, + command, + )) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(self.0.point().to_bytes().as_slice())?; + self.1.write(writer) + } +} diff --git a/processor/ethereum/src/publisher.rs b/processor/ethereum/src/publisher.rs new file mode 100644 index 000000000..ad8bd09dc --- /dev/null +++ b/processor/ethereum/src/publisher.rs @@ -0,0 +1,60 @@ +use core::future::Future; + +use crate::transaction::Transaction; + +#[derive(Clone)] +pub(crate) struct TransactionPublisher { + relayer_url: String, +} + +impl TransactionPublisher { + pub(crate) fn new(relayer_url: String) -> Self { + Self { relayer_url } + } +} + +impl signers::TransactionPublisher for TransactionPublisher { + type EphemeralError = (); + + fn publish( + &self, + tx: Transaction, + ) -> impl Send + Future> { + async move { + /* + use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, + }; + + let mut msg = vec![]; + match completion.command() { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { + msg.extend(&u32::try_from(nonce).unwrap().to_le_bytes()); + } + } + completion.write(&mut msg).unwrap(); + + let Ok(mut socket) = TcpStream::connect(&self.relayer_url).await else { + log::warn!("couldn't connect to the relayer server"); + Err(NetworkError::ConnectionError)? + }; + let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { + log::warn!("couldn't send the message's len to the relayer server"); + Err(NetworkError::ConnectionError)? + }; + let Ok(()) = socket.write_all(&msg).await else { + log::warn!("couldn't write the message to the relayer server"); + Err(NetworkError::ConnectionError)? + }; + if socket.read_u8().await.ok() != Some(1) { + log::warn!("didn't get the ack from the relayer server"); + Err(NetworkError::ConnectionError)?; + } + + Ok(()) + */ + todo!("TODO") + } + } +} diff --git a/processor/ethereum/src/rpc.rs b/processor/ethereum/src/rpc.rs new file mode 100644 index 000000000..58b3933e9 --- /dev/null +++ b/processor/ethereum/src/rpc.rs @@ -0,0 +1,135 @@ +use core::future::Future; +use std::sync::Arc; + +use ethereum_serai::{ + alloy::{ + rpc_types::{BlockTransactionsKind, BlockNumberOrTag}, + simple_request_transport::SimpleRequest, + provider::{Provider, RootProvider}, + }, +}; + +use serai_client::primitives::{NetworkId, Coin, Amount}; + +use scanner::ScannerFeed; + +use crate::block::{Epoch, FullEpoch}; + +#[derive(Clone)] +pub(crate) struct Rpc { + pub(crate) provider: Arc>, +} + +impl ScannerFeed for Rpc { + const NETWORK: NetworkId = NetworkId::Ethereum; + + // We only need one confirmation as Ethereum properly finalizes + const CONFIRMATIONS: u64 = 1; + // The window length should be roughly an hour + const WINDOW_LENGTH: u64 = 10; + + const TEN_MINUTES: u64 = 2; + + type Block = FullEpoch; + + type EphemeralError = String; + + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future> { + async move { + let actual_number = self + .provider + .get_block(BlockNumberOrTag::Finalized.into(), BlockTransactionsKind::Hashes) + .await + .map_err(|e| format!("couldn't get the latest finalized block: {e:?}"))? 
+        .ok_or_else(|| "there was no finalized block".to_string())?
+        .header
+        .number;
+      // Error if there hasn't been a full epoch yet
+      if actual_number < 32 {
+        Err("there has not been a completed epoch yet".to_string())?
+      }
+      // The division by 32 returns the number of completed epochs
+      // Converting from the number of completed epochs to the latest completed epoch requires
+      // subtracting 1
+      let latest_full_epoch = (actual_number / 32) - 1;
+      Ok(latest_full_epoch)
+    }
+  }
+
+  fn time_of_block(
+    &self,
+    number: u64,
+  ) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>> {
+    async move { todo!("TODO") }
+  }
+
+  fn unchecked_block_header_by_number(
+    &self,
+    number: u64,
+  ) -> impl Send
+       + Future<Output = Result<<Self::Block as primitives::Block>::Header, Self::EphemeralError>>
+  {
+    async move {
+      let start = number * 32;
+      let prior_end_hash = if start == 0 {
+        [0; 32]
+      } else {
+        self
+          .provider
+          .get_block((start - 1).into(), BlockTransactionsKind::Hashes)
+          .await
+          .map_err(|e| format!("couldn't get block: {e:?}"))?
+          .ok_or_else(|| {
+            format!("ethereum node didn't have requested block: {number:?}. did we reorg?")
+          })?
+          .header
+          .hash
+          .into()
+      };
+
+      let end_header = self
+        .provider
+        .get_block((start + 31).into(), BlockTransactionsKind::Hashes)
+        .await
+        .map_err(|e| format!("couldn't get block: {e:?}"))?
+        .ok_or_else(|| {
+          format!("ethereum node didn't have requested block: {number:?}. did we reorg?")
+        })?
+        .header;
+
+      let end_hash = end_header.hash.into();
+      let time = end_header.timestamp;
+
+      Ok(Epoch { prior_end_hash, start, end_hash, time })
+    }
+  }
+
+  #[rustfmt::skip] // It wants to improperly format the `async move` to a single line
+  fn unchecked_block_by_number(
+    &self,
+    number: u64,
+  ) -> impl Send + Future<Output = Result<Self::Block, Self::EphemeralError>> {
+    async move {
+      todo!("TODO")
+    }
+  }
+
+  fn dust(coin: Coin) -> Amount {
+    assert_eq!(coin.network(), NetworkId::Ethereum);
+    todo!("TODO")
+  }
+
+  fn cost_to_aggregate(
+    &self,
+    coin: Coin,
+    _reference_block: &Self::Block,
+  ) -> impl Send + Future<Output = Result<Amount, Self::EphemeralError>> {
+    async move {
+      assert_eq!(coin.network(), NetworkId::Ethereum);
+      // TODO
+      Ok(Amount(0))
+    }
+  }
+}
diff --git a/processor/ethereum/src/scheduler.rs b/processor/ethereum/src/scheduler.rs
new file mode 100644
index 000000000..6e17ef70e
--- /dev/null
+++ b/processor/ethereum/src/scheduler.rs
@@ -0,0 +1,90 @@
+use serai_client::primitives::{NetworkId, Balance};
+
+use ethereum_serai::{alloy::primitives::U256, router::PublicKey, machine::*};
+
+use primitives::Payment;
+use scanner::{KeyFor, AddressFor, EventualityFor};
+
+use crate::{
+  transaction::{SignableTransaction, Eventuality},
+  rpc::Rpc,
+};
+
+fn balance_to_ethereum_amount(balance: Balance) -> U256 {
+  assert_eq!(balance.coin.network(), NetworkId::Ethereum);
+  assert_eq!(balance.coin.decimals(), 8);
+  // Restore 10 decimals so we go from 8 decimals to 18 decimals
+  // TODO: Document the expectation that all integrated coins have 18 decimals
+  let factor = U256::from(10_000_000_000u64);
+  U256::from(balance.amount.0) * factor
+}
+
+#[derive(Clone)]
+pub(crate) struct SmartContract {
+  pub(crate) chain_id: U256,
+}
+impl smart_contract_scheduler::SmartContract<Rpc> for SmartContract {
+  type SignableTransaction = SignableTransaction;
+
+  fn rotate(
+    &self,
+    nonce: u64,
+    retiring_key: KeyFor<Rpc>,
+    new_key: KeyFor<Rpc>,
+  ) -> (Self::SignableTransaction, EventualityFor<Rpc>) {
+    let command = RouterCommand::UpdateSeraiKey {
+      chain_id: self.chain_id,
+      nonce: U256::try_from(nonce).unwrap(),
+      key: PublicKey::new(new_key).expect("rotating to an invalid key"),
+    };
+    (
+      SignableTransaction(command.clone()),
Eventuality(PublicKey::new(retiring_key).expect("retiring an invalid key"), command), + ) + } + fn fulfill( + &self, + nonce: u64, + key: KeyFor, + payments: Vec>>, + ) -> Vec<(Self::SignableTransaction, EventualityFor)> { + let mut outs = Vec::with_capacity(payments.len()); + for payment in payments { + outs.push(OutInstruction { + target: if let Some(data) = payment.data() { + // This introspects the Call serialization format, expecting the first 20 bytes to + // be the address + // This avoids wasting the 20-bytes allocated within address + let full_data = [<[u8; 20]>::from(*payment.address()).as_slice(), data].concat(); + let mut reader = full_data.as_slice(); + + let mut calls = vec![]; + while !reader.is_empty() { + let Ok(call) = Call::read(&mut reader) else { break }; + calls.push(call); + } + // The above must have executed at least once since reader contains the address + assert_eq!(calls[0].to, <[u8; 20]>::from(*payment.address())); + + OutInstructionTarget::Calls(calls) + } else { + OutInstructionTarget::Direct((*payment.address()).into()) + }, + value: { balance_to_ethereum_amount(payment.balance()) }, + }); + } + + let command = RouterCommand::Execute { + chain_id: self.chain_id, + nonce: U256::try_from(nonce).unwrap(), + outs, + }; + + vec![( + SignableTransaction(command.clone()), + Eventuality(PublicKey::new(key).expect("fulfilling payments with an invalid key"), command), + )] + } +} + +pub(crate) type Scheduler = smart_contract_scheduler::Scheduler; diff --git a/processor/monero/Cargo.toml b/processor/monero/Cargo.toml index cc895edaa..6ea49a0ca 100644 --- a/processor/monero/Cargo.toml +++ b/processor/monero/Cargo.toml @@ -21,12 +21,9 @@ rand_core = { version = "0.6", default-features = false } rand_chacha = { version = "0.3", default-features = false, features = ["std"] } zeroize = { version = "1", default-features = false, features = ["std"] } -hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } -curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize"] } dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["std"] } ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ed25519"] } dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ed25519"] } @@ -41,8 +38,6 @@ zalloc = { path = "../../common/zalloc" } log = { version = "0.4", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } -serai-db = { path = "../../common/db" } - key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } view-keys = { package = "serai-processor-view-keys", path = "../view-keys" } diff --git a/processor/scheduler/smart-contract/Cargo.toml b/processor/scheduler/smart-contract/Cargo.toml index 69ce9840a..c43569fb6 100644 --- a/processor/scheduler/smart-contract/Cargo.toml +++ b/processor/scheduler/smart-contract/Cargo.toml @@ -25,8 +25,6 @@ group = { version = "0.13", default-features = false } scale = { package = "parity-scale-codec", version = "3", default-features = 
false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -serai-primitives = { path = "../../../substrate/primitives", default-features = false, features = ["std"] } - serai-db = { path = "../../../common/db" } primitives = { package = "serai-processor-primitives", path = "../../primitives" } diff --git a/processor/scheduler/smart-contract/src/lib.rs b/processor/scheduler/smart-contract/src/lib.rs index 091ffe6ab..7630a0267 100644 --- a/processor/scheduler/smart-contract/src/lib.rs +++ b/processor/scheduler/smart-contract/src/lib.rs @@ -29,49 +29,61 @@ pub trait SmartContract: 'static + Send { /// Rotate from the retiring key to the new key. fn rotate( + &self, nonce: u64, retiring_key: KeyFor, new_key: KeyFor, ) -> (Self::SignableTransaction, EventualityFor); + /// Fulfill the set of payments, dropping any not worth handling. fn fulfill( + &self, starting_nonce: u64, + key: KeyFor, payments: Vec>>, ) -> Vec<(Self::SignableTransaction, EventualityFor)>; } /// A scheduler for a smart contract representing the Serai processor. #[allow(non_snake_case)] -#[derive(Clone, Default)] -pub struct Scheduler> { +#[derive(Clone)] +pub struct Scheduler> { + smart_contract: SC, _S: PhantomData, - _SC: PhantomData, } -fn fulfill_payments>( - txn: &mut impl DbTxn, - active_keys: &[(KeyFor, LifetimeStage)], - payments: Vec>>, -) -> KeyScopedEventualities { - let key = match active_keys[0].1 { - LifetimeStage::ActiveYetNotReporting | - LifetimeStage::Active | - LifetimeStage::UsingNewForChange => active_keys[0].0, - LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, - }; - - let mut nonce = NextNonce::get(txn).unwrap_or(0); - let mut eventualities = Vec::with_capacity(1); - for (signable, eventuality) in SC::fulfill(nonce, payments) { - TransactionsToSign::::send(txn, &key, &signable); - nonce += 1; - eventualities.push(eventuality); +impl> Scheduler { + /// Create a new scheduler. 
+ pub fn new(smart_contract: SC) -> Self { + Self { smart_contract, _S: PhantomData } + } + + fn fulfill_payments( + &self, + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> KeyScopedEventualities { + let key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + let mut nonce = NextNonce::get(txn).unwrap_or(0); + let mut eventualities = Vec::with_capacity(1); + for (signable, eventuality) in self.smart_contract.fulfill(nonce, key, payments) { + TransactionsToSign::::send(txn, &key, &signable); + nonce += 1; + eventualities.push(eventuality); + } + NextNonce::set(txn, &nonce); + HashMap::from([(key.to_bytes().as_ref().to_vec(), eventualities)]) } - NextNonce::set(txn, &nonce); - HashMap::from([(key.to_bytes().as_ref().to_vec(), eventualities)]) } -impl> SchedulerTrait for Scheduler { +impl> SchedulerTrait for Scheduler { type EphemeralError = (); type SignableTransaction = SC::SignableTransaction; @@ -86,7 +98,7 @@ impl> SchedulerTrait for Scheduler impl Send + Future, Self::EphemeralError>> { async move { let nonce = NextNonce::get(txn).unwrap_or(0); - let (signable, eventuality) = SC::rotate(nonce, retiring_key, new_key); + let (signable, eventuality) = self.smart_contract.rotate(nonce, retiring_key, new_key); NextNonce::set(txn, &(nonce + 1)); TransactionsToSign::::send(txn, &retiring_key, &signable); Ok(HashMap::from([(retiring_key.to_bytes().as_ref().to_vec(), vec![eventuality])])) @@ -110,17 +122,19 @@ impl> SchedulerTrait for Scheduler( - txn, - active_keys, - update - .returns() - .iter() - .map(|to_return| { - Payment::new(to_return.address().clone(), to_return.output().balance(), None) - }) - .collect::>(), - )) + Ok( + self.fulfill_payments( + txn, + active_keys, + update + .returns() + .iter() + .map(|to_return| { + Payment::new(to_return.address().clone(), to_return.output().balance(), None) + }) + .collect::>(), + ), + ) } } @@ -131,6 +145,6 @@ impl> SchedulerTrait for Scheduler, LifetimeStage)], payments: Vec>>, ) -> impl Send + Future, Self::EphemeralError>> { - async move { Ok(fulfill_payments::(txn, active_keys, payments)) } + async move { Ok(self.fulfill_payments(txn, active_keys, payments)) } } } diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml index 5f7a24d43..33bfabf9a 100644 --- a/substrate/client/Cargo.toml +++ b/substrate/client/Cargo.toml @@ -65,6 +65,7 @@ borsh = ["serai-abi/borsh"] networks = [] bitcoin = ["networks", "dep:bitcoin"] +ethereum = ["networks"] monero = ["networks", "ciphersuite/ed25519", "monero-address"] # Assumes the default usage is to use Serai as a DEX, which doesn't actually diff --git a/substrate/client/src/networks/ethereum.rs b/substrate/client/src/networks/ethereum.rs new file mode 100644 index 000000000..092851699 --- /dev/null +++ b/substrate/client/src/networks/ethereum.rs @@ -0,0 +1,51 @@ +use core::{str::FromStr, fmt}; + +use borsh::{BorshSerialize, BorshDeserialize}; + +use crate::primitives::ExternalAddress; + +/// A representation of an Ethereum address. +#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub struct Address([u8; 20]); + +impl From<[u8; 20]> for Address { + fn from(address: [u8; 20]) -> Self { + Self(address) + } +} + +impl From
<Address> for [u8; 20] {
+  fn from(address: Address) -> Self {
+    address.0
+  }
+}
+
+impl TryFrom<ExternalAddress> for Address {
+  type Error = ();
+  fn try_from(data: ExternalAddress) -> Result<Self, Self::Error> {
+    Ok(Self(data.as_ref().try_into().map_err(|_| ())?))
+  }
+}
+impl From<Address>
for ExternalAddress {
+  fn from(address: Address) -> ExternalAddress {
+    // This is 20 bytes which is less than MAX_ADDRESS_LEN
+    ExternalAddress::new(address.0.to_vec()).unwrap()
+  }
+}
+
+impl FromStr for Address {
+  type Err = ();
+  fn from_str(str: &str) -> Result<Self, Self::Err> {
+    let Some(address) = str.strip_prefix("0x") else { Err(())? };
+    if address.len() != 40 {
+      Err(())?
+    };
+    Ok(Self(hex::decode(address.to_lowercase()).map_err(|_| ())?.try_into().unwrap()))
+  }
+}
+
+impl fmt::Display for Address {
+  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+    write!(f, "0x{}", hex::encode(self.0))
+  }
+}
diff --git a/substrate/client/src/networks/mod.rs b/substrate/client/src/networks/mod.rs
index 63ebf481a..7a99631a4 100644
--- a/substrate/client/src/networks/mod.rs
+++ b/substrate/client/src/networks/mod.rs
@@ -1,5 +1,8 @@
 #[cfg(feature = "bitcoin")]
 pub mod bitcoin;
 
+#[cfg(feature = "ethereum")]
+pub mod ethereum;
+
 #[cfg(feature = "monero")]
 pub mod monero;

From 975201877e0dc06350d441d634336e6c903a5c85 Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Sat, 14 Sep 2024 12:50:14 -0400
Subject: [PATCH 128/179] Don't use a different address for DAI in test

anvil will let us deploy to the existing address.

---
 processor/ethereum/src/primitives/output.rs | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/processor/ethereum/src/primitives/output.rs b/processor/ethereum/src/primitives/output.rs
index fcafae753..4dadb1474 100644
--- a/processor/ethereum/src/primitives/output.rs
+++ b/processor/ethereum/src/primitives/output.rs
@@ -17,18 +17,11 @@ use serai_client::{
 
 use primitives::{OutputType, ReceivedOutput};
 
-#[cfg(not(test))]
 const DAI: [u8; 20] =
   match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") {
     Ok(res) => res,
     Err(_) => panic!("invalid non-test DAI hex address"),
   };
-#[cfg(test)] // TODO
-const DAI: [u8; 20] =
-  match const_hex::const_decode_to_array(b"0000000000000000000000000000000000000000") {
-    Ok(res) => res,
-    Err(_) => panic!("invalid test DAI hex address"),
-  };
 
 fn coin_to_serai_coin(coin: &EthereumCoin) -> Option<Coin> {
   match coin {

From 7b63bffda3de6c972f699bcfe2349a9b69d6d19c Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Sat, 14 Sep 2024 12:58:57 -0400
Subject: [PATCH 129/179] Move ethereum-serai under the processor

It isn't generally usable and should be directly integrated at this point.
--- .github/workflows/networks-tests.yml | 1 - .github/workflows/tests.yml | 1 + Cargo.toml | 2 +- deny.toml | 2 +- processor/ethereum/Cargo.toml | 2 +- .../ethereum/ethereum-serai}/.gitignore | 0 .../ethereum/ethereum-serai}/Cargo.toml | 8 ++++---- .../ethereum/ethereum-serai}/LICENSE | 0 .../ethereum/ethereum-serai}/README.md | 0 .../ethereum/ethereum-serai}/build.rs | 0 .../ethereum/ethereum-serai}/contracts/Deployer.sol | 0 .../ethereum/ethereum-serai}/contracts/IERC20.sol | 0 .../ethereum/ethereum-serai}/contracts/Router.sol | 0 .../ethereum/ethereum-serai}/contracts/Sandbox.sol | 0 .../ethereum/ethereum-serai}/contracts/Schnorr.sol | 0 .../ethereum/ethereum-serai}/src/abi/mod.rs | 0 .../ethereum/ethereum-serai}/src/crypto.rs | 0 .../ethereum/ethereum-serai}/src/deployer.rs | 0 .../ethereum/ethereum-serai}/src/erc20.rs | 0 .../ethereum/ethereum-serai}/src/lib.rs | 0 .../ethereum/ethereum-serai}/src/machine.rs | 0 .../ethereum/ethereum-serai}/src/router.rs | 0 .../ethereum/ethereum-serai}/src/tests/abi/mod.rs | 0 .../ethereum-serai}/src/tests/contracts/ERC20.sol | 0 .../ethereum-serai}/src/tests/contracts/Schnorr.sol | 0 .../ethereum/ethereum-serai}/src/tests/crypto.rs | 0 .../ethereum/ethereum-serai}/src/tests/mod.rs | 0 .../ethereum/ethereum-serai}/src/tests/router.rs | 0 .../ethereum/ethereum-serai}/src/tests/schnorr.rs | 0 tests/processor/Cargo.toml | 2 +- 30 files changed, 9 insertions(+), 9 deletions(-) rename {networks/ethereum => processor/ethereum/ethereum-serai}/.gitignore (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/Cargo.toml (75%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/LICENSE (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/README.md (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/build.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/contracts/Deployer.sol (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/contracts/IERC20.sol (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/contracts/Router.sol (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/contracts/Sandbox.sol (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/contracts/Schnorr.sol (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/abi/mod.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/crypto.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/deployer.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/erc20.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/lib.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/machine.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/router.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/tests/abi/mod.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/tests/contracts/ERC20.sol (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/tests/contracts/Schnorr.sol (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/tests/crypto.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/tests/mod.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/tests/router.rs (100%) rename {networks/ethereum => processor/ethereum/ethereum-serai}/src/tests/schnorr.rs 
(100%) diff --git a/.github/workflows/networks-tests.yml b/.github/workflows/networks-tests.yml index f346b9861..7fde517b4 100644 --- a/.github/workflows/networks-tests.yml +++ b/.github/workflows/networks-tests.yml @@ -31,7 +31,6 @@ jobs: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p bitcoin-serai \ -p alloy-simple-request-transport \ - -p ethereum-serai \ -p serai-ethereum-relayer \ -p monero-io \ -p monero-generators \ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e1c54349c..4e1c167a4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -52,6 +52,7 @@ jobs: -p serai-processor-signers \ -p serai-processor-bin \ -p serai-bitcoin-processor \ + -p ethereum-serai \ -p serai-ethereum-processor \ -p serai-monero-processor \ -p tendermint-machine \ diff --git a/Cargo.toml b/Cargo.toml index adaa63db5..09e512553 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,6 @@ members = [ "networks/bitcoin", "networks/ethereum/alloy-simple-request-transport", - "networks/ethereum", "networks/ethereum/relayer", "networks/monero/io", @@ -86,6 +85,7 @@ members = [ "processor/bin", "processor/bitcoin", + "processor/ethereum/ethereum-serai", "processor/ethereum", "processor/monero", diff --git a/deny.toml b/deny.toml index 0e013f5e7..183122e81 100644 --- a/deny.toml +++ b/deny.toml @@ -40,7 +40,6 @@ allow = [ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-env" }, - { allow = ["AGPL-3.0"], name = "ethereum-serai" }, { allow = ["AGPL-3.0"], name = "serai-ethereum-relayer" }, { allow = ["AGPL-3.0"], name = "serai-message-queue" }, @@ -59,6 +58,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-signers" }, { allow = ["AGPL-3.0"], name = "serai-bitcoin-processor" }, + { allow = ["AGPL-3.0"], name = "ethereum-serai" }, { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" }, { allow = ["AGPL-3.0"], name = "serai-monero-processor" }, diff --git a/processor/ethereum/Cargo.toml b/processor/ethereum/Cargo.toml index 12f56d72c..dfed2f9d4 100644 --- a/processor/ethereum/Cargo.toml +++ b/processor/ethereum/Cargo.toml @@ -29,7 +29,7 @@ dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } k256 = { version = "^0.13.1", default-features = false, features = ["std"] } -ethereum-serai = { path = "../../networks/ethereum", default-features = false, optional = true } +ethereum-serai = { path = "./ethereum-serai", default-features = false, optional = true } serai-client = { path = "../../substrate/client", default-features = false, features = ["ethereum"] } diff --git a/networks/ethereum/.gitignore b/processor/ethereum/ethereum-serai/.gitignore similarity index 100% rename from networks/ethereum/.gitignore rename to processor/ethereum/ethereum-serai/.gitignore diff --git a/networks/ethereum/Cargo.toml b/processor/ethereum/ethereum-serai/Cargo.toml similarity index 75% rename from networks/ethereum/Cargo.toml rename to processor/ethereum/ethereum-serai/Cargo.toml index a91b83c54..ed4520d11 100644 --- a/networks/ethereum/Cargo.toml +++ b/processor/ethereum/ethereum-serai/Cargo.toml @@ -21,11 +21,11 @@ thiserror = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false, features = ["std"] } -transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] } +transcript = { package = "flexible-transcript", 
path = "../../../crypto/transcript", default-features = false, features = ["recommended"] } group = { version = "0.13", default-features = false } k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] } -frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] } +frost = { package = "modular-frost", path = "../../../crypto/frost", default-features = false, features = ["secp256k1"] } alloy-core = { version = "0.8", default-features = false } alloy-sol-types = { version = "0.8", default-features = false, features = ["json"] } @@ -33,13 +33,13 @@ alloy-consensus = { version = "0.3", default-features = false, features = ["k256 alloy-network = { version = "0.3", default-features = false } alloy-rpc-types-eth = { version = "0.3", default-features = false } alloy-rpc-client = { version = "0.3", default-features = false } -alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false } +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } alloy-provider = { version = "0.3", default-features = false } alloy-node-bindings = { version = "0.3", default-features = false, optional = true } [dev-dependencies] -frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] } +frost = { package = "modular-frost", path = "../../../crypto/frost", default-features = false, features = ["tests"] } tokio = { version = "1", features = ["macros"] } diff --git a/networks/ethereum/LICENSE b/processor/ethereum/ethereum-serai/LICENSE similarity index 100% rename from networks/ethereum/LICENSE rename to processor/ethereum/ethereum-serai/LICENSE diff --git a/networks/ethereum/README.md b/processor/ethereum/ethereum-serai/README.md similarity index 100% rename from networks/ethereum/README.md rename to processor/ethereum/ethereum-serai/README.md diff --git a/networks/ethereum/build.rs b/processor/ethereum/ethereum-serai/build.rs similarity index 100% rename from networks/ethereum/build.rs rename to processor/ethereum/ethereum-serai/build.rs diff --git a/networks/ethereum/contracts/Deployer.sol b/processor/ethereum/ethereum-serai/contracts/Deployer.sol similarity index 100% rename from networks/ethereum/contracts/Deployer.sol rename to processor/ethereum/ethereum-serai/contracts/Deployer.sol diff --git a/networks/ethereum/contracts/IERC20.sol b/processor/ethereum/ethereum-serai/contracts/IERC20.sol similarity index 100% rename from networks/ethereum/contracts/IERC20.sol rename to processor/ethereum/ethereum-serai/contracts/IERC20.sol diff --git a/networks/ethereum/contracts/Router.sol b/processor/ethereum/ethereum-serai/contracts/Router.sol similarity index 100% rename from networks/ethereum/contracts/Router.sol rename to processor/ethereum/ethereum-serai/contracts/Router.sol diff --git a/networks/ethereum/contracts/Sandbox.sol b/processor/ethereum/ethereum-serai/contracts/Sandbox.sol similarity index 100% rename from networks/ethereum/contracts/Sandbox.sol rename to processor/ethereum/ethereum-serai/contracts/Sandbox.sol diff --git a/networks/ethereum/contracts/Schnorr.sol b/processor/ethereum/ethereum-serai/contracts/Schnorr.sol similarity index 100% rename from networks/ethereum/contracts/Schnorr.sol rename to processor/ethereum/ethereum-serai/contracts/Schnorr.sol diff --git a/networks/ethereum/src/abi/mod.rs 
b/processor/ethereum/ethereum-serai/src/abi/mod.rs similarity index 100% rename from networks/ethereum/src/abi/mod.rs rename to processor/ethereum/ethereum-serai/src/abi/mod.rs diff --git a/networks/ethereum/src/crypto.rs b/processor/ethereum/ethereum-serai/src/crypto.rs similarity index 100% rename from networks/ethereum/src/crypto.rs rename to processor/ethereum/ethereum-serai/src/crypto.rs diff --git a/networks/ethereum/src/deployer.rs b/processor/ethereum/ethereum-serai/src/deployer.rs similarity index 100% rename from networks/ethereum/src/deployer.rs rename to processor/ethereum/ethereum-serai/src/deployer.rs diff --git a/networks/ethereum/src/erc20.rs b/processor/ethereum/ethereum-serai/src/erc20.rs similarity index 100% rename from networks/ethereum/src/erc20.rs rename to processor/ethereum/ethereum-serai/src/erc20.rs diff --git a/networks/ethereum/src/lib.rs b/processor/ethereum/ethereum-serai/src/lib.rs similarity index 100% rename from networks/ethereum/src/lib.rs rename to processor/ethereum/ethereum-serai/src/lib.rs diff --git a/networks/ethereum/src/machine.rs b/processor/ethereum/ethereum-serai/src/machine.rs similarity index 100% rename from networks/ethereum/src/machine.rs rename to processor/ethereum/ethereum-serai/src/machine.rs diff --git a/networks/ethereum/src/router.rs b/processor/ethereum/ethereum-serai/src/router.rs similarity index 100% rename from networks/ethereum/src/router.rs rename to processor/ethereum/ethereum-serai/src/router.rs diff --git a/networks/ethereum/src/tests/abi/mod.rs b/processor/ethereum/ethereum-serai/src/tests/abi/mod.rs similarity index 100% rename from networks/ethereum/src/tests/abi/mod.rs rename to processor/ethereum/ethereum-serai/src/tests/abi/mod.rs diff --git a/networks/ethereum/src/tests/contracts/ERC20.sol b/processor/ethereum/ethereum-serai/src/tests/contracts/ERC20.sol similarity index 100% rename from networks/ethereum/src/tests/contracts/ERC20.sol rename to processor/ethereum/ethereum-serai/src/tests/contracts/ERC20.sol diff --git a/networks/ethereum/src/tests/contracts/Schnorr.sol b/processor/ethereum/ethereum-serai/src/tests/contracts/Schnorr.sol similarity index 100% rename from networks/ethereum/src/tests/contracts/Schnorr.sol rename to processor/ethereum/ethereum-serai/src/tests/contracts/Schnorr.sol diff --git a/networks/ethereum/src/tests/crypto.rs b/processor/ethereum/ethereum-serai/src/tests/crypto.rs similarity index 100% rename from networks/ethereum/src/tests/crypto.rs rename to processor/ethereum/ethereum-serai/src/tests/crypto.rs diff --git a/networks/ethereum/src/tests/mod.rs b/processor/ethereum/ethereum-serai/src/tests/mod.rs similarity index 100% rename from networks/ethereum/src/tests/mod.rs rename to processor/ethereum/ethereum-serai/src/tests/mod.rs diff --git a/networks/ethereum/src/tests/router.rs b/processor/ethereum/ethereum-serai/src/tests/router.rs similarity index 100% rename from networks/ethereum/src/tests/router.rs rename to processor/ethereum/ethereum-serai/src/tests/router.rs diff --git a/networks/ethereum/src/tests/schnorr.rs b/processor/ethereum/ethereum-serai/src/tests/schnorr.rs similarity index 100% rename from networks/ethereum/src/tests/schnorr.rs rename to processor/ethereum/ethereum-serai/src/tests/schnorr.rs diff --git a/tests/processor/Cargo.toml b/tests/processor/Cargo.toml index 13299b932..e37dc2a9b 100644 --- a/tests/processor/Cargo.toml +++ b/tests/processor/Cargo.toml @@ -29,7 +29,7 @@ dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] bitcoin-serai = 
{ path = "../../networks/bitcoin" } k256 = "0.13" -ethereum-serai = { path = "../../networks/ethereum" } +ethereum-serai = { path = "../../processor/ethereum/ethereum-serai" } monero-simple-request-rpc = { path = "../../networks/monero/rpc/simple-request" } monero-wallet = { path = "../../networks/monero/wallet" } From 31659daaf2b1f21ad3aa018a74e672ecd6920896 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 14 Sep 2024 22:12:32 -0400 Subject: [PATCH 130/179] Add crate for the Ethereum contracts --- .github/workflows/tests.yml | 2 +- Cargo.lock | 8 ++++ Cargo.toml | 1 + deny.toml | 1 + .../{ethereum-serai => contracts}/.gitignore | 0 processor/ethereum/contracts/Cargo.toml | 20 ++++++++ processor/ethereum/contracts/LICENSE | 15 ++++++ processor/ethereum/contracts/README.md | 7 +++ .../{ethereum-serai => contracts}/build.rs | 12 +++-- .../contracts/Deployer.sol | 0 .../contracts/IERC20.sol | 0 .../contracts/Router.sol | 0 .../contracts/Sandbox.sol | 0 .../contracts/Schnorr.sol | 0 .../contracts/tests}/ERC20.sol | 0 .../contracts/tests}/Schnorr.sol | 0 processor/ethereum/contracts/src/lib.rs | 48 +++++++++++++++++++ .../abi/mod.rs => contracts/src/tests.rs} | 4 +- processor/ethereum/ethereum-serai/Cargo.toml | 2 + .../ethereum/ethereum-serai/src/abi/mod.rs | 37 -------------- .../ethereum/ethereum-serai/src/deployer.rs | 2 +- processor/ethereum/ethereum-serai/src/lib.rs | 6 ++- .../ethereum/ethereum-serai/src/router.rs | 2 +- .../ethereum/ethereum-serai/src/tests/mod.rs | 2 +- 24 files changed, 121 insertions(+), 48 deletions(-) rename processor/ethereum/{ethereum-serai => contracts}/.gitignore (100%) create mode 100644 processor/ethereum/contracts/Cargo.toml create mode 100644 processor/ethereum/contracts/LICENSE create mode 100644 processor/ethereum/contracts/README.md rename processor/ethereum/{ethereum-serai => contracts}/build.rs (78%) rename processor/ethereum/{ethereum-serai => contracts}/contracts/Deployer.sol (100%) rename processor/ethereum/{ethereum-serai => contracts}/contracts/IERC20.sol (100%) rename processor/ethereum/{ethereum-serai => contracts}/contracts/Router.sol (100%) rename processor/ethereum/{ethereum-serai => contracts}/contracts/Sandbox.sol (100%) rename processor/ethereum/{ethereum-serai => contracts}/contracts/Schnorr.sol (100%) rename processor/ethereum/{ethereum-serai/src/tests/contracts => contracts/contracts/tests}/ERC20.sol (100%) rename processor/ethereum/{ethereum-serai/src/tests/contracts => contracts/contracts/tests}/Schnorr.sol (100%) create mode 100644 processor/ethereum/contracts/src/lib.rs rename processor/ethereum/{ethereum-serai/src/tests/abi/mod.rs => contracts/src/tests.rs} (71%) delete mode 100644 processor/ethereum/ethereum-serai/src/abi/mod.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4e1c167a4..9b90ee916 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -52,7 +52,7 @@ jobs: -p serai-processor-signers \ -p serai-processor-bin \ -p serai-bitcoin-processor \ - -p ethereum-serai \ + -p serai-processor-ethereum-contracts \ -p serai-ethereum-processor \ -p serai-monero-processor \ -p tendermint-machine \ diff --git a/Cargo.lock b/Cargo.lock index e98a8f34c..551082414 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2497,6 +2497,7 @@ dependencies = [ "k256", "modular-frost", "rand_core", + "serai-processor-ethereum-contracts", "thiserror", "tokio", ] @@ -8671,6 +8672,13 @@ dependencies = [ "zeroize", ] +[[package]] +name = "serai-processor-ethereum-contracts" +version = "0.1.0" 
+dependencies = [ + "alloy-sol-types", +] + [[package]] name = "serai-processor-frost-attempt-manager" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 09e512553..f06d76efd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,6 +85,7 @@ members = [ "processor/bin", "processor/bitcoin", + "processor/ethereum/contracts", "processor/ethereum/ethereum-serai", "processor/ethereum", "processor/monero", diff --git a/deny.toml b/deny.toml index 183122e81..cef3a6837 100644 --- a/deny.toml +++ b/deny.toml @@ -59,6 +59,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-bitcoin-processor" }, { allow = ["AGPL-3.0"], name = "ethereum-serai" }, + { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-contracts" }, { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" }, { allow = ["AGPL-3.0"], name = "serai-monero-processor" }, diff --git a/processor/ethereum/ethereum-serai/.gitignore b/processor/ethereum/contracts/.gitignore similarity index 100% rename from processor/ethereum/ethereum-serai/.gitignore rename to processor/ethereum/contracts/.gitignore diff --git a/processor/ethereum/contracts/Cargo.toml b/processor/ethereum/contracts/Cargo.toml new file mode 100644 index 000000000..87beba08a --- /dev/null +++ b/processor/ethereum/contracts/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "serai-processor-ethereum-contracts" +version = "0.1.0" +description = "Ethereum contracts for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/contracts" +authors = ["Luke Parker ", "Elizabeth Binks "] +edition = "2021" +publish = false +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +alloy-sol-types = { version = "0.8", default-features = false } diff --git a/processor/ethereum/contracts/LICENSE b/processor/ethereum/contracts/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/ethereum/contracts/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/ethereum/contracts/README.md b/processor/ethereum/contracts/README.md new file mode 100644 index 000000000..fcd8f3c75 --- /dev/null +++ b/processor/ethereum/contracts/README.md @@ -0,0 +1,7 @@ +# Serai Processor Ethereum Contracts + +The Ethereum contracts used for (and for testing) the Serai processor. This is +its own crate for organizational and build-time reasons. It is not intended to +be publicly used. + +This crate will fail to build if `solc` is not installed and available. 
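Aside, on how these compiled-in artifacts get consumed: the `BYTECODE` constants are solc's hex output embedded via `include_str!`, so callers decode them to raw bytes before use. A minimal sketch follows, mirroring the `Bytes::from_hex` pattern the deployer code in this commit uses; the wrapper function and exact import path are illustrative assumptions, not part of the patch.

  use alloy_core::primitives::{hex::FromHex, Bytes};

  // Hypothetical helper: decode the compiled-in Deployer bytecode (a hex string
  // emitted by solc) into the raw bytes used for a deployment transaction.
  fn deployer_init_code() -> Bytes {
    Bytes::from_hex(contracts::deployer::BYTECODE)
      .expect("compiled-in Deployer bytecode wasn't valid hex")
  }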
diff --git a/processor/ethereum/ethereum-serai/build.rs b/processor/ethereum/contracts/build.rs similarity index 78% rename from processor/ethereum/ethereum-serai/build.rs rename to processor/ethereum/contracts/build.rs index 38fcfe002..fe79fcc1d 100644 --- a/processor/ethereum/ethereum-serai/build.rs +++ b/processor/ethereum/contracts/build.rs @@ -28,14 +28,18 @@ fn main() { "./contracts/Sandbox.sol", "./contracts/Router.sol", - "./src/tests/contracts/Schnorr.sol", - "./src/tests/contracts/ERC20.sol", + "./contracts/tests/Schnorr.sol", + "./contracts/tests/ERC20.sol", "--no-color", ]; let solc = Command::new("solc").args(args).output().unwrap(); assert!(solc.status.success()); - for line in String::from_utf8(solc.stderr).unwrap().lines() { - assert!(!line.starts_with("Error:")); + let stderr = String::from_utf8(solc.stderr).unwrap(); + for line in stderr.lines() { + if line.contains("Error:") { + println!("{stderr}"); + panic!() + } } } diff --git a/processor/ethereum/ethereum-serai/contracts/Deployer.sol b/processor/ethereum/contracts/contracts/Deployer.sol similarity index 100% rename from processor/ethereum/ethereum-serai/contracts/Deployer.sol rename to processor/ethereum/contracts/contracts/Deployer.sol diff --git a/processor/ethereum/ethereum-serai/contracts/IERC20.sol b/processor/ethereum/contracts/contracts/IERC20.sol similarity index 100% rename from processor/ethereum/ethereum-serai/contracts/IERC20.sol rename to processor/ethereum/contracts/contracts/IERC20.sol diff --git a/processor/ethereum/ethereum-serai/contracts/Router.sol b/processor/ethereum/contracts/contracts/Router.sol similarity index 100% rename from processor/ethereum/ethereum-serai/contracts/Router.sol rename to processor/ethereum/contracts/contracts/Router.sol diff --git a/processor/ethereum/ethereum-serai/contracts/Sandbox.sol b/processor/ethereum/contracts/contracts/Sandbox.sol similarity index 100% rename from processor/ethereum/ethereum-serai/contracts/Sandbox.sol rename to processor/ethereum/contracts/contracts/Sandbox.sol diff --git a/processor/ethereum/ethereum-serai/contracts/Schnorr.sol b/processor/ethereum/contracts/contracts/Schnorr.sol similarity index 100% rename from processor/ethereum/ethereum-serai/contracts/Schnorr.sol rename to processor/ethereum/contracts/contracts/Schnorr.sol diff --git a/processor/ethereum/ethereum-serai/src/tests/contracts/ERC20.sol b/processor/ethereum/contracts/contracts/tests/ERC20.sol similarity index 100% rename from processor/ethereum/ethereum-serai/src/tests/contracts/ERC20.sol rename to processor/ethereum/contracts/contracts/tests/ERC20.sol diff --git a/processor/ethereum/ethereum-serai/src/tests/contracts/Schnorr.sol b/processor/ethereum/contracts/contracts/tests/Schnorr.sol similarity index 100% rename from processor/ethereum/ethereum-serai/src/tests/contracts/Schnorr.sol rename to processor/ethereum/contracts/contracts/tests/Schnorr.sol diff --git a/processor/ethereum/contracts/src/lib.rs b/processor/ethereum/contracts/src/lib.rs new file mode 100644 index 000000000..fef10288e --- /dev/null +++ b/processor/ethereum/contracts/src/lib.rs @@ -0,0 +1,48 @@ +use alloy_sol_types::sol; + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod erc20_container { + use super::*; + sol!("contracts/IERC20.sol"); +} +pub mod erc20 { + pub const BYTECODE: &str = include_str!("../artifacts/Deployer.bin"); + pub use 
super::erc20_container::IERC20::*; +} + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod deployer_container { + use super::*; + sol!("contracts/Deployer.sol"); +} +pub mod deployer { + pub const BYTECODE: &str = include_str!("../artifacts/Deployer.bin"); + pub use super::deployer_container::Deployer::*; +} + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod router_container { + use super::*; + sol!(Router, "artifacts/Router.abi"); +} +pub mod router { + pub const BYTECODE: &str = include_str!("../artifacts/Router.bin"); + pub use super::router_container::Router::*; +} + +pub mod tests; diff --git a/processor/ethereum/ethereum-serai/src/tests/abi/mod.rs b/processor/ethereum/contracts/src/tests.rs similarity index 71% rename from processor/ethereum/ethereum-serai/src/tests/abi/mod.rs rename to processor/ethereum/contracts/src/tests.rs index 57ea88116..9f141c291 100644 --- a/processor/ethereum/ethereum-serai/src/tests/abi/mod.rs +++ b/processor/ethereum/contracts/src/tests.rs @@ -8,6 +8,6 @@ use alloy_sol_types::sol; #[allow(clippy::redundant_closure_for_method_calls)] mod schnorr_container { use super::*; - sol!("src/tests/contracts/Schnorr.sol"); + sol!("contracts/tests/Schnorr.sol"); } -pub(crate) use schnorr_container::TestSchnorr as schnorr; +pub use schnorr_container::TestSchnorr as schnorr; diff --git a/processor/ethereum/ethereum-serai/Cargo.toml b/processor/ethereum/ethereum-serai/Cargo.toml index ed4520d11..f0ea323f9 100644 --- a/processor/ethereum/ethereum-serai/Cargo.toml +++ b/processor/ethereum/ethereum-serai/Cargo.toml @@ -38,6 +38,8 @@ alloy-provider = { version = "0.3", default-features = false } alloy-node-bindings = { version = "0.3", default-features = false, optional = true } +contracts = { package = "serai-processor-ethereum-contracts", path = "../contracts" } + [dev-dependencies] frost = { package = "modular-frost", path = "../../../crypto/frost", default-features = false, features = ["tests"] } diff --git a/processor/ethereum/ethereum-serai/src/abi/mod.rs b/processor/ethereum/ethereum-serai/src/abi/mod.rs deleted file mode 100644 index 1ae233743..000000000 --- a/processor/ethereum/ethereum-serai/src/abi/mod.rs +++ /dev/null @@ -1,37 +0,0 @@ -use alloy_sol_types::sol; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod erc20_container { - use super::*; - sol!("contracts/IERC20.sol"); -} -pub use erc20_container::IERC20 as erc20; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod deployer_container { - use super::*; - sol!("contracts/Deployer.sol"); -} -pub use deployer_container::Deployer as deployer; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod router_container { - use super::*; - sol!(Router, "artifacts/Router.abi"); -} -pub use router_container::Router as router; diff --git a/processor/ethereum/ethereum-serai/src/deployer.rs 
b/processor/ethereum/ethereum-serai/src/deployer.rs index 19aa328d2..88f4a5fb9 100644 --- a/processor/ethereum/ethereum-serai/src/deployer.rs +++ b/processor/ethereum/ethereum-serai/src/deployer.rs @@ -30,7 +30,7 @@ impl Deployer { /// funded for this transaction to be submitted. This account has no known private key to anyone, /// so ETH sent can be neither misappropriated nor returned. pub fn deployment_tx() -> Signed { - let bytecode = include_str!("../artifacts/Deployer.bin"); + let bytecode = contracts::deployer::BYTECODE; let bytecode = Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex"); diff --git a/processor/ethereum/ethereum-serai/src/lib.rs b/processor/ethereum/ethereum-serai/src/lib.rs index 38bd79e79..761214018 100644 --- a/processor/ethereum/ethereum-serai/src/lib.rs +++ b/processor/ethereum/ethereum-serai/src/lib.rs @@ -15,7 +15,11 @@ pub mod alloy { pub mod crypto; -pub(crate) mod abi; +pub(crate) mod abi { + pub use contracts::erc20; + pub use contracts::deployer; + pub use contracts::router; +} pub mod erc20; pub mod deployer; diff --git a/processor/ethereum/ethereum-serai/src/router.rs b/processor/ethereum/ethereum-serai/src/router.rs index c569d4094..95866e675 100644 --- a/processor/ethereum/ethereum-serai/src/router.rs +++ b/processor/ethereum/ethereum-serai/src/router.rs @@ -135,7 +135,7 @@ pub struct Executed { pub struct Router(Arc>, Address); impl Router { pub(crate) fn code() -> Vec { - let bytecode = include_str!("../artifacts/Router.bin"); + let bytecode = contracts::router::BYTECODE; Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec() } diff --git a/processor/ethereum/ethereum-serai/src/tests/mod.rs b/processor/ethereum/ethereum-serai/src/tests/mod.rs index dcdbedce8..bdfa84142 100644 --- a/processor/ethereum/ethereum-serai/src/tests/mod.rs +++ b/processor/ethereum/ethereum-serai/src/tests/mod.rs @@ -21,7 +21,7 @@ use crate::crypto::{address, deterministically_sign, PublicKey}; mod crypto; #[cfg(test)] -mod abi; +use contracts::tests as abi; #[cfg(test)] mod schnorr; #[cfg(test)] From 0c026f7b0e7d84d9aa04d42d39fce8216cf95d61 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 14 Sep 2024 22:44:16 -0400 Subject: [PATCH 131/179] Add dedicated crate for building Solidity contracts --- .github/workflows/networks-tests.yml | 1 + Cargo.lock | 5 ++ Cargo.toml | 1 + networks/ethereum/build-contracts/Cargo.toml | 15 ++++ networks/ethereum/build-contracts/LICENSE | 15 ++++ networks/ethereum/build-contracts/README.md | 4 + networks/ethereum/build-contracts/src/lib.rs | 88 ++++++++++++++++++++ processor/ethereum/contracts/Cargo.toml | 3 + processor/ethereum/contracts/build.rs | 44 +--------- 9 files changed, 133 insertions(+), 43 deletions(-) create mode 100644 networks/ethereum/build-contracts/Cargo.toml create mode 100644 networks/ethereum/build-contracts/LICENSE create mode 100644 networks/ethereum/build-contracts/README.md create mode 100644 networks/ethereum/build-contracts/src/lib.rs diff --git a/.github/workflows/networks-tests.yml b/.github/workflows/networks-tests.yml index 7fde517b4..ee095df69 100644 --- a/.github/workflows/networks-tests.yml +++ b/.github/workflows/networks-tests.yml @@ -30,6 +30,7 @@ jobs: run: | GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p bitcoin-serai \ + -p build-solidity-contracts \ -p alloy-simple-request-transport \ -p serai-ethereum-relayer \ -p monero-io \ diff --git a/Cargo.lock b/Cargo.lock index 551082414..f4584f65a 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -1318,6 +1318,10 @@ dependencies = [ "semver 0.6.0", ] +[[package]] +name = "build-solidity-contracts" +version = "0.1.0" + [[package]] name = "bumpalo" version = "3.16.0" @@ -8677,6 +8681,7 @@ name = "serai-processor-ethereum-contracts" version = "0.1.0" dependencies = [ "alloy-sol-types", + "build-solidity-contracts", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f06d76efd..08e0aabee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,7 @@ members = [ "networks/bitcoin", + "networks/ethereum/build-contracts", "networks/ethereum/alloy-simple-request-transport", "networks/ethereum/relayer", diff --git a/networks/ethereum/build-contracts/Cargo.toml b/networks/ethereum/build-contracts/Cargo.toml new file mode 100644 index 000000000..cb47a28d6 --- /dev/null +++ b/networks/ethereum/build-contracts/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "build-solidity-contracts" +version = "0.1.0" +description = "A helper function to build Solidity contracts" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/build-contracts" +authors = ["Luke Parker "] +edition = "2021" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/networks/ethereum/build-contracts/LICENSE b/networks/ethereum/build-contracts/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/networks/ethereum/build-contracts/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/networks/ethereum/build-contracts/README.md b/networks/ethereum/build-contracts/README.md new file mode 100644 index 000000000..437f15c28 --- /dev/null +++ b/networks/ethereum/build-contracts/README.md @@ -0,0 +1,4 @@ +# Build Solidity Contracts + +A helper function to build Solidity contracts. This is intended to be called +from within build scripts. diff --git a/networks/ethereum/build-contracts/src/lib.rs b/networks/ethereum/build-contracts/src/lib.rs new file mode 100644 index 000000000..c546b1114 --- /dev/null +++ b/networks/ethereum/build-contracts/src/lib.rs @@ -0,0 +1,88 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::{path::PathBuf, fs, process::Command}; + +/// Build contracts placed in `contracts/`, outputting to `artifacts/`. +/// +/// Requires solc 0.8.25. +pub fn build(contracts_path: &str, artifacts_path: &str) -> Result<(), String> { + println!("cargo:rerun-if-changed={contracts_path}/*"); + println!("cargo:rerun-if-changed={artifacts_path}/*"); + + for line in String::from_utf8( + Command::new("solc") + .args(["--version"]) + .output() + .map_err(|_| "couldn't fetch solc output".to_string())? + .stdout, + ) + .map_err(|_| "solc stdout wasn't UTF-8")? 
+ .lines() + { + if let Some(version) = line.strip_prefix("Version: ") { + let version = + version.split('+').next().ok_or_else(|| "no value present on line".to_string())?; + if version != "0.8.25" { + Err(format!("version was {version}, 0.8.25 required"))? + } + } + } + + #[rustfmt::skip] + let args = [ + "--base-path", ".", + "-o", "./artifacts", "--overwrite", + "--bin", "--abi", + "--via-ir", "--optimize", + "--no-color", + ]; + let mut args = args.into_iter().map(str::to_string).collect::>(); + + let mut queue = vec![PathBuf::from(contracts_path)]; + while let Some(folder) = queue.pop() { + for entry in fs::read_dir(folder).map_err(|e| format!("couldn't read directory: {e:?}"))? { + let entry = entry.map_err(|e| format!("couldn't read directory in entry: {e:?}"))?; + let kind = entry.file_type().map_err(|e| format!("couldn't fetch file type: {e:?}"))?; + if kind.is_dir() { + queue.push(entry.path()); + } + + if kind.is_file() && + entry + .file_name() + .into_string() + .map_err(|_| "file name wasn't a valid UTF-8 string".to_string())? + .ends_with(".sol") + { + args.push( + entry + .path() + .into_os_string() + .into_string() + .map_err(|_| "file path wasn't a valid UTF-8 string".to_string())?, + ); + } + + // We on purposely ignore symlinks to avoid recursive structures + } + } + + let solc = Command::new("solc") + .args(args) + .output() + .map_err(|_| "couldn't fetch solc output".to_string())?; + let stderr = + String::from_utf8(solc.stderr).map_err(|_| "solc stderr wasn't UTF-8".to_string())?; + if !solc.status.success() { + Err(format!("solc didn't successfully execute: {stderr}"))?; + } + for line in stderr.lines() { + if line.contains("Error:") { + Err(format!("solc output had error: {stderr}"))?; + } + } + + Ok(()) +} diff --git a/processor/ethereum/contracts/Cargo.toml b/processor/ethereum/contracts/Cargo.toml index 87beba08a..f09eb938e 100644 --- a/processor/ethereum/contracts/Cargo.toml +++ b/processor/ethereum/contracts/Cargo.toml @@ -18,3 +18,6 @@ workspace = true [dependencies] alloy-sol-types = { version = "0.8", default-features = false } + +[build-dependencies] +build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts" } diff --git a/processor/ethereum/contracts/build.rs b/processor/ethereum/contracts/build.rs index fe79fcc1d..8e310b60e 100644 --- a/processor/ethereum/contracts/build.rs +++ b/processor/ethereum/contracts/build.rs @@ -1,45 +1,3 @@ -use std::process::Command; - fn main() { - println!("cargo:rerun-if-changed=contracts/*"); - println!("cargo:rerun-if-changed=artifacts/*"); - - for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout) - .unwrap() - .lines() - { - if let Some(version) = line.strip_prefix("Version: ") { - let version = version.split('+').next().unwrap(); - assert_eq!(version, "0.8.25"); - } - } - - #[rustfmt::skip] - let args = [ - "--base-path", ".", - "-o", "./artifacts", "--overwrite", - "--bin", "--abi", - "--via-ir", "--optimize", - - "./contracts/IERC20.sol", - - "./contracts/Schnorr.sol", - "./contracts/Deployer.sol", - "./contracts/Sandbox.sol", - "./contracts/Router.sol", - - "./contracts/tests/Schnorr.sol", - "./contracts/tests/ERC20.sol", - - "--no-color", - ]; - let solc = Command::new("solc").args(args).output().unwrap(); - assert!(solc.status.success()); - let stderr = String::from_utf8(solc.stderr).unwrap(); - for line in stderr.lines() { - if line.contains("Error:") { - println!("{stderr}"); - panic!() - } - } + build_solidity_contracts::build("contracts", 
"artifacts").unwrap(); } From a0702e621b6d560f81528662a05a17c95423a9b0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 15 Sep 2024 00:41:16 -0400 Subject: [PATCH 132/179] Dedicated crate for the Schnorr contract --- .github/workflows/networks-tests.yml | 1 + Cargo.lock | 20 ++++ Cargo.toml | 1 + deny.toml | 1 + networks/ethereum/build-contracts/src/lib.rs | 2 +- networks/ethereum/schnorr/.gitignore | 1 + networks/ethereum/schnorr/Cargo.toml | 42 +++++++ networks/ethereum/schnorr/LICENSE | 15 +++ networks/ethereum/schnorr/README.md | 5 + networks/ethereum/schnorr/build.rs | 3 + .../ethereum/schnorr}/contracts/Schnorr.sol | 32 +++--- .../schnorr}/contracts/tests/Schnorr.sol | 8 +- networks/ethereum/schnorr/src/lib.rs | 15 +++ networks/ethereum/schnorr/src/public_key.rs | 68 ++++++++++++ networks/ethereum/schnorr/src/signature.rs | 95 ++++++++++++++++ networks/ethereum/schnorr/src/tests.rs | 103 ++++++++++++++++++ processor/ethereum/contracts/.gitignore | 2 - .../ethereum/ethereum-serai/src/crypto.rs | 102 ----------------- .../ethereum/ethereum-serai/src/tests/mod.rs | 2 - .../ethereum-serai/src/tests/schnorr.rs | 93 ---------------- 20 files changed, 389 insertions(+), 222 deletions(-) create mode 100644 networks/ethereum/schnorr/.gitignore create mode 100644 networks/ethereum/schnorr/Cargo.toml create mode 100644 networks/ethereum/schnorr/LICENSE create mode 100644 networks/ethereum/schnorr/README.md create mode 100644 networks/ethereum/schnorr/build.rs rename {processor/ethereum/contracts => networks/ethereum/schnorr}/contracts/Schnorr.sol (50%) rename {processor/ethereum/contracts => networks/ethereum/schnorr}/contracts/tests/Schnorr.sol (53%) create mode 100644 networks/ethereum/schnorr/src/lib.rs create mode 100644 networks/ethereum/schnorr/src/public_key.rs create mode 100644 networks/ethereum/schnorr/src/signature.rs create mode 100644 networks/ethereum/schnorr/src/tests.rs delete mode 100644 processor/ethereum/ethereum-serai/src/tests/schnorr.rs diff --git a/.github/workflows/networks-tests.yml b/.github/workflows/networks-tests.yml index ee095df69..920449784 100644 --- a/.github/workflows/networks-tests.yml +++ b/.github/workflows/networks-tests.yml @@ -31,6 +31,7 @@ jobs: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p bitcoin-serai \ -p build-solidity-contracts \ + -p ethereum-schnorr-contract \ -p alloy-simple-request-transport \ -p serai-ethereum-relayer \ -p monero-io \ diff --git a/Cargo.lock b/Cargo.lock index f4584f65a..353206e94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2483,6 +2483,26 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "ethereum-schnorr-contract" +version = "0.1.0" +dependencies = [ + "alloy-core", + "alloy-node-bindings", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-sol-types", + "build-solidity-contracts", + "group", + "k256", + "rand_core", + "sha3", + "subtle", + "tokio", +] + [[package]] name = "ethereum-serai" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 08e0aabee..b30112b2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,6 +47,7 @@ members = [ "networks/bitcoin", "networks/ethereum/build-contracts", + "networks/ethereum/schnorr", "networks/ethereum/alloy-simple-request-transport", "networks/ethereum/relayer", diff --git a/deny.toml b/deny.toml index cef3a6837..ec948fef7 100644 --- a/deny.toml +++ b/deny.toml @@ -40,6 +40,7 @@ allow = [ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-env" }, + { allow = ["AGPL-3.0"], name = 
"ethereum-schnorr-contract" }, { allow = ["AGPL-3.0"], name = "serai-ethereum-relayer" }, { allow = ["AGPL-3.0"], name = "serai-message-queue" }, diff --git a/networks/ethereum/build-contracts/src/lib.rs b/networks/ethereum/build-contracts/src/lib.rs index c546b1114..93ab253eb 100644 --- a/networks/ethereum/build-contracts/src/lib.rs +++ b/networks/ethereum/build-contracts/src/lib.rs @@ -34,7 +34,7 @@ pub fn build(contracts_path: &str, artifacts_path: &str) -> Result<(), String> { let args = [ "--base-path", ".", "-o", "./artifacts", "--overwrite", - "--bin", "--abi", + "--bin", "--bin-runtime", "--abi", "--via-ir", "--optimize", "--no-color", ]; diff --git a/networks/ethereum/schnorr/.gitignore b/networks/ethereum/schnorr/.gitignore new file mode 100644 index 000000000..de153db3b --- /dev/null +++ b/networks/ethereum/schnorr/.gitignore @@ -0,0 +1 @@ +artifacts diff --git a/networks/ethereum/schnorr/Cargo.toml b/networks/ethereum/schnorr/Cargo.toml new file mode 100644 index 000000000..1c5d4f024 --- /dev/null +++ b/networks/ethereum/schnorr/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "ethereum-schnorr-contract" +version = "0.1.0" +description = "A Solidity contract to verify Schnorr signatures" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/schnorr" +authors = ["Luke Parker ", "Elizabeth Binks "] +edition = "2021" +publish = false +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +subtle = { version = "2", default-features = false, features = ["std"] } +sha3 = { version = "0.10", default-features = false, features = ["std"] } +group = { version = "0.13", default-features = false, features = ["alloc"] } +k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic"] } + +alloy-sol-types = { version = "0.8", default-features = false } + +[build-dependencies] +build-solidity-contracts = { path = "../build-contracts", version = "0.1" } + +[dev-dependencies] +rand_core = { version = "0.6", default-features = false, features = ["std"] } + +alloy-core = { version = "0.8", default-features = false } + +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-rpc-types-eth = { version = "0.3", default-features = false } +alloy-rpc-client = { version = "0.3", default-features = false } +alloy-provider = { version = "0.3", default-features = false } + +alloy-node-bindings = { version = "0.3", default-features = false } + +tokio = { version = "1", default-features = false, features = ["macros"] } diff --git a/networks/ethereum/schnorr/LICENSE b/networks/ethereum/schnorr/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/networks/ethereum/schnorr/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
diff --git a/networks/ethereum/schnorr/README.md b/networks/ethereum/schnorr/README.md new file mode 100644 index 000000000..410cf5205 --- /dev/null +++ b/networks/ethereum/schnorr/README.md @@ -0,0 +1,5 @@ +# Ethereum Schnorr Contract + +An Ethereum contract to verify Schnorr signatures. + +This crate will fail to build if `solc` is not installed and available. diff --git a/networks/ethereum/schnorr/build.rs b/networks/ethereum/schnorr/build.rs new file mode 100644 index 000000000..8e310b60e --- /dev/null +++ b/networks/ethereum/schnorr/build.rs @@ -0,0 +1,3 @@ +fn main() { + build_solidity_contracts::build("contracts", "artifacts").unwrap(); +} diff --git a/processor/ethereum/contracts/contracts/Schnorr.sol b/networks/ethereum/schnorr/contracts/Schnorr.sol similarity index 50% rename from processor/ethereum/contracts/contracts/Schnorr.sol rename to networks/ethereum/schnorr/contracts/Schnorr.sol index 8edcdffd6..1c39c6d7f 100644 --- a/processor/ethereum/contracts/contracts/Schnorr.sol +++ b/networks/ethereum/schnorr/contracts/Schnorr.sol @@ -1,24 +1,20 @@ -// SPDX-License-Identifier: AGPLv3 +// SPDX-License-Identifier: AGPL-3.0-only pragma solidity ^0.8.0; -// see https://github.com/noot/schnorr-verify for implementation details +// See https://github.com/noot/schnorr-verify for implementation details library Schnorr { // secp256k1 group order - uint256 constant public Q = + uint256 constant private Q = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; - // Fixed parity for the public keys used in this contract - // This avoids spending a word passing the parity in a similar style to - // Bitcoin's Taproot - uint8 constant public KEY_PARITY = 27; + // We fix the key to have an even y coordinate to save a word when verifying + // signatures. 
This is comparable to Bitcoin Taproot's encoding of keys + uint8 constant private KEY_PARITY = 27; - error InvalidSOrA(); - error MalformedSignature(); - - // px := public key x-coord, where the public key has a parity of KEY_PARITY - // message := 32-byte hash of the message - // c := schnorr signature challenge - // s := schnorr signature + // px := public key x-coordinate, where the public key has an even y-coordinate + // message := the message signed + // c := Schnorr signature challenge + // s := Schnorr signature solution function verify( bytes32 px, bytes memory message, @@ -31,12 +27,12 @@ library Schnorr { bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q)); bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q)); - // For safety, we want each input to ecrecover to be 0 (sa, px, ca) - // The ecreover precomple checks `r` and `s` (`px` and `ca`) are non-zero + // For safety, we want each input to ecrecover to not be 0 (sa, px, ca) + // The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero // That leaves us to check `sa` are non-zero - if (sa == 0) revert InvalidSOrA(); + if (sa == 0) return false; address R = ecrecover(sa, KEY_PARITY, px, ca); - if (R == address(0)) revert MalformedSignature(); + if (R == address(0)) return false; // Check the signature is correct by rebuilding the challenge return c == keccak256(abi.encodePacked(R, px, message)); diff --git a/processor/ethereum/contracts/contracts/tests/Schnorr.sol b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol similarity index 53% rename from processor/ethereum/contracts/contracts/tests/Schnorr.sol rename to networks/ethereum/schnorr/contracts/tests/Schnorr.sol index 832cd2fee..18a58cf9f 100644 --- a/processor/ethereum/contracts/contracts/tests/Schnorr.sol +++ b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol @@ -1,15 +1,15 @@ -// SPDX-License-Identifier: AGPLv3 +// SPDX-License-Identifier: AGPL-3.0-only pragma solidity ^0.8.0; -import "../../../contracts/Schnorr.sol"; +import "../Schnorr.sol"; contract TestSchnorr { function verify( - bytes32 px, + bytes32 public_key, bytes calldata message, bytes32 c, bytes32 s ) external pure returns (bool) { - return Schnorr.verify(px, message, c, s); + return Schnorr.verify(public_key, message, c, s); } } diff --git a/networks/ethereum/schnorr/src/lib.rs b/networks/ethereum/schnorr/src/lib.rs new file mode 100644 index 000000000..79e2e0946 --- /dev/null +++ b/networks/ethereum/schnorr/src/lib.rs @@ -0,0 +1,15 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] +#![allow(non_snake_case)] + +/// The initialization bytecode of the Schnorr library. +pub const INIT_BYTECODE: &str = include_str!("../artifacts/Schnorr.bin"); + +mod public_key; +pub use public_key::PublicKey; +mod signature; +pub use signature::Signature; + +#[cfg(test)] +mod tests; diff --git a/networks/ethereum/schnorr/src/public_key.rs b/networks/ethereum/schnorr/src/public_key.rs new file mode 100644 index 000000000..b0cc04df9 --- /dev/null +++ b/networks/ethereum/schnorr/src/public_key.rs @@ -0,0 +1,68 @@ +use subtle::Choice; +use group::ff::PrimeField; +use k256::{ + elliptic_curve::{ + ops::Reduce, + point::{AffineCoordinates, DecompressPoint}, + }, + AffinePoint, ProjectivePoint, Scalar, U256 as KU256, +}; + +/// A public key for the Schnorr Solidity library. 
+#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct PublicKey { + A: ProjectivePoint, + x_coordinate: [u8; 32], +} + +impl PublicKey { + /// Construct a new `PublicKey`. + /// + /// This will return None if the provided point isn't eligible to be a public key (due to + /// bounds such as parity). + #[must_use] + pub fn new(A: ProjectivePoint) -> Option { + let affine = A.to_affine(); + + // Only allow even keys to save a word within Ethereum + if bool::from(affine.y_is_odd()) { + None?; + } + + let x_coordinate = affine.x(); + // Return None if the x-coordinate isn't mutual to both fields + // While reductions shouldn't be an issue, it's one less headache/concern to have + // The trivial amount of public keys this makes non-representable aren't a concern + if >::reduce_bytes(&x_coordinate).to_repr() != x_coordinate { + None?; + } + + Some(PublicKey { A, x_coordinate: x_coordinate.into() }) + } + + /// The point for this public key. + #[must_use] + pub fn point(&self) -> ProjectivePoint { + self.A + } + + /// The Ethereum representation of this public key. + #[must_use] + pub fn eth_repr(&self) -> [u8; 32] { + // We only encode the x-coordinate due to fixing the sign of the y-coordinate + self.x_coordinate + } + + /// Construct a PublicKey from its Ethereum representation. + // This wouldn't be possible if the x-coordinate had been reduced + #[must_use] + pub fn from_eth_repr(repr: [u8; 32]) -> Option { + let x_coordinate = repr; + + let y_is_odd = Choice::from(0); + let A_affine = + Option::::from(AffinePoint::decompress(&x_coordinate.into(), y_is_odd))?; + let A = ProjectivePoint::from(A_affine); + Some(PublicKey { A, x_coordinate }) + } +} diff --git a/networks/ethereum/schnorr/src/signature.rs b/networks/ethereum/schnorr/src/signature.rs new file mode 100644 index 000000000..cd467cea6 --- /dev/null +++ b/networks/ethereum/schnorr/src/signature.rs @@ -0,0 +1,95 @@ +use std::io; + +use sha3::{Digest, Keccak256}; + +use group::ff::PrimeField; +use k256::{ + elliptic_curve::{ops::Reduce, sec1::ToEncodedPoint}, + ProjectivePoint, Scalar, U256 as KU256, +}; + +use crate::PublicKey; + +/// A signature for the Schnorr Solidity library. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Signature { + c: Scalar, + s: Scalar, +} + +impl Signature { + /// Construct a new `Signature`. + #[must_use] + pub fn new(c: Scalar, s: Scalar) -> Signature { + Signature { c, s } + } + + /// The challenge for a signature. + #[must_use] + pub fn challenge(R: ProjectivePoint, key: &PublicKey, message: &[u8]) -> Scalar { + // H(R || A || m) + let mut hash = Keccak256::new(); + // We transcript the nonce as an address since ecrecover yields an address + hash.update({ + let uncompressed_encoded_point = R.to_encoded_point(false); + // Skip the prefix byte marking this as uncompressed + let x_and_y_coordinates = &uncompressed_encoded_point.as_ref()[1 ..]; + // Last 20 bytes of the hash of the x and y coordinates + &Keccak256::digest(x_and_y_coordinates)[12 ..] + }); + hash.update(key.eth_repr()); + hash.update(message); + >::reduce_bytes(&hash.finalize()) + } + + /// Verify a signature. + #[must_use] + pub fn verify(&self, key: &PublicKey, message: &[u8]) -> bool { + // Recover the nonce + let R = (ProjectivePoint::GENERATOR * self.s) - (key.point() * self.c); + // Check the challenge + Self::challenge(R, key, message) == self.c + } + + /// The challenge present within this signature. + pub fn c(&self) -> Scalar { + self.c + } + + /// The signature solution present within this signature. 
+  pub fn s(&self) -> Scalar {
+    self.s
+  }
+
+  /// Convert the signature to bytes.
+  #[must_use]
+  pub fn to_bytes(&self) -> [u8; 64] {
+    let mut res = [0; 64];
+    res[.. 32].copy_from_slice(self.c.to_repr().as_ref());
+    res[32 ..].copy_from_slice(self.s.to_repr().as_ref());
+    res
+  }
+
+  /// Write the signature.
+  pub fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
+    writer.write_all(&self.to_bytes())
+  }
+
+  /// Read a signature.
+  pub fn read(reader: &mut impl io::Read) -> io::Result<Signature> {
+    let mut read_F = || -> io::Result<Scalar> {
+      let mut bytes = [0; 32];
+      reader.read_exact(&mut bytes)?;
+      Option::<Scalar>::from(Scalar::from_repr(bytes.into()))
+        .ok_or_else(|| io::Error::other("invalid scalar"))
+    };
+    let c = read_F()?;
+    let s = read_F()?;
+    Ok(Signature { c, s })
+  }
+
+  /// Read a signature from bytes.
+  pub fn from_bytes(bytes: [u8; 64]) -> io::Result<Signature> {
+    Self::read(&mut bytes.as_slice())
+  }
+}
diff --git a/networks/ethereum/schnorr/src/tests.rs b/networks/ethereum/schnorr/src/tests.rs
new file mode 100644
index 000000000..1c3509cc2
--- /dev/null
+++ b/networks/ethereum/schnorr/src/tests.rs
@@ -0,0 +1,103 @@
+use std::sync::Arc;
+
+use rand_core::{RngCore, OsRng};
+
+use group::ff::{Field, PrimeField};
+use k256::{Scalar, ProjectivePoint};
+
+use alloy_core::primitives::Address;
+use alloy_sol_types::SolCall;
+
+use alloy_simple_request_transport::SimpleRequest;
+use alloy_rpc_types_eth::{TransactionInput, TransactionRequest};
+use alloy_rpc_client::ClientBuilder;
+use alloy_provider::{Provider, RootProvider};
+
+use alloy_node_bindings::{Anvil, AnvilInstance};
+
+use crate::{PublicKey, Signature};
+
+#[allow(warnings)]
+#[allow(needless_pass_by_value)]
+#[allow(clippy::all)]
+#[allow(clippy::ignored_unit_patterns)]
+#[allow(clippy::redundant_closure_for_method_calls)]
+mod abi {
+  alloy_sol_types::sol!("contracts/tests/Schnorr.sol");
+  pub(crate) use TestSchnorr::*;
+}
+
+async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
+  let anvil = Anvil::new().spawn();
+
+  let provider = Arc::new(RootProvider::new(
+    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
+  ));
+
+  let mut address = [0; 20];
+  OsRng.fill_bytes(&mut address);
+  let address = Address::from(address);
+  let _: () = provider
+    .raw_request(
+      "anvil_setCode".into(),
+      [address.to_string(), include_str!("../artifacts/TestSchnorr.bin-runtime").to_string()],
+    )
+    .await
+    .unwrap();
+
+  (anvil, provider, address)
+}
+
+async fn call_verify(
+  provider: &RootProvider<SimpleRequest>,
+  address: Address,
+  public_key: &PublicKey,
+  message: &[u8],
+  signature: &Signature,
+) -> bool {
+  let public_key: [u8; 32] = public_key.eth_repr();
+  let c_bytes: [u8; 32] = signature.c().to_repr().into();
+  let s_bytes: [u8; 32] = signature.s().to_repr().into();
+  let call = TransactionRequest::default().to(address).input(TransactionInput::new(
+    abi::verifyCall::new((
+      public_key.into(),
+      message.to_vec().into(),
+      c_bytes.into(),
+      s_bytes.into(),
+    ))
+    .abi_encode()
+    .into(),
+  ));
+  let bytes = provider.call(&call).await.unwrap();
+  let res = abi::verifyCall::abi_decode_returns(&bytes, true).unwrap();
+
+  res._0
+}
+
+#[tokio::test]
+async fn test_verify() {
+  let (_anvil, provider, address) = setup_test().await;
+
+  for _ in 0 ..
100 { + let (key, public_key) = loop { + let key = Scalar::random(&mut OsRng); + if let Some(public_key) = PublicKey::new(ProjectivePoint::GENERATOR * key) { + break (key, public_key); + } + }; + + let nonce = Scalar::random(&mut OsRng); + let mut message = vec![0; 1 + usize::try_from(OsRng.next_u32() % 256).unwrap()]; + OsRng.fill_bytes(&mut message); + + let c = Signature::challenge(ProjectivePoint::GENERATOR * nonce, &public_key, &message); + let s = nonce + (c * key); + + let sig = Signature::new(c, s); + assert!(sig.verify(&public_key, &message)); + assert!(call_verify(&provider, address, &public_key, &message, &sig).await); + // Mutate the message and make sure the signature now fails to verify + message[0] = message[0].wrapping_add(1); + assert!(!call_verify(&provider, address, &public_key, &message, &sig).await); + } +} diff --git a/processor/ethereum/contracts/.gitignore b/processor/ethereum/contracts/.gitignore index 2dccdce9b..de153db3b 100644 --- a/processor/ethereum/contracts/.gitignore +++ b/processor/ethereum/contracts/.gitignore @@ -1,3 +1 @@ -# Solidity build outputs -cache artifacts diff --git a/processor/ethereum/ethereum-serai/src/crypto.rs b/processor/ethereum/ethereum-serai/src/crypto.rs index 326343d86..3366b744c 100644 --- a/processor/ethereum/ethereum-serai/src/crypto.rs +++ b/processor/ethereum/ethereum-serai/src/crypto.rs @@ -62,56 +62,6 @@ pub fn deterministically_sign(tx: &TxLegacy) -> Signed { } } -/// The public key for a Schnorr-signing account. -#[allow(non_snake_case)] -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct PublicKey { - pub(crate) A: ProjectivePoint, - pub(crate) px: Scalar, -} - -impl PublicKey { - /// Construct a new `PublicKey`. - /// - /// This will return None if the provided point isn't eligible to be a public key (due to - /// bounds such as parity). - #[allow(non_snake_case)] - pub fn new(A: ProjectivePoint) -> Option { - let affine = A.to_affine(); - // Only allow even keys to save a word within Ethereum - let is_odd = bool::from(affine.y_is_odd()); - if is_odd { - None?; - } - - let x_coord = affine.x(); - let x_coord_scalar = >::reduce_bytes(&x_coord); - // Return None if a reduction would occur - // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less - // headache/concern to have - // This does ban a trivial amoount of public keys - if x_coord_scalar.to_repr() != x_coord { - None?; - } - - Some(PublicKey { A, px: x_coord_scalar }) - } - - pub fn point(&self) -> ProjectivePoint { - self.A - } - - pub fn eth_repr(&self) -> [u8; 32] { - self.px.to_repr().into() - } - - pub fn from_eth_repr(repr: [u8; 32]) -> Option { - #[allow(non_snake_case)] - let A = Option::::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into(); - Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px }) - } -} - /// The HRAm to use for the Schnorr contract. #[derive(Clone, Default)] pub struct EthereumHram {} @@ -128,58 +78,6 @@ impl Hram for EthereumHram { } } -/// A signature for the Schnorr contract. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Signature { - pub(crate) c: Scalar, - pub(crate) s: Scalar, -} -impl Signature { - pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool { - #[allow(non_snake_case)] - let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c); - EthereumHram::hram(&R, &public_key.A, message) == self.c - } - - /// Construct a new `Signature`. - /// - /// This will return None if the signature is invalid. 
- pub fn new( - public_key: &PublicKey, - message: &[u8], - signature: SchnorrSignature, - ) -> Option { - let c = EthereumHram::hram(&signature.R, &public_key.A, message); - if !signature.verify(public_key.A, c) { - None?; - } - - let res = Signature { c, s: signature.s }; - assert!(res.verify(public_key, message)); - Some(res) - } - - pub fn c(&self) -> Scalar { - self.c - } - pub fn s(&self) -> Scalar { - self.s - } - - pub fn to_bytes(&self) -> [u8; 64] { - let mut res = [0; 64]; - res[.. 32].copy_from_slice(self.c.to_repr().as_ref()); - res[32 ..].copy_from_slice(self.s.to_repr().as_ref()); - res - } - - pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result { - let mut reader = bytes.as_slice(); - let c = Secp256k1::read_F(&mut reader)?; - let s = Secp256k1::read_F(&mut reader)?; - Ok(Signature { c, s }) - } -} impl From<&Signature> for AbiSignature { fn from(sig: &Signature) -> AbiSignature { let c: [u8; 32] = sig.c.to_repr().into(); diff --git a/processor/ethereum/ethereum-serai/src/tests/mod.rs b/processor/ethereum/ethereum-serai/src/tests/mod.rs index bdfa84142..91b03d9b7 100644 --- a/processor/ethereum/ethereum-serai/src/tests/mod.rs +++ b/processor/ethereum/ethereum-serai/src/tests/mod.rs @@ -23,8 +23,6 @@ mod crypto; #[cfg(test)] use contracts::tests as abi; #[cfg(test)] -mod schnorr; -#[cfg(test)] mod router; pub fn key_gen() -> (HashMap>, PublicKey) { diff --git a/processor/ethereum/ethereum-serai/src/tests/schnorr.rs b/processor/ethereum/ethereum-serai/src/tests/schnorr.rs deleted file mode 100644 index 2c72ed19d..000000000 --- a/processor/ethereum/ethereum-serai/src/tests/schnorr.rs +++ /dev/null @@ -1,93 +0,0 @@ -use std::sync::Arc; - -use rand_core::OsRng; - -use group::ff::PrimeField; -use k256::Scalar; - -use frost::{ - curve::Secp256k1, - algorithm::IetfSchnorr, - tests::{algorithm_machines, sign}, -}; - -use alloy_core::primitives::Address; - -use alloy_sol_types::SolCall; - -use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; -use alloy_simple_request_transport::SimpleRequest; -use alloy_rpc_client::ClientBuilder; -use alloy_provider::{Provider, RootProvider}; - -use alloy_node_bindings::{Anvil, AnvilInstance}; - -use crate::{ - Error, - crypto::*, - tests::{key_gen, deploy_contract, abi::schnorr as abi}, -}; - -async fn setup_test() -> (AnvilInstance, Arc>, Address) { - let anvil = Anvil::new().spawn(); - - let provider = RootProvider::new( - ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), - ); - let wallet = anvil.keys()[0].clone().into(); - let client = Arc::new(provider); - - let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap(); - (anvil, client, address) -} - -#[tokio::test] -async fn test_deploy_contract() { - setup_test().await; -} - -pub async fn call_verify( - provider: &RootProvider, - contract: Address, - public_key: &PublicKey, - message: &[u8], - signature: &Signature, -) -> Result<(), Error> { - let px: [u8; 32] = public_key.px.to_repr().into(); - let c_bytes: [u8; 32] = signature.c.to_repr().into(); - let s_bytes: [u8; 32] = signature.s.to_repr().into(); - let call = TransactionRequest::default().to(contract).input(TransactionInput::new( - abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into())) - .abi_encode() - .into(), - )); - let bytes = provider.call(&call).await.map_err(|_| Error::ConnectionError)?; - let res = - abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; - - if res._0 { - Ok(()) - } else { - 
Err(Error::InvalidSignature) - } -} - -#[tokio::test] -async fn test_ecrecover_hack() { - let (_anvil, client, contract) = setup_test().await; - - let (keys, public_key) = key_gen(); - - const MESSAGE: &[u8] = b"Hello, World!"; - - let algo = IetfSchnorr::::ietf(); - let sig = - sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); - let sig = Signature::new(&public_key, MESSAGE, sig).unwrap(); - - call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap(); - // Test an invalid signature fails - let mut sig = sig; - sig.s += Scalar::ONE; - assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err()); -} From c42137ddfb41469206886448196945c9ba11dcaf Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 15 Sep 2024 00:42:05 -0400 Subject: [PATCH 133/179] Remove publish = false --- networks/ethereum/schnorr/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/networks/ethereum/schnorr/Cargo.toml b/networks/ethereum/schnorr/Cargo.toml index 1c5d4f024..d9bb77b0c 100644 --- a/networks/ethereum/schnorr/Cargo.toml +++ b/networks/ethereum/schnorr/Cargo.toml @@ -6,7 +6,6 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/schnorr" authors = ["Luke Parker ", "Elizabeth Binks "] edition = "2021" -publish = false rust-version = "1.79" [package.metadata.docs.rs] From 11edc909a6f31c2f30b8780a5bfcb67a6f1c0bc9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 15 Sep 2024 00:56:38 -0400 Subject: [PATCH 134/179] rust-toolchain 1.81 --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 73cb338ca..d99e65884 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.80" +channel = "1.81" targets = ["wasm32-unknown-unknown"] profile = "minimal" components = ["rust-src", "rustfmt", "clippy"] From 5d2164fc8f8da87a4bf97703472fdedd9401a571 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 15 Sep 2024 00:57:43 -0400 Subject: [PATCH 135/179] OUT_DIR > artifacts --- Cargo.lock | 2 +- networks/ethereum/build-contracts/Cargo.toml | 2 +- networks/ethereum/build-contracts/src/lib.rs | 4 ++-- networks/ethereum/schnorr/Cargo.toml | 2 +- networks/ethereum/schnorr/build.rs | 8 +++++++- networks/ethereum/schnorr/src/lib.rs | 3 ++- networks/ethereum/schnorr/src/tests.rs | 19 +++++++++++++------ 7 files changed, 27 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 353206e94..1338ae260 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1320,7 +1320,7 @@ dependencies = [ [[package]] name = "build-solidity-contracts" -version = "0.1.0" +version = "0.1.1" [[package]] name = "bumpalo" diff --git a/networks/ethereum/build-contracts/Cargo.toml b/networks/ethereum/build-contracts/Cargo.toml index cb47a28d6..41d1f993b 100644 --- a/networks/ethereum/build-contracts/Cargo.toml +++ b/networks/ethereum/build-contracts/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "build-solidity-contracts" -version = "0.1.0" +version = "0.1.1" description = "A helper function to build Solidity contracts" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/build-contracts" diff --git a/networks/ethereum/build-contracts/src/lib.rs b/networks/ethereum/build-contracts/src/lib.rs index 93ab253eb..4fee315af 100644 --- a/networks/ethereum/build-contracts/src/lib.rs +++ b/networks/ethereum/build-contracts/src/lib.rs @@ -4,7 +4,7 @@ use 
std::{path::PathBuf, fs, process::Command}; -/// Build contracts placed in `contracts/`, outputting to `artifacts/`. +/// Build contracts from the specified path, outputting the artifacts to the specified path. /// /// Requires solc 0.8.25. pub fn build(contracts_path: &str, artifacts_path: &str) -> Result<(), String> { @@ -33,7 +33,7 @@ pub fn build(contracts_path: &str, artifacts_path: &str) -> Result<(), String> { #[rustfmt::skip] let args = [ "--base-path", ".", - "-o", "./artifacts", "--overwrite", + "-o", artifacts_path, "--overwrite", "--bin", "--bin-runtime", "--abi", "--via-ir", "--optimize", "--no-color", diff --git a/networks/ethereum/schnorr/Cargo.toml b/networks/ethereum/schnorr/Cargo.toml index d9bb77b0c..5c9c15965 100644 --- a/networks/ethereum/schnorr/Cargo.toml +++ b/networks/ethereum/schnorr/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/schnorr" authors = ["Luke Parker ", "Elizabeth Binks "] edition = "2021" -rust-version = "1.79" +rust-version = "1.81" [package.metadata.docs.rs] all-features = true diff --git a/networks/ethereum/schnorr/build.rs b/networks/ethereum/schnorr/build.rs index 8e310b60e..300f89497 100644 --- a/networks/ethereum/schnorr/build.rs +++ b/networks/ethereum/schnorr/build.rs @@ -1,3 +1,9 @@ +use std::{env, fs}; + fn main() { - build_solidity_contracts::build("contracts", "artifacts").unwrap(); + let artifacts_path = env::var("OUT_DIR").unwrap().to_string() + "/ethereum-schnorr-contract"; + if !fs::exists(&artifacts_path).unwrap() { + fs::create_dir(&artifacts_path).unwrap(); + } + build_solidity_contracts::build("contracts", &artifacts_path).unwrap(); } diff --git a/networks/ethereum/schnorr/src/lib.rs b/networks/ethereum/schnorr/src/lib.rs index 79e2e0946..3f67fbbff 100644 --- a/networks/ethereum/schnorr/src/lib.rs +++ b/networks/ethereum/schnorr/src/lib.rs @@ -4,7 +4,8 @@ #![allow(non_snake_case)] /// The initialization bytecode of the Schnorr library. 
-pub const INIT_BYTECODE: &str = include_str!("../artifacts/Schnorr.bin"); +pub const INIT_BYTECODE: &str = + include_str!(concat!(env!("OUT_DIR"), "/ethereum-schnorr-contract/Schnorr.bin")); mod public_key; pub use public_key::PublicKey; diff --git a/networks/ethereum/schnorr/src/tests.rs b/networks/ethereum/schnorr/src/tests.rs index 1c3509cc2..62bb85424 100644 --- a/networks/ethereum/schnorr/src/tests.rs +++ b/networks/ethereum/schnorr/src/tests.rs @@ -17,11 +17,11 @@ use alloy_node_bindings::{Anvil, AnvilInstance}; use crate::{PublicKey, Signature}; -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] mod abi { alloy_sol_types::sol!("contracts/tests/Schnorr.sol"); pub(crate) use TestSchnorr::*; @@ -40,7 +40,14 @@ async fn setup_test() -> (AnvilInstance, Arc>, Addre let _: () = provider .raw_request( "anvil_setCode".into(), - [address.to_string(), include_str!("../artifacts/TestSchnorr.bin-runtime").to_string()], + [ + address.to_string(), + include_str!(concat!( + env!("OUT_DIR"), + "/ethereum-schnorr-contract/TestSchnorr.bin-runtime" + )) + .to_string(), + ], ) .await .unwrap(); From cb5755fbe84bea93c31378fdb51319a1aafc1ccd Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 15 Sep 2024 02:11:49 -0400 Subject: [PATCH 136/179] Add tests for the premise of the Schnorr contract to the Schnorr crate --- networks/ethereum/schnorr/Cargo.toml | 5 +- .../ethereum/schnorr/contracts/Schnorr.sol | 19 ++- networks/ethereum/schnorr/src/public_key.rs | 8 +- .../schnorr/src/{tests.rs => tests/mod.rs} | 2 + .../ethereum/schnorr/src/tests/premise.rs | 111 ++++++++++++++++++ .../ethereum-serai/src/tests/crypto.rs | 76 ------------ 6 files changed, 136 insertions(+), 85 deletions(-) rename networks/ethereum/schnorr/src/{tests.rs => tests/mod.rs} (99%) create mode 100644 networks/ethereum/schnorr/src/tests/premise.rs diff --git a/networks/ethereum/schnorr/Cargo.toml b/networks/ethereum/schnorr/Cargo.toml index 5c9c15965..2e9597c86 100644 --- a/networks/ethereum/schnorr/Cargo.toml +++ b/networks/ethereum/schnorr/Cargo.toml @@ -21,15 +21,16 @@ sha3 = { version = "0.10", default-features = false, features = ["std"] } group = { version = "0.13", default-features = false, features = ["alloc"] } k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic"] } -alloy-sol-types = { version = "0.8", default-features = false } - [build-dependencies] build-solidity-contracts = { path = "../build-contracts", version = "0.1" } [dev-dependencies] rand_core = { version = "0.6", default-features = false, features = ["std"] } +k256 = { version = "^0.13.1", default-features = false, features = ["ecdsa"] } + alloy-core = { version = "0.8", default-features = false } +alloy-sol-types = { version = "0.8", default-features = false } alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } alloy-rpc-types-eth = { version = "0.3", default-features = false } diff --git a/networks/ethereum/schnorr/contracts/Schnorr.sol b/networks/ethereum/schnorr/contracts/Schnorr.sol index 1c39c6d7f..b13696cff 100644 --- a/networks/ethereum/schnorr/contracts/Schnorr.sol +++ b/networks/ethereum/schnorr/contracts/Schnorr.sol @@ -7,8 +7,9 @@ library Schnorr { 
   uint256 constant private Q = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;
 
-  // We fix the key to have an even y coordinate to save a word when verifying
-  // signatures. This is comparable to Bitcoin Taproot's encoding of keys
+  // We fix the key to have:
+  // 1) An even y-coordinate
+  // 2) An x-coordinate < Q
   uint8 constant private KEY_PARITY = 27;
 
   // px := public key x-coordinate, where the public key has an even y-coordinate
@@ -27,11 +28,17 @@
     bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
     bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
 
-    // For safety, we want each input to ecrecover to not be 0 (sa, px, ca)
-    // The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero
-    // That leaves us to check `sa` are non-zero
-    if (sa == 0) return false;
+    /*
+      The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero,
+      banning the two keys with zero for their x-coordinate and zero challenge.
+      Each has negligible probability of occurring (assuming zero x-coordinates
+      are even on-curve in the first place).
+
+      `sa` is not checked to be non-zero yet it does not need to be. The inverse
+      of it is never taken.
+    */
     address R = ecrecover(sa, KEY_PARITY, px, ca);
+    // The ecrecover failed
     if (R == address(0)) return false;
 
     // Check the signature is correct by rebuilding the challenge
diff --git a/networks/ethereum/schnorr/src/public_key.rs b/networks/ethereum/schnorr/src/public_key.rs
index b0cc04df9..3c39552fe 100644
--- a/networks/ethereum/schnorr/src/public_key.rs
+++ b/networks/ethereum/schnorr/src/public_key.rs
@@ -37,7 +37,13 @@ impl PublicKey {
       None?;
     }
 
-    Some(PublicKey { A, x_coordinate: x_coordinate.into() })
+    let x_coordinate: [u8; 32] = x_coordinate.into();
+    // Returns None if the x-coordinate is 0
+    // Such keys will never have their signatures able to be verified
+    if x_coordinate == [0; 32] {
+      None?;
+    }
+    Some(PublicKey { A, x_coordinate })
   }
 
   /// The point for this public key.
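Taken together, the checks in Schnorr.sol and PublicKey::new now pin down an eligible key: an even y-coordinate, an x-coordinate which is canonical as a scalar (< Q), and a non-zero x-coordinate. A minimal sketch of that eligibility rule as a standalone predicate (not part of the patch; it assumes only the k256 and group APIs the crate already imports):

use group::ff::PrimeField;
use k256::{
  elliptic_curve::{ops::Reduce, point::AffineCoordinates},
  ProjectivePoint, Scalar, U256 as KU256,
};

// Mirrors PublicKey::new as of this commit: even y-coordinate, x-coordinate
// canonical as a scalar, and non-zero x-coordinate.
fn eligible_as_schnorr_key(point: ProjectivePoint) -> bool {
  let affine = point.to_affine();
  // The contract hardcodes the parity byte, so only even keys are representable
  if bool::from(affine.y_is_odd()) {
    return false;
  }
  let x_coordinate = affine.x();
  // The x-coordinate must survive reduction into a scalar unchanged
  if <Scalar as Reduce<KU256>>::reduce_bytes(&x_coordinate).to_repr() != x_coordinate {
    return false;
  }
  // A zero x-coordinate would zero the inputs to ecrecover, so it's banned
  let x_coordinate: [u8; 32] = x_coordinate.into();
  x_coordinate != [0; 32]
}

Since the y-coordinate's sign is fixed, the 32-byte x-coordinate alone is a complete on-chain representation, which is what eth_repr returns.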
diff --git a/networks/ethereum/schnorr/src/tests.rs b/networks/ethereum/schnorr/src/tests/mod.rs similarity index 99% rename from networks/ethereum/schnorr/src/tests.rs rename to networks/ethereum/schnorr/src/tests/mod.rs index 62bb85424..90774e30b 100644 --- a/networks/ethereum/schnorr/src/tests.rs +++ b/networks/ethereum/schnorr/src/tests/mod.rs @@ -17,6 +17,8 @@ use alloy_node_bindings::{Anvil, AnvilInstance}; use crate::{PublicKey, Signature}; +mod premise; + #[expect(warnings)] #[expect(needless_pass_by_value)] #[expect(clippy::all)] diff --git a/networks/ethereum/schnorr/src/tests/premise.rs b/networks/ethereum/schnorr/src/tests/premise.rs new file mode 100644 index 000000000..01571a437 --- /dev/null +++ b/networks/ethereum/schnorr/src/tests/premise.rs @@ -0,0 +1,111 @@ +use rand_core::{RngCore, OsRng}; + +use sha3::{Digest, Keccak256}; +use group::ff::{Field, PrimeField}; +use k256::{ + elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint}, + ecdsa::{ + self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey, + }, + U256, Scalar, ProjectivePoint, +}; + +use alloy_core::primitives::Address; + +use crate::{PublicKey, Signature}; + +// The ecrecover opcode, yet with if the y is odd replacing v +fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> { + let sig = ecdsa::Signature::from_scalars(r, s).ok()?; + let message: [u8; 32] = message.to_repr().into(); + alloy_core::primitives::Signature::from_signature_and_parity( + sig, + alloy_core::primitives::Parity::Parity(odd_y), + ) + .ok()? + .recover_address_from_prehash(&alloy_core::primitives::B256::from(message)) + .ok() + .map(Into::into) +} + +// Test ecrecover behaves as expected +#[test] +fn test_ecrecover() { + let private = SigningKey::random(&mut OsRng); + let public = VerifyingKey::from(&private); + + // Sign the signature + const MESSAGE: &[u8] = b"Hello, World!"; + let (sig, recovery_id) = private + .as_nonzero_scalar() + .try_sign_prehashed(Scalar::random(&mut OsRng), &Keccak256::digest(MESSAGE)) + .unwrap(); + + // Sanity check the signature verifies + #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result + { + assert_eq!(public.verify_prehash(&Keccak256::digest(MESSAGE), &sig).unwrap(), ()); + } + + // Perform the ecrecover + assert_eq!( + ecrecover( + >::reduce_bytes(&Keccak256::digest(MESSAGE)), + u8::from(recovery_id.unwrap().is_y_odd()) == 1, + *sig.r(), + *sig.s() + ) + .unwrap(), + Address::from_raw_public_key(&public.to_encoded_point(false).as_ref()[1 ..]), + ); +} + +// Test that we can recover the nonce from a Schnorr signature via a call to ecrecover, the premise +// of efficiently verifying Schnorr signatures in an Ethereum contract +#[test] +fn nonce_recovery_via_ecrecover() { + let (key, public_key) = loop { + let key = Scalar::random(&mut OsRng); + if let Some(public_key) = PublicKey::new(ProjectivePoint::GENERATOR * key) { + break (key, public_key); + } + }; + + let nonce = Scalar::random(&mut OsRng); + let R = ProjectivePoint::GENERATOR * nonce; + + let mut message = vec![0; 1 + usize::try_from(OsRng.next_u32() % 256).unwrap()]; + OsRng.fill_bytes(&mut message); + + let c = Signature::challenge(R, &public_key, &message); + let s = nonce + (c * key); + + /* + An ECDSA signature is `(r, s)` with `s = (H(m) + rx) / k`, where: + - `m` is the message + - `r` is the x-coordinate of the nonce, reduced into a scalar + - `x` is the private key + - `k` is the nonce + + We fix the recovery ID to be for the 
even key with an x-coordinate < the order. Accordingly, + `kG = Point::from(Even, r)`. This enables recovering the public key via + `((s Point::from(Even, r)) - H(m)G) / r`. + + We want to calculate `R` from `(c, s)` where `s = r + cx`. That means we need to calculate + `sG - cX`. + + We can calculate `sG - cX` with `((s Point::from(Even, r)) - H(m)G) / r` if: + - Latter `r` = `X.x` + - Latter `s` = `c` + - `H(m)` = former `s` + This gets us to `(cX - sG) / X.x`. If we additionally scale the latter's `s, H(m)` values (the + former's `c, s` values) by `X.x`, we get `cX - sG`. This just requires negating each to achieve + `sG - cX`. + */ + let x_scalar = >::reduce_bytes(&public_key.point().to_affine().x()); + let sa = -(s * x_scalar); + let ca = -(c * x_scalar); + + let q = ecrecover(sa, false, x_scalar, ca).unwrap(); + assert_eq!(q, Address::from_raw_public_key(&R.to_encoded_point(false).as_ref()[1 ..])); +} diff --git a/processor/ethereum/ethereum-serai/src/tests/crypto.rs b/processor/ethereum/ethereum-serai/src/tests/crypto.rs index a668b2d6d..a4f86ae9f 100644 --- a/processor/ethereum/ethereum-serai/src/tests/crypto.rs +++ b/processor/ethereum/ethereum-serai/src/tests/crypto.rs @@ -16,54 +16,6 @@ use frost::{ use crate::{crypto::*, tests::key_gen}; -// The ecrecover opcode, yet with parity replacing v -pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> { - let sig = ecdsa::Signature::from_scalars(r, s).ok()?; - let message: [u8; 32] = message.to_repr().into(); - alloy_core::primitives::Signature::from_signature_and_parity( - sig, - alloy_core::primitives::Parity::Parity(odd_y), - ) - .ok()? - .recover_address_from_prehash(&alloy_core::primitives::B256::from(message)) - .ok() - .map(Into::into) -} - -#[test] -fn test_ecrecover() { - let private = SigningKey::random(&mut OsRng); - let public = VerifyingKey::from(&private); - - // Sign the signature - const MESSAGE: &[u8] = b"Hello, World!"; - let (sig, recovery_id) = private - .as_nonzero_scalar() - .try_sign_prehashed( - ::F::random(&mut OsRng), - &keccak256(MESSAGE).into(), - ) - .unwrap(); - - // Sanity check the signature verifies - #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result - { - assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ()); - } - - // Perform the ecrecover - assert_eq!( - ecrecover( - hash_to_scalar(MESSAGE), - u8::from(recovery_id.unwrap().is_y_odd()) == 1, - *sig.r(), - *sig.s() - ) - .unwrap(), - address(&ProjectivePoint::from(public.as_affine())) - ); -} - // Run the sign test with the EthereumHram #[test] fn test_signing() { @@ -75,31 +27,3 @@ fn test_signing() { let _sig = sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); } - -#[allow(non_snake_case)] -pub fn preprocess_signature_for_ecrecover( - R: ProjectivePoint, - public_key: &PublicKey, - m: &[u8], - s: Scalar, -) -> (Scalar, Scalar) { - let c = EthereumHram::hram(&R, &public_key.A, m); - let sa = -(s * public_key.px); - let ca = -(c * public_key.px); - (sa, ca) -} - -#[test] -fn test_ecrecover_hack() { - let (keys, public_key) = key_gen(); - - const MESSAGE: &[u8] = b"Hello, World!"; - - let algo = IetfSchnorr::::ietf(); - let sig = - sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); - - let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s); - let q = ecrecover(sa, false, public_key.px, ca).unwrap(); - assert_eq!(q, address(&sig.R)); -} From 
b70c91e63c383a39edbbb1751e650b6865606a94 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 15 Sep 2024 05:56:57 -0400 Subject: [PATCH 137/179] Remove the Sandbox contract If instead of intaking calls, we intake code, we can deploy a fresh contract which makes arbitrary calls *without* attempting to build our abstraction layer over the concept. This should have the same gas costs, as we still have one contract deployment. The new contract only has a constructor, so it should have no actual code and beat the Sandbox in that regard? We do have to call into ourselves to meter the gas, yet we already had to call into the deployed Sandbox to achieve that. Also re-defines the OutInstruction to include tokens, implements OutInstruction-specified gas amounts, bumps the Solidity version, and other such misc changes. --- .github/actions/build-dependencies/action.yml | 4 +- networks/ethereum/build-contracts/src/lib.rs | 24 +- networks/ethereum/schnorr/.gitignore | 1 - networks/ethereum/schnorr/build.rs | 2 +- .../ethereum/schnorr/contracts/Schnorr.sol | 2 +- .../schnorr/contracts/tests/Schnorr.sol | 2 +- orchestration/src/processor.rs | 4 +- processor/ethereum/contracts/Cargo.toml | 2 +- processor/ethereum/contracts/build.rs | 7 +- .../ethereum/contracts/contracts/Deployer.sol | 4 +- .../ethereum/contracts/contracts/IERC20.sol | 2 +- .../ethereum/contracts/contracts/Router.sol | 290 +++++++++--------- .../ethereum/contracts/contracts/Sandbox.sol | 48 --- .../contracts/contracts/tests/ERC20.sol | 4 +- processor/ethereum/contracts/src/lib.rs | 2 - processor/ethereum/contracts/src/tests.rs | 13 - 16 files changed, 176 insertions(+), 235 deletions(-) delete mode 100644 networks/ethereum/schnorr/.gitignore delete mode 100644 processor/ethereum/contracts/contracts/Sandbox.sol delete mode 100644 processor/ethereum/contracts/src/tests.rs diff --git a/.github/actions/build-dependencies/action.yml b/.github/actions/build-dependencies/action.yml index 5994b7232..47d775222 100644 --- a/.github/actions/build-dependencies/action.yml +++ b/.github/actions/build-dependencies/action.yml @@ -42,8 +42,8 @@ runs: shell: bash run: | cargo install svm-rs - svm install 0.8.25 - svm use 0.8.25 + svm install 0.8.26 + svm use 0.8.26 # - name: Cache Rust # uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 diff --git a/networks/ethereum/build-contracts/src/lib.rs b/networks/ethereum/build-contracts/src/lib.rs index 4fee315af..5213059e2 100644 --- a/networks/ethereum/build-contracts/src/lib.rs +++ b/networks/ethereum/build-contracts/src/lib.rs @@ -6,8 +6,12 @@ use std::{path::PathBuf, fs, process::Command}; /// Build contracts from the specified path, outputting the artifacts to the specified path. /// -/// Requires solc 0.8.25. -pub fn build(contracts_path: &str, artifacts_path: &str) -> Result<(), String> { +/// Requires solc 0.8.26. +pub fn build( + include_paths: &[&str], + contracts_path: &str, + artifacts_path: &str, +) -> Result<(), String> { println!("cargo:rerun-if-changed={contracts_path}/*"); println!("cargo:rerun-if-changed={artifacts_path}/*"); @@ -24,20 +28,24 @@ pub fn build(contracts_path: &str, artifacts_path: &str) -> Result<(), String> { if let Some(version) = line.strip_prefix("Version: ") { let version = version.split('+').next().ok_or_else(|| "no value present on line".to_string())?; - if version != "0.8.25" { - Err(format!("version was {version}, 0.8.25 required"))? + if version != "0.8.26" { + Err(format!("version was {version}, 0.8.26 required"))? 
} } } #[rustfmt::skip] - let args = [ + let mut args = vec![ "--base-path", ".", "-o", artifacts_path, "--overwrite", "--bin", "--bin-runtime", "--abi", "--via-ir", "--optimize", "--no-color", ]; + for include_path in include_paths { + args.push("--include-path"); + args.push(include_path); + } let mut args = args.into_iter().map(str::to_string).collect::>(); let mut queue = vec![PathBuf::from(contracts_path)]; @@ -70,17 +78,17 @@ pub fn build(contracts_path: &str, artifacts_path: &str) -> Result<(), String> { } let solc = Command::new("solc") - .args(args) + .args(args.clone()) .output() .map_err(|_| "couldn't fetch solc output".to_string())?; let stderr = String::from_utf8(solc.stderr).map_err(|_| "solc stderr wasn't UTF-8".to_string())?; if !solc.status.success() { - Err(format!("solc didn't successfully execute: {stderr}"))?; + Err(format!("solc (`{}`) didn't successfully execute: {stderr}", args.join(" ")))?; } for line in stderr.lines() { if line.contains("Error:") { - Err(format!("solc output had error: {stderr}"))?; + Err(format!("solc (`{}`) output had error: {stderr}", args.join(" ")))?; } } diff --git a/networks/ethereum/schnorr/.gitignore b/networks/ethereum/schnorr/.gitignore deleted file mode 100644 index de153db3b..000000000 --- a/networks/ethereum/schnorr/.gitignore +++ /dev/null @@ -1 +0,0 @@ -artifacts diff --git a/networks/ethereum/schnorr/build.rs b/networks/ethereum/schnorr/build.rs index 300f89497..7b7c30fd2 100644 --- a/networks/ethereum/schnorr/build.rs +++ b/networks/ethereum/schnorr/build.rs @@ -5,5 +5,5 @@ fn main() { if !fs::exists(&artifacts_path).unwrap() { fs::create_dir(&artifacts_path).unwrap(); } - build_solidity_contracts::build("contracts", &artifacts_path).unwrap(); + build_solidity_contracts::build(&[], "contracts", &artifacts_path).unwrap(); } diff --git a/networks/ethereum/schnorr/contracts/Schnorr.sol b/networks/ethereum/schnorr/contracts/Schnorr.sol index b13696cff..182e90e35 100644 --- a/networks/ethereum/schnorr/contracts/Schnorr.sol +++ b/networks/ethereum/schnorr/contracts/Schnorr.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: AGPL-3.0-only -pragma solidity ^0.8.0; +pragma solidity ^0.8.26; // See https://github.com/noot/schnorr-verify for implementation details library Schnorr { diff --git a/networks/ethereum/schnorr/contracts/tests/Schnorr.sol b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol index 18a58cf9f..26be683dc 100644 --- a/networks/ethereum/schnorr/contracts/tests/Schnorr.sol +++ b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: AGPL-3.0-only -pragma solidity ^0.8.0; +pragma solidity ^0.8.26; import "../Schnorr.sol"; diff --git a/orchestration/src/processor.rs b/orchestration/src/processor.rs index 3387c4ede..00f9243dd 100644 --- a/orchestration/src/processor.rs +++ b/orchestration/src/processor.rs @@ -21,8 +21,8 @@ pub fn processor( if coin == "ethereum" { r#" RUN cargo install svm-rs -RUN svm install 0.8.25 -RUN svm use 0.8.25 +RUN svm install 0.8.26 +RUN svm use 0.8.26 "# } else { "" diff --git a/processor/ethereum/contracts/Cargo.toml b/processor/ethereum/contracts/Cargo.toml index f09eb938e..64fbccad6 100644 --- a/processor/ethereum/contracts/Cargo.toml +++ b/processor/ethereum/contracts/Cargo.toml @@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -alloy-sol-types = { version = "0.8", default-features = false } +alloy-sol-types = { version = "0.8", default-features = false, features = ["json"] } [build-dependencies] 
build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts" } diff --git a/processor/ethereum/contracts/build.rs b/processor/ethereum/contracts/build.rs index 8e310b60e..0af416084 100644 --- a/processor/ethereum/contracts/build.rs +++ b/processor/ethereum/contracts/build.rs @@ -1,3 +1,8 @@ fn main() { - build_solidity_contracts::build("contracts", "artifacts").unwrap(); + build_solidity_contracts::build( + &["../../../networks/ethereum/schnorr/contracts"], + "contracts", + "artifacts", + ) + .unwrap(); } diff --git a/processor/ethereum/contracts/contracts/Deployer.sol b/processor/ethereum/contracts/contracts/Deployer.sol index 475be4c1b..1c05e38af 100644 --- a/processor/ethereum/contracts/contracts/Deployer.sol +++ b/processor/ethereum/contracts/contracts/Deployer.sol @@ -1,5 +1,5 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; /* The expected deployment process of the Router is as follows: diff --git a/processor/ethereum/contracts/contracts/IERC20.sol b/processor/ethereum/contracts/contracts/IERC20.sol index 70f1f93c9..c2de5ca02 100644 --- a/processor/ethereum/contracts/contracts/IERC20.sol +++ b/processor/ethereum/contracts/contracts/IERC20.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: CC0 -pragma solidity ^0.8.0; +pragma solidity ^0.8.26; interface IERC20 { event Transfer(address indexed from, address indexed to, uint256 value); diff --git a/processor/ethereum/contracts/contracts/Router.sol b/processor/ethereum/contracts/contracts/Router.sol index c5e1efa2f..65541a108 100644 --- a/processor/ethereum/contracts/contracts/Router.sol +++ b/processor/ethereum/contracts/contracts/Router.sol @@ -1,23 +1,31 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; import "./IERC20.sol"; -import "./Schnorr.sol"; -import "./Sandbox.sol"; +import "Schnorr.sol"; +// _ is used as a prefix for internal functions and smart-contract-scoped variables contract Router { - // Nonce is incremented for each batch of transactions executed/key update - uint256 public nonce; + // Nonce is incremented for each command executed, preventing replays + uint256 private _nonce; - // Current public key's x-coordinate - // This key must always have the parity defined within the Schnorr contract - bytes32 public seraiKey; + // The nonce which will be used for the smart contracts we deploy, enabling + // predicting their addresses + uint256 private _smartContractNonce; - struct OutInstruction { - address to; - Call[] calls; + // The current public key, defined as per the Schnorr library + bytes32 private _seraiKey; + + enum DestinationType { + Address, + Code + } + struct OutInstruction { + DestinationType destinationType; + bytes destination; + address coin; uint256 value; } @@ -26,70 +34,42 @@ contract Router { bytes32 s; } - event SeraiKeyUpdated( - uint256 indexed nonce, - bytes32 indexed key, - Signature signature - ); - event InInstruction( - address indexed from, - address indexed coin, - uint256 amount, - bytes instruction - ); - // success is a uint256 representing a bitfield of transaction successes - event Executed( - uint256 indexed nonce, - bytes32 indexed batch, - uint256 success, - Signature signature - ); - - // error types - error InvalidKey(); + event SeraiKeyUpdated(uint256 indexed nonce, bytes32 indexed key); + event InInstruction(address indexed from, address indexed coin, uint256 amount, bytes instruction); + event 
Executed(uint256 indexed nonce, bytes32 indexed batch); + error InvalidSignature(); error InvalidAmount(); error FailedTransfer(); - error TooManyTransactions(); - - modifier _updateSeraiKeyAtEndOfFn( - uint256 _nonce, - bytes32 key, - Signature memory sig - ) { - if ( - (key == bytes32(0)) || - ((bytes32(uint256(key) % Schnorr.Q)) != key) - ) { - revert InvalidKey(); - } + // Update the Serai key at the end of the current function. + modifier _updateSeraiKeyAtEndOfFn(uint256 nonceUpdatedWith, bytes32 newSeraiKey) { + // Run the function itself. _; - seraiKey = key; - emit SeraiKeyUpdated(_nonce, key, sig); + // Update the key. + _seraiKey = newSeraiKey; + emit SeraiKeyUpdated(nonceUpdatedWith, newSeraiKey); } - constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn( - 0, - _seraiKey, - Signature({ c: bytes32(0), s: bytes32(0) }) - ) { - nonce = 1; + constructor(bytes32 initialSeraiKey) _updateSeraiKeyAtEndOfFn(0, initialSeraiKey) { + // We consumed nonce 0 when setting the initial Serai key + _nonce = 1; + // Nonces are incremented by 1 upon account creation, prior to any code execution, per EIP-161 + // This is incompatible with any networks which don't have their nonces start at 0 + _smartContractNonce = 1; } - // updateSeraiKey validates the given Schnorr signature against the current - // public key, and if successful, updates the contract's public key to the - // given one. + // updateSeraiKey validates the given Schnorr signature against the current public key, and if + // successful, updates the contract's public key to the one specified. function updateSeraiKey( - bytes32 _seraiKey, - Signature calldata sig - ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) { - bytes memory message = - abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey); - nonce++; - - if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { + bytes32 newSeraiKey, + Signature calldata signature + ) external _updateSeraiKeyAtEndOfFn(_nonce, newSeraiKey) { + bytes memory message = abi.encodePacked("updateSeraiKey", block.chainid, _nonce, newSeraiKey); + _nonce++; + + if (!Schnorr.verify(_seraiKey, message, signature.c, signature.s)) { revert InvalidSignature(); } } @@ -114,109 +94,121 @@ contract Router { ) ); - // Require there was nothing returned, which is done by some non-standard - // tokens, or that the ERC20 contract did in fact return true - bool nonStandardResOrTrue = - (res.length == 0) || abi.decode(res, (bool)); + // Require there was nothing returned, which is done by some non-standard tokens, or that the + // ERC20 contract did in fact return true + bool nonStandardResOrTrue = (res.length == 0) || abi.decode(res, (bool)); if (!(success && nonStandardResOrTrue)) { revert FailedTransfer(); } } /* - Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. - The amount instructed to transfer may not actually be the amount - transferred. - - If we add nonReentrant to every single function which can effect the - balance, we can check the amount exactly matches. This prevents transfers of - less value than expected occurring, at least, not without an additional - transfer to top up the difference (which isn't routed through this contract - and accordingly isn't trying to artificially create events). - - If we don't add nonReentrant, a transfer can be started, and then a new - transfer for the difference can follow it up (again and again until a - rounding error is reached). 
This contract would believe all transfers were
-    done in full, despite each only being done in part (except for the last
-    one).
-
-    Given fee-on-transfer tokens aren't intended to be supported, the only
-    token planned to be supported is Dai and it doesn't have any fee-on-transfer
-    logic, fee-on-transfer tokens aren't even able to be supported at this time,
-    we simply classify this entire class of tokens as non-standard
-    implementations which induce undefined behavior. It is the Serai network's
-    role not to add support for any non-standard implementations.
+      Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. The amount
+      instructed to be transferred may not actually be the amount transferred.
+
+      If we add nonReentrant to every single function which can affect the balance, we can check
+      the amount exactly matches. This prevents transfers of less value than expected from
+      occurring, at least not without an additional transfer to top up the difference (which isn't
+      routed through this contract and accordingly isn't trying to artificially create events from
+      this contract).
+
+      If we don't add nonReentrant, a transfer can be started, and then a new transfer for the
+      difference can follow it up (again and again until a rounding error is reached). This
+      contract would believe all transfers were done in full, despite each only being done in part
+      (except for the last one).
+
+      Given that fee-on-transfer tokens aren't intended to be supported, that the only token
+      actively planned to be supported is Dai (which doesn't have any fee-on-transfer logic), and
+      that fee-on-transfer tokens aren't even able to be supported at this time by the larger
+      Serai network, we simply classify this entire class of tokens as non-standard
+      implementations which induce undefined behavior.
+
+      It is the Serai network's role not to add support for any non-standard implementations.
     */
     emit InInstruction(msg.sender, coin, amount, instruction);
   }
 
-  // execute accepts a list of transactions to execute as well as a signature.
-  // if signature verification passes, the given transactions are executed.
-  // if signature verification fails, this function will revert.
-  function execute(
-    OutInstruction[] calldata transactions,
-    Signature calldata sig
-  ) external {
-    if (transactions.length > 256) {
-      revert TooManyTransactions();
+  // Perform a transfer out
+  function _transferOut(address to, address coin, uint256 value) private {
+    /*
+      We purposely do not check if these calls succeed. A call either succeeded, and there's no
+      problem, or the call failed due to:
+        A) An insolvency
+        B) A malicious receiver
+        C) A non-standard token
+      A is an invariant, B should be dropped, C is something out of the control of this contract.
+      It is again the Serai network's role to not add support for any non-standard tokens.
+    */
+    if (coin == address(0)) {
+      // Enough gas to service the transfer and a minimal amount of logic
+      to.call{ value: value, gas: 5_000 }("");
+    } else {
+      coin.call{ gas: 100_000 }(abi.encodeWithSelector(IERC20.transfer.selector, msg.sender, value));
+    }
+  }
 
-    bytes memory message =
-      abi.encode("execute", block.chainid, nonce, transactions);
-    uint256 executed_with_nonce = nonce;
-    // This prevents re-entrancy from causing double spends yet does allow
-    // out-of-order execution via re-entrancy
-    nonce++;
+  /*
+    Serai supports arbitrary calls out via deploying smart contracts (with user-specified code),
+    letting them execute whatever calls they're coded for. Since we can't meter CREATE, we call 
Since we can't meter CREATE, we call + CREATE from this function which we call not internally, but with CALL (which we can meter). + */ + function arbitaryCallOut(bytes memory code) external { + // Because we're creating a contract, increment our nonce + _smartContractNonce += 1; + + address contractAddress; + assembly { + contractAddress := create(0, add(code, 0x20), mload(code)) + } + } - if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { + // Execute a list of transactions if they were signed by the current key with the current nonce + function execute(OutInstruction[] calldata transactions, Signature calldata signature) external { + // Verify the signature + bytes memory message = abi.encode("execute", block.chainid, _nonce, transactions); + if (!Schnorr.verify(_seraiKey, message, signature.c, signature.s)) { revert InvalidSignature(); } - uint256 successes; + // Since the signature was verified, perform execution + emit Executed(_nonce, keccak256(message)); + // While this is sufficient to prevent replays, it's still technically possible for instructions + // from later batches to be executed before these instructions upon re-entrancy + _nonce++; + for (uint256 i = 0; i < transactions.length; i++) { - bool success; - - // If there are no calls, send to `to` the value - if (transactions[i].calls.length == 0) { - (success, ) = transactions[i].to.call{ - value: transactions[i].value, - gas: 5_000 - }(""); + // If the destination is an address, we perform a direct transfer + if (transactions[i].destinationType == DestinationType.Address) { + // This may cause a panic and the contract to become stuck if the destination isn't actually + // 20 bytes. Serai is trusted to not pass a malformed destination + (address destination) = abi.decode(transactions[i].destination, (address)); + _transferOut(destination, transactions[i].coin, transactions[i].value); } else { - // If there are calls, ignore `to`. Deploy a new Sandbox and proxy the - // calls through that - // - // We could use a single sandbox in order to reduce gas costs, yet that - // risks one person creating an approval that's hooked before another - // user's intended action executes, in order to drain their coins - // - // While technically, that would be a flaw in the sandboxed flow, this - // is robust and prevents such flaws from being possible - // - // We also don't want people to set state via the Sandbox and expect it - // future available when anyone else could set a distinct value - Sandbox sandbox = new Sandbox(); - (success, ) = address(sandbox).call{ - value: transactions[i].value, - // TODO: Have the Call specify the gas up front - gas: 350_000 - }( - abi.encodeWithSelector( - Sandbox.sandbox.selector, - transactions[i].calls - ) - ); - } - - assembly { - successes := or(successes, shl(i, success)) + // The destination is a piece of initcode. 
We calculate the hash of the will-be contract, + // transfer to it, and then run the initcode + address nextAddress = + address(uint160(uint256(keccak256(abi.encode(address(this), _smartContractNonce))))); + + // Perform the transfer + _transferOut(nextAddress, transactions[i].coin, transactions[i].value); + + // Perform the calls with a set gas budget + (uint24 gas, bytes memory code) = abi.decode(transactions[i].destination, (uint24, bytes)); + address(this).call{ + gas: gas + }(abi.encodeWithSelector(Router.arbitaryCallOut.selector, code)); } } - emit Executed( - executed_with_nonce, - keccak256(message), - successes, - sig - ); + } + + function nonce() external view returns (uint256) { + return _nonce; + } + + function smartContractNonce() external view returns (uint256) { + return _smartContractNonce; + } + + function seraiKey() external view returns (bytes32) { + return _seraiKey; } } diff --git a/processor/ethereum/contracts/contracts/Sandbox.sol b/processor/ethereum/contracts/contracts/Sandbox.sol deleted file mode 100644 index a82a3afda..000000000 --- a/processor/ethereum/contracts/contracts/Sandbox.sol +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.24; - -struct Call { - address to; - uint256 value; - bytes data; -} - -// A minimal sandbox focused on gas efficiency. -// -// The first call is executed if any of the calls fail, making it a fallback. -// All other calls are executed sequentially. -contract Sandbox { - error AlreadyCalled(); - error CallsFailed(); - - function sandbox(Call[] calldata calls) external payable { - // Prevent re-entrancy due to this executing arbitrary calls from anyone - // and anywhere - bool called; - assembly { called := tload(0) } - if (called) { - revert AlreadyCalled(); - } - assembly { tstore(0, 1) } - - // Execute the calls, starting from 1 - for (uint256 i = 1; i < calls.length; i++) { - (bool success, ) = - calls[i].to.call{ value: calls[i].value }(calls[i].data); - - // If this call failed, execute the fallback (call 0) - if (!success) { - (success, ) = - calls[0].to.call{ value: address(this).balance }(calls[0].data); - // If this call also failed, revert entirely - if (!success) { - revert CallsFailed(); - } - return; - } - } - - // We don't clear the re-entrancy guard as this contract should never be - // called again, so there's no reason to spend the effort - } -} diff --git a/processor/ethereum/contracts/contracts/tests/ERC20.sol b/processor/ethereum/contracts/contracts/tests/ERC20.sol index e157974c7..f38bfea40 100644 --- a/processor/ethereum/contracts/contracts/tests/ERC20.sol +++ b/processor/ethereum/contracts/contracts/tests/ERC20.sol @@ -1,5 +1,5 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; contract TestERC20 { event Transfer(address indexed from, address indexed to, uint256 value); diff --git a/processor/ethereum/contracts/src/lib.rs b/processor/ethereum/contracts/src/lib.rs index fef10288e..d8de29b38 100644 --- a/processor/ethereum/contracts/src/lib.rs +++ b/processor/ethereum/contracts/src/lib.rs @@ -44,5 +44,3 @@ pub mod router { pub const BYTECODE: &str = include_str!("../artifacts/Router.bin"); pub use super::router_container::Router::*; } - -pub mod tests; diff --git a/processor/ethereum/contracts/src/tests.rs b/processor/ethereum/contracts/src/tests.rs deleted file mode 100644 index 9f141c291..000000000 --- a/processor/ethereum/contracts/src/tests.rs +++ /dev/null @@ -1,13 +0,0 @@ -use 
alloy_sol_types::sol; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod schnorr_container { - use super::*; - sol!("contracts/tests/Schnorr.sol"); -} -pub use schnorr_container::TestSchnorr as schnorr; From ce1829496719e68349bcedadd12bc29a49021087 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 15 Sep 2024 12:04:57 -0400 Subject: [PATCH 138/179] Remove artifacts for serai-processor-ethereum-contracts --- Cargo.lock | 19 +- networks/ethereum/build-contracts/src/lib.rs | 7 + networks/ethereum/schnorr/build.rs | 7 +- processor/ethereum/contracts/.gitignore | 1 - processor/ethereum/contracts/Cargo.toml | 9 + processor/ethereum/contracts/build.rs | 63 +- .../ethereum/contracts/src/abigen/deployer.rs | 584 ++++ .../ethereum/contracts/src/abigen/erc20.rs | 1838 ++++++++++ .../ethereum/contracts/src/abigen/mod.rs | 3 + .../ethereum/contracts/src/abigen/router.rs | 2958 +++++++++++++++++ processor/ethereum/contracts/src/lib.rs | 43 +- processor/ethereum/ethereum-serai/Cargo.toml | 1 + .../ethereum/ethereum-serai/src/crypto.rs | 10 +- .../ethereum/ethereum-serai/src/machine.rs | 8 +- .../ethereum/ethereum-serai/src/router.rs | 11 +- 15 files changed, 5497 insertions(+), 65 deletions(-) delete mode 100644 processor/ethereum/contracts/.gitignore create mode 100644 processor/ethereum/contracts/src/abigen/deployer.rs create mode 100644 processor/ethereum/contracts/src/abigen/erc20.rs create mode 100644 processor/ethereum/contracts/src/abigen/mod.rs create mode 100644 processor/ethereum/contracts/src/abigen/router.rs diff --git a/Cargo.lock b/Cargo.lock index 1338ae260..d62240930 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2516,6 +2516,7 @@ dependencies = [ "alloy-rpc-types-eth", "alloy-simple-request-transport", "alloy-sol-types", + "ethereum-schnorr-contract", "flexible-transcript", "group", "k256", @@ -6126,6 +6127,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "prettyplease" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +dependencies = [ + "proc-macro2", + "syn 2.0.77", +] + [[package]] name = "primeorder" version = "0.13.6" @@ -6291,7 +6302,7 @@ dependencies = [ "log", "multimap", "petgraph", - "prettyplease", + "prettyplease 0.1.25", "prost", "prost-types", "regex", @@ -8700,8 +8711,14 @@ dependencies = [ name = "serai-processor-ethereum-contracts" version = "0.1.0" dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", "alloy-sol-types", "build-solidity-contracts", + "prettyplease 0.2.22", + "serde_json", + "syn 2.0.77", + "syn-solidity", ] [[package]] diff --git a/networks/ethereum/build-contracts/src/lib.rs b/networks/ethereum/build-contracts/src/lib.rs index 5213059e2..b1c9c87f2 100644 --- a/networks/ethereum/build-contracts/src/lib.rs +++ b/networks/ethereum/build-contracts/src/lib.rs @@ -12,6 +12,13 @@ pub fn build( contracts_path: &str, artifacts_path: &str, ) -> Result<(), String> { + if !fs::exists(artifacts_path) + .map_err(|e| format!("couldn't check if artifacts directory already exists: {e:?}"))? 
+ { + fs::create_dir(artifacts_path) + .map_err(|e| format!("couldn't create the non-existent artifacts directory: {e:?}"))?; + } + println!("cargo:rerun-if-changed={contracts_path}/*"); println!("cargo:rerun-if-changed={artifacts_path}/*"); diff --git a/networks/ethereum/schnorr/build.rs b/networks/ethereum/schnorr/build.rs index 7b7c30fd2..cf12f948c 100644 --- a/networks/ethereum/schnorr/build.rs +++ b/networks/ethereum/schnorr/build.rs @@ -1,9 +1,4 @@ -use std::{env, fs}; - fn main() { - let artifacts_path = env::var("OUT_DIR").unwrap().to_string() + "/ethereum-schnorr-contract"; - if !fs::exists(&artifacts_path).unwrap() { - fs::create_dir(&artifacts_path).unwrap(); - } + let artifacts_path = std::env::var("OUT_DIR").unwrap().to_string() + "/ethereum-schnorr-contract"; build_solidity_contracts::build(&[], "contracts", &artifacts_path).unwrap(); } diff --git a/processor/ethereum/contracts/.gitignore b/processor/ethereum/contracts/.gitignore deleted file mode 100644 index de153db3b..000000000 --- a/processor/ethereum/contracts/.gitignore +++ /dev/null @@ -1 +0,0 @@ -artifacts diff --git a/processor/ethereum/contracts/Cargo.toml b/processor/ethereum/contracts/Cargo.toml index 64fbccad6..5ed540b66 100644 --- a/processor/ethereum/contracts/Cargo.toml +++ b/processor/ethereum/contracts/Cargo.toml @@ -21,3 +21,12 @@ alloy-sol-types = { version = "0.8", default-features = false, features = ["json [build-dependencies] build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts" } + +syn = { version = "2", default-features = false, features = ["proc-macro"] } + +serde_json = { version = "1", default-features = false, features = ["std"] } + +syn-solidity = { version = "0.8", default-features = false } +alloy-sol-macro-input = { version = "0.8", default-features = false } +alloy-sol-macro-expander = { version = "0.8", default-features = false } +prettyplease = { version = "0.2", default-features = false } diff --git a/processor/ethereum/contracts/build.rs b/processor/ethereum/contracts/build.rs index 0af416084..23d1e9072 100644 --- a/processor/ethereum/contracts/build.rs +++ b/processor/ethereum/contracts/build.rs @@ -1,8 +1,69 @@ +use std::{env, fs}; + +use alloy_sol_macro_input::{SolInputKind, SolInput}; + +fn write(sol: syn_solidity::File, file: &str) { + let sol = alloy_sol_macro_expander::expand::expand(sol).unwrap(); + fs::write( + file, + // TODO: Replace `prettyplease::unparse` with `to_string` + prettyplease::unparse(&syn::File { + attrs: vec![], + items: vec![syn::parse2(sol).unwrap()], + shebang: None, + }) + .as_bytes(), + ) + .unwrap(); +} + +fn sol(sol: &str, file: &str) { + let alloy_sol_macro_input::SolInputKind::Sol(sol) = + syn::parse_str(&std::fs::read_to_string(sol).unwrap()).unwrap() + else { + panic!("parsed .sol file wasn't SolInputKind::Sol"); + }; + write(sol, file); +} + +fn abi(ident: &str, abi: &str, file: &str) { + let SolInputKind::Sol(sol) = (SolInput { + attrs: vec![], + path: None, + kind: SolInputKind::Json( + syn::parse_str(ident).unwrap(), + serde_json::from_str(&fs::read_to_string(abi).unwrap()).unwrap(), + ), + }) + .normalize_json() + .unwrap() + .kind + else { + panic!("normalized JSON wasn't SolInputKind::Sol"); + }; + write(sol, file); +} + fn main() { + let artifacts_path = + env::var("OUT_DIR").unwrap().to_string() + "/serai-processor-ethereum-contracts"; build_solidity_contracts::build( &["../../../networks/ethereum/schnorr/contracts"], "contracts", - "artifacts", + &artifacts_path, ) .unwrap(); + + // TODO: Use OUT_DIR for the 
generated code + if !fs::exists("src/abigen").unwrap() { + fs::create_dir("src/abigen").unwrap(); + } + + // These can be handled with the sol! macro + sol("contracts/IERC20.sol", "src/abigen/erc20.rs"); + sol("contracts/Deployer.sol", "src/abigen/deployer.rs"); + // This cannot be handled with the sol! macro. The Solidity requires an import, the ABI is built + // to OUT_DIR and the macro doesn't support non-static paths: + // https://github.com/alloy-rs/core/issues/738 + abi("Router", &(artifacts_path.clone() + "/Router.abi"), "src/abigen/router.rs"); } diff --git a/processor/ethereum/contracts/src/abigen/deployer.rs b/processor/ethereum/contracts/src/abigen/deployer.rs new file mode 100644 index 000000000..f4bcb3a66 --- /dev/null +++ b/processor/ethereum/contracts/src/abigen/deployer.rs @@ -0,0 +1,584 @@ +///Module containing a contract's types and functions. +/** + +```solidity +contract Deployer { + event Deployment(bytes32 indexed init_code_hash, address created); + error DeploymentFailed(); + function deploy(bytes memory init_code) external { } +} +```*/ +#[allow(non_camel_case_types, non_snake_case, clippy::style)] +pub mod Deployer { + use super::*; + use ::alloy_sol_types as alloy_sol_types; + /**Event with signature `Deployment(bytes32,address)` and selector `0x60b877a3bae7bf0f0bd5e1c40ebf44ea158201397f6b72d7c05360157b1ec0fc`. +```solidity +event Deployment(bytes32 indexed init_code_hash, address created); +```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + #[derive(Clone)] + pub struct Deployment { + #[allow(missing_docs)] + pub init_code_hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub created: ::alloy_sol_types::private::Address, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for Deployment { + type DataTuple<'a> = (::alloy_sol_types::sol_data::Address,); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ); + const SIGNATURE: &'static str = "Deployment(bytes32,address)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ + 96u8, + 184u8, + 119u8, + 163u8, + 186u8, + 231u8, + 191u8, + 15u8, + 11u8, + 213u8, + 225u8, + 196u8, + 14u8, + 191u8, + 68u8, + 234u8, + 21u8, + 130u8, + 1u8, + 57u8, + 127u8, + 107u8, + 114u8, + 215u8, + 192u8, + 83u8, + 96u8, + 21u8, + 123u8, + 30u8, + 192u8, + 252u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + init_code_hash: topics.1, + created: data.0, + } + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.created, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.init_code_hash.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken( + Self::SIGNATURE_HASH, + ); + out[1usize] = <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as 
alloy_sol_types::EventTopic>::encode_topic(&self.init_code_hash); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for Deployment { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&Deployment> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &Deployment) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Custom error with signature `DeploymentFailed()` and selector `0x30116425`. +```solidity +error DeploymentFailed(); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct DeploymentFailed {} + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: DeploymentFailed) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for DeploymentFailed { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + #[automatically_derived] + impl alloy_sol_types::SolError for DeploymentFailed { + type Parameters<'a> = UnderlyingSolTuple<'a>; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "DeploymentFailed()"; + const SELECTOR: [u8; 4] = [48u8, 17u8, 100u8, 37u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + } + }; + /**Function with signature `deploy(bytes)` and selector `0x00774360`. +```solidity +function deploy(bytes memory init_code) external { } +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct deployCall { + pub init_code: ::alloy_sol_types::private::Bytes, + } + ///Container type for the return parameters of the [`deploy(bytes)`](deployCall) function. 
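The `Deployer` bindings being added here are the checked-in output of the build script's `sol()` helper: the pre-expanded form of the `sol!` macro, run through `prettyplease` so the artifact is reviewable. For an import-free contract like this one, the inline macro would emit the same items; a sketch of that equivalence (module name hypothetical):

```rust
// Hypothetical inline equivalent of `sol("contracts/Deployer.sol", ...)`:
// the proc macro parses the Solidity source at compile time and expands to
// the same `Deployment`, `DeploymentFailed`, and `deployCall` items seen in
// this file, just without a reviewable artifact under `src/abigen/`.
mod deployer_inline {
  alloy_sol_types::sol!("contracts/Deployer.sol");
}
```

Only the Router needs the ABI detour, for the import/non-static-path reason noted in the build script.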
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct deployReturn {} + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Bytes,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: deployCall) -> Self { + (value.init_code,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for deployCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { init_code: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: deployReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for deployReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for deployCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Bytes,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = deployReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "deploy(bytes)"; + const SELECTOR: [u8; 4] = [0u8, 119u8, 67u8, 96u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.init_code, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + ///Container for all the [`Deployer`](self) function calls. + pub enum DeployerCalls { + deploy(deployCall), + } + #[automatically_derived] + impl DeployerCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. 
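These generated `SolCall` impls are the intended entry point for building calldata. A minimal sketch, assuming the module is reachable as `crate::abigen::deployer` per the `mod.rs` added below:

```rust
use alloy_sol_types::SolCall;

use crate::abigen::deployer::Deployer;

// Calldata for `deploy(bytes)`: `abi_encode` prepends the 4-byte selector
// `0x00774360` to the ABI encoding of the `init_code` argument.
fn deploy_calldata(init_code: Vec<u8>) -> Vec<u8> {
  Deployer::deployCall { init_code: init_code.into() }.abi_encode()
}
```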
+ pub const SELECTORS: &'static [[u8; 4usize]] = &[[0u8, 119u8, 67u8, 96u8]]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for DeployerCalls { + const NAME: &'static str = "DeployerCalls"; + const MIN_DATA_LENGTH: usize = 64usize; + const COUNT: usize = 1usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::deploy(_) => ::SELECTOR, + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(unsafe_code, non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) -> alloy_sol_types::Result] = &[ + { + fn deploy( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(DeployerCalls::deploy) + } + deploy + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err( + alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + ), + ); + }; + (unsafe { DECODE_SHIMS.get_unchecked(idx) })(data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::deploy(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::deploy(inner) => { + ::abi_encode_raw(inner, out) + } + } + } + } + ///Container for all the [`Deployer`](self) custom errors. + pub enum DeployerErrors { + DeploymentFailed(DeploymentFailed), + } + #[automatically_derived] + impl DeployerErrors { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. 
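Decoding runs through the `SolInterface` impl just defined, which matches the leading four bytes of calldata against `SELECTORS` before dispatching. A sketch, with the calldata argument hypothetical:

```rust
use alloy_sol_types::SolInterface;

use crate::abigen::deployer::Deployer::DeployerCalls;

// Classify arbitrary calldata against the Deployer's ABI, returning the
// `init_code` if it was a `deploy(bytes)` call.
fn parse_deploy(calldata: &[u8]) -> Option<Vec<u8>> {
  match DeployerCalls::abi_decode(calldata, true).ok()? {
    DeployerCalls::deploy(call) => Some(call.init_code.to_vec()),
  }
}
```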
+ pub const SELECTORS: &'static [[u8; 4usize]] = &[[48u8, 17u8, 100u8, 37u8]]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for DeployerErrors { + const NAME: &'static str = "DeployerErrors"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 1usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::DeploymentFailed(_) => { + ::SELECTOR + } + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(unsafe_code, non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) -> alloy_sol_types::Result] = &[ + { + fn DeploymentFailed( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(DeployerErrors::DeploymentFailed) + } + DeploymentFailed + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err( + alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + ), + ); + }; + (unsafe { DECODE_SHIMS.get_unchecked(idx) })(data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::DeploymentFailed(inner) => { + ::abi_encoded_size( + inner, + ) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::DeploymentFailed(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + } + } + } + ///Container for all the [`Deployer`](self) events. + pub enum DeployerEvents { + Deployment(Deployment), + } + #[automatically_derived] + impl DeployerEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. 
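Revert data uses the same selector scheme, so the errors enum classifies failed calls. A sketch, assuming `revert_data` is the raw bytes from a reverted `eth_call`:

```rust
use alloy_sol_types::SolInterface;

use crate::abigen::deployer::Deployer::DeployerErrors;

// A `DeploymentFailed()` revert encodes as just its selector, 0x30116425.
fn is_deployment_failure(revert_data: &[u8]) -> bool {
  matches!(
    DeployerErrors::abi_decode(revert_data, true),
    Ok(DeployerErrors::DeploymentFailed(_))
  )
}
```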
+ pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 96u8, + 184u8, + 119u8, + 163u8, + 186u8, + 231u8, + 191u8, + 15u8, + 11u8, + 213u8, + 225u8, + 196u8, + 14u8, + 191u8, + 68u8, + 234u8, + 21u8, + 130u8, + 1u8, + 57u8, + 127u8, + 107u8, + 114u8, + 215u8, + 192u8, + 83u8, + 96u8, + 21u8, + 123u8, + 30u8, + 192u8, + 252u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for DeployerEvents { + const NAME: &'static str = "DeployerEvents"; + const COUNT: usize = 1usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, + data, + validate, + ) + .map(Self::Deployment) + } + _ => { + alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }) + } + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for DeployerEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::Deployment(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::Deployment(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/processor/ethereum/contracts/src/abigen/erc20.rs b/processor/ethereum/contracts/src/abigen/erc20.rs new file mode 100644 index 000000000..d9c0dd6e6 --- /dev/null +++ b/processor/ethereum/contracts/src/abigen/erc20.rs @@ -0,0 +1,1838 @@ +///Module containing a contract's types and functions. +/** + +```solidity +interface IERC20 { + event Transfer(address indexed from, address indexed to, uint256 value); + event Approval(address indexed owner, address indexed spender, uint256 value); + function name() external view returns (string memory); + function symbol() external view returns (string memory); + function decimals() external view returns (uint8); + function totalSupply() external view returns (uint256); + function balanceOf(address owner) external view returns (uint256); + function transfer(address to, uint256 value) external returns (bool); + function transferFrom(address from, address to, uint256 value) external returns (bool); + function approve(address spender, uint256 value) external returns (bool); + function allowance(address owner, address spender) external view returns (uint256); +} +```*/ +#[allow(non_camel_case_types, non_snake_case, clippy::style)] +pub mod IERC20 { + use super::*; + use ::alloy_sol_types as alloy_sol_types; + /**Event with signature `Transfer(address,address,uint256)` and selector `0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef`. 
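The events enum completes the picture: given a log's raw topics and data, `decode_raw_log` checks topic zero against each event's `SIGNATURE_HASH`. A sketch (the topic/data inputs are hypothetical; `private::Address` mirrors the type the expansion itself uses):

```rust
use alloy_sol_types::{private::Address, SolEventInterface, Word};

use crate::abigen::deployer::Deployer::{DeployerEvents, Deployment};

// Extract the created contract's address from a Deployer log, if that log
// is a `Deployment(bytes32,address)` event.
fn created_address(topics: &[Word], data: &[u8]) -> Option<Address> {
  match DeployerEvents::decode_raw_log(topics, data, true).ok()? {
    DeployerEvents::Deployment(Deployment { created, .. }) => Some(created),
  }
}
```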
+```solidity +event Transfer(address indexed from, address indexed to, uint256 value); +```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + #[derive(Clone)] + pub struct Transfer { + #[allow(missing_docs)] + pub from: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub value: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for Transfer { + type DataTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "Transfer(address,address,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ + 221u8, + 242u8, + 82u8, + 173u8, + 27u8, + 226u8, + 200u8, + 155u8, + 105u8, + 194u8, + 176u8, + 104u8, + 252u8, + 55u8, + 141u8, + 170u8, + 149u8, + 43u8, + 167u8, + 241u8, + 99u8, + 196u8, + 161u8, + 22u8, + 40u8, + 245u8, + 90u8, + 77u8, + 245u8, + 35u8, + 179u8, + 239u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + from: topics.1, + to: topics.2, + value: data.0, + } + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.value), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.from.clone(), self.to.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken( + Self::SIGNATURE_HASH, + ); + out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.from, + ); + out[2usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.to, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for Transfer { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&Transfer> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &Transfer) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `Approval(address,address,uint256)` and selector `0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925`. 
+```solidity +event Approval(address indexed owner, address indexed spender, uint256 value); +```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + #[derive(Clone)] + pub struct Approval { + #[allow(missing_docs)] + pub owner: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub spender: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub value: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for Approval { + type DataTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "Approval(address,address,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ + 140u8, + 91u8, + 225u8, + 229u8, + 235u8, + 236u8, + 125u8, + 91u8, + 209u8, + 79u8, + 113u8, + 66u8, + 125u8, + 30u8, + 132u8, + 243u8, + 221u8, + 3u8, + 20u8, + 192u8, + 247u8, + 178u8, + 41u8, + 30u8, + 91u8, + 32u8, + 10u8, + 200u8, + 199u8, + 195u8, + 185u8, + 37u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + owner: topics.1, + spender: topics.2, + value: data.0, + } + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.value), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.owner.clone(), self.spender.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken( + Self::SIGNATURE_HASH, + ); + out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.owner, + ); + out[2usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.spender, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for Approval { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&Approval> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &Approval) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Function with signature `name()` and selector `0x06fdde03`. +```solidity +function name() external view returns (string memory); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct nameCall {} + ///Container type for the return parameters of the [`name()`](nameCall) function. 
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct nameReturn { + pub _0: ::alloy_sol_types::private::String, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: nameCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for nameCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: nameReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for nameReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for nameCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = nameReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::String,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "name()"; + const SELECTOR: [u8; 4] = [6u8, 253u8, 222u8, 3u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `symbol()` and selector `0x95d89b41`. +```solidity +function symbol() external view returns (string memory); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct symbolCall {} + ///Container type for the return parameters of the [`symbol()`](symbolCall) function. 
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct symbolReturn { + pub _0: ::alloy_sol_types::private::String, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: symbolCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for symbolCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: symbolReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for symbolReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for symbolCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = symbolReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::String,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "symbol()"; + const SELECTOR: [u8; 4] = [149u8, 216u8, 155u8, 65u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `decimals()` and selector `0x313ce567`. +```solidity +function decimals() external view returns (uint8); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct decimalsCall {} + ///Container type for the return parameters of the [`decimals()`](decimalsCall) function. 
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct decimalsReturn { + pub _0: u8, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: decimalsCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for decimalsCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<8>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (u8,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: decimalsReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for decimalsReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for decimalsCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = decimalsReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<8>,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "decimals()"; + const SELECTOR: [u8; 4] = [49u8, 60u8, 229u8, 103u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `totalSupply()` and selector `0x18160ddd`. +```solidity +function totalSupply() external view returns (uint256); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct totalSupplyCall {} + ///Container type for the return parameters of the [`totalSupply()`](totalSupplyCall) function. 
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct totalSupplyReturn { + pub _0: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: totalSupplyCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for totalSupplyCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: totalSupplyReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for totalSupplyReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for totalSupplyCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = totalSupplyReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "totalSupply()"; + const SELECTOR: [u8; 4] = [24u8, 22u8, 13u8, 221u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `balanceOf(address)` and selector `0x70a08231`. +```solidity +function balanceOf(address owner) external view returns (uint256); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct balanceOfCall { + pub owner: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`balanceOf(address)`](balanceOfCall) function. 
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct balanceOfReturn { + pub _0: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: balanceOfCall) -> Self { + (value.owner,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for balanceOfCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { owner: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: balanceOfReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for balanceOfReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for balanceOfCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = balanceOfReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "balanceOf(address)"; + const SELECTOR: [u8; 4] = [112u8, 160u8, 130u8, 49u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.owner, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `transfer(address,uint256)` and selector `0xa9059cbb`. +```solidity +function transfer(address to, uint256 value) external returns (bool); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct transferCall { + pub to: ::alloy_sol_types::private::Address, + pub value: ::alloy_sol_types::private::primitives::aliases::U256, + } + ///Container type for the return parameters of the [`transfer(address,uint256)`](transferCall) function. 
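All the IERC20 view calls follow this same shape; a round-trip sketch for `balanceOf`, where `ret_data` stands in for the raw bytes an `eth_call` would return:

```rust
use alloy_sol_types::{
  private::{primitives::aliases::U256, Address},
  SolCall,
};

use crate::abigen::erc20::IERC20;

// Calldata for `balanceOf(owner)` (selector 0x70a08231).
fn balance_of_calldata(owner: Address) -> Vec<u8> {
  IERC20::balanceOfCall { owner }.abi_encode()
}

// Decode the call's raw return bytes into the uint256 balance.
fn parse_balance(ret_data: &[u8]) -> alloy_sol_types::Result<U256> {
  IERC20::balanceOfCall::abi_decode_returns(ret_data, true).map(|res| res._0)
}
```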
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct transferReturn { + pub _0: bool, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: transferCall) -> Self { + (value.to, value.value) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for transferCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + to: tuple.0, + value: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bool,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (bool,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: transferReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for transferReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for transferCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = transferReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Bool,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "transfer(address,uint256)"; + const SELECTOR: [u8; 4] = [169u8, 5u8, 156u8, 187u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.value), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `transferFrom(address,address,uint256)` and selector `0x23b872dd`. 
+```solidity +function transferFrom(address from, address to, uint256 value) external returns (bool); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct transferFromCall { + pub from: ::alloy_sol_types::private::Address, + pub to: ::alloy_sol_types::private::Address, + pub value: ::alloy_sol_types::private::primitives::aliases::U256, + } + ///Container type for the return parameters of the [`transferFrom(address,address,uint256)`](transferFromCall) function. + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct transferFromReturn { + pub _0: bool, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: transferFromCall) -> Self { + (value.from, value.to, value.value) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for transferFromCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + from: tuple.0, + to: tuple.1, + value: tuple.2, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bool,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (bool,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: transferFromReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for transferFromReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for transferFromCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = transferFromReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Bool,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "transferFrom(address,address,uint256)"; + const SELECTOR: [u8; 4] = [35u8, 184u8, 114u8, 221u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.from, + ), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.value), 
+ ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `approve(address,uint256)` and selector `0x095ea7b3`. +```solidity +function approve(address spender, uint256 value) external returns (bool); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct approveCall { + pub spender: ::alloy_sol_types::private::Address, + pub value: ::alloy_sol_types::private::primitives::aliases::U256, + } + ///Container type for the return parameters of the [`approve(address,uint256)`](approveCall) function. + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct approveReturn { + pub _0: bool, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCall) -> Self { + (value.spender, value.value) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + spender: tuple.0, + value: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bool,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (bool,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for approveCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = approveReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Bool,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "approve(address,uint256)"; + const SELECTOR: [u8; 4] = [9u8, 94u8, 167u8, 179u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.spender, + ), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.value), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + 
validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `allowance(address,address)` and selector `0xdd62ed3e`. +```solidity +function allowance(address owner, address spender) external view returns (uint256); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct allowanceCall { + pub owner: ::alloy_sol_types::private::Address, + pub spender: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`allowance(address,address)`](allowanceCall) function. + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct allowanceReturn { + pub _0: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Address, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: allowanceCall) -> Self { + (value.owner, value.spender) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for allowanceCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + owner: tuple.0, + spender: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: allowanceReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for allowanceReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for allowanceCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = allowanceReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "allowance(address,address)"; + const SELECTOR: [u8; 4] = [221u8, 98u8, 237u8, 62u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.owner, + ), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.spender, + ), + ) + } + #[inline] + fn 
abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + ///Container for all the [`IERC20`](self) function calls. + pub enum IERC20Calls { + name(nameCall), + symbol(symbolCall), + decimals(decimalsCall), + totalSupply(totalSupplyCall), + balanceOf(balanceOfCall), + transfer(transferCall), + transferFrom(transferFromCall), + approve(approveCall), + allowance(allowanceCall), + } + #[automatically_derived] + impl IERC20Calls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [6u8, 253u8, 222u8, 3u8], + [9u8, 94u8, 167u8, 179u8], + [24u8, 22u8, 13u8, 221u8], + [35u8, 184u8, 114u8, 221u8], + [49u8, 60u8, 229u8, 103u8], + [112u8, 160u8, 130u8, 49u8], + [149u8, 216u8, 155u8, 65u8], + [169u8, 5u8, 156u8, 187u8], + [221u8, 98u8, 237u8, 62u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for IERC20Calls { + const NAME: &'static str = "IERC20Calls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 9usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::name(_) => ::SELECTOR, + Self::symbol(_) => ::SELECTOR, + Self::decimals(_) => ::SELECTOR, + Self::totalSupply(_) => { + ::SELECTOR + } + Self::balanceOf(_) => { + ::SELECTOR + } + Self::transfer(_) => ::SELECTOR, + Self::transferFrom(_) => { + ::SELECTOR + } + Self::approve(_) => ::SELECTOR, + Self::allowance(_) => { + ::SELECTOR + } + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(unsafe_code, non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) -> alloy_sol_types::Result] = &[ + { + fn name( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(IERC20Calls::name) + } + name + }, + { + fn approve( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(IERC20Calls::approve) + } + approve + }, + { + fn totalSupply( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(IERC20Calls::totalSupply) + } + totalSupply + }, + { + fn transferFrom( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(IERC20Calls::transferFrom) + } + transferFrom + }, + { + fn decimals( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(IERC20Calls::decimals) + } + decimals + }, + { + fn balanceOf( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(IERC20Calls::balanceOf) + } + balanceOf + }, + { + fn symbol( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(IERC20Calls::symbol) + } + symbol + }, + { + fn transfer( + data: &[u8], + validate: bool, + ) -> 
alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(IERC20Calls::transfer) + } + transfer + }, + { + fn allowance( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(IERC20Calls::allowance) + } + allowance + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err( + alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + ), + ); + }; + (unsafe { DECODE_SHIMS.get_unchecked(idx) })(data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::name(inner) => { + ::abi_encoded_size(inner) + } + Self::symbol(inner) => { + ::abi_encoded_size(inner) + } + Self::decimals(inner) => { + ::abi_encoded_size(inner) + } + Self::totalSupply(inner) => { + ::abi_encoded_size( + inner, + ) + } + Self::balanceOf(inner) => { + ::abi_encoded_size(inner) + } + Self::transfer(inner) => { + ::abi_encoded_size(inner) + } + Self::transferFrom(inner) => { + ::abi_encoded_size( + inner, + ) + } + Self::approve(inner) => { + ::abi_encoded_size(inner) + } + Self::allowance(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::name(inner) => { + ::abi_encode_raw(inner, out) + } + Self::symbol(inner) => { + ::abi_encode_raw(inner, out) + } + Self::decimals(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + Self::totalSupply(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + Self::balanceOf(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + Self::transfer(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + Self::transferFrom(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + Self::approve(inner) => { + ::abi_encode_raw(inner, out) + } + Self::allowance(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + } + } + } + ///Container for all the [`IERC20`](self) events. + pub enum IERC20Events { + Transfer(Transfer), + Approval(Approval), + } + #[automatically_derived] + impl IERC20Events { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. 
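As with the Deployer, the events enum lets a scanner sort a receipt's logs without hand-rolling topic comparisons. A sketch over hypothetical raw `(topics, data)` pairs:

```rust
use alloy_sol_types::{SolEventInterface, Word};

use crate::abigen::erc20::IERC20::{IERC20Events, Transfer};

// Keep only `Transfer` events, dropping `Approval`s and unrelated logs.
fn transfers(logs: &[(Vec<Word>, Vec<u8>)]) -> Vec<Transfer> {
  logs
    .iter()
    .filter_map(|(topics, data)| IERC20Events::decode_raw_log(topics, data, true).ok())
    .filter_map(|event| match event {
      IERC20Events::Transfer(transfer) => Some(transfer),
      IERC20Events::Approval(_) => None,
    })
    .collect()
}
```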
+ pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 140u8, + 91u8, + 225u8, + 229u8, + 235u8, + 236u8, + 125u8, + 91u8, + 209u8, + 79u8, + 113u8, + 66u8, + 125u8, + 30u8, + 132u8, + 243u8, + 221u8, + 3u8, + 20u8, + 192u8, + 247u8, + 178u8, + 41u8, + 30u8, + 91u8, + 32u8, + 10u8, + 200u8, + 199u8, + 195u8, + 185u8, + 37u8, + ], + [ + 221u8, + 242u8, + 82u8, + 173u8, + 27u8, + 226u8, + 200u8, + 155u8, + 105u8, + 194u8, + 176u8, + 104u8, + 252u8, + 55u8, + 141u8, + 170u8, + 149u8, + 43u8, + 167u8, + 241u8, + 99u8, + 196u8, + 161u8, + 22u8, + 40u8, + 245u8, + 90u8, + 77u8, + 245u8, + 35u8, + 179u8, + 239u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for IERC20Events { + const NAME: &'static str = "IERC20Events"; + const COUNT: usize = 2usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, + data, + validate, + ) + .map(Self::Transfer) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, + data, + validate, + ) + .map(Self::Approval) + } + _ => { + alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }) + } + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for IERC20Events { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::Transfer(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::Approval(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::Transfer(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::Approval(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/processor/ethereum/contracts/src/abigen/mod.rs b/processor/ethereum/contracts/src/abigen/mod.rs new file mode 100644 index 000000000..541c2980d --- /dev/null +++ b/processor/ethereum/contracts/src/abigen/mod.rs @@ -0,0 +1,3 @@ +pub mod erc20; +pub mod deployer; +pub mod router; diff --git a/processor/ethereum/contracts/src/abigen/router.rs b/processor/ethereum/contracts/src/abigen/router.rs new file mode 100644 index 000000000..cea1858f1 --- /dev/null +++ b/processor/ethereum/contracts/src/abigen/router.rs @@ -0,0 +1,2958 @@ +/** + +Generated by the following Solidity interface... 
+```solidity +interface Router { + type DestinationType is uint8; + struct OutInstruction { + DestinationType destinationType; + bytes destination; + address coin; + uint256 value; + } + struct Signature { + bytes32 c; + bytes32 s; + } + + error FailedTransfer(); + error InvalidAmount(); + error InvalidSignature(); + + event Executed(uint256 indexed nonce, bytes32 indexed batch); + event InInstruction(address indexed from, address indexed coin, uint256 amount, bytes instruction); + event SeraiKeyUpdated(uint256 indexed nonce, bytes32 indexed key); + + constructor(bytes32 initialSeraiKey); + + function arbitaryCallOut(bytes memory code) external; + function execute(OutInstruction[] memory transactions, Signature memory signature) external; + function inInstruction(address coin, uint256 amount, bytes memory instruction) external payable; + function nonce() external view returns (uint256); + function seraiKey() external view returns (bytes32); + function smartContractNonce() external view returns (uint256); + function updateSeraiKey(bytes32 newSeraiKey, Signature memory signature) external; +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "constructor", + "inputs": [ + { + "name": "initialSeraiKey", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "arbitaryCallOut", + "inputs": [ + { + "name": "code", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "execute", + "inputs": [ + { + "name": "transactions", + "type": "tuple[]", + "internalType": "struct Router.OutInstruction[]", + "components": [ + { + "name": "destinationType", + "type": "uint8", + "internalType": "enum Router.DestinationType" + }, + { + "name": "destination", + "type": "bytes", + "internalType": "bytes" + }, + { + "name": "coin", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + } + ] + }, + { + "name": "signature", + "type": "tuple", + "internalType": "struct Router.Signature", + "components": [ + { + "name": "c", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "s", + "type": "bytes32", + "internalType": "bytes32" + } + ] + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "inInstruction", + "inputs": [ + { + "name": "coin", + "type": "address", + "internalType": "address" + }, + { + "name": "amount", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "instruction", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [], + "stateMutability": "payable" + }, + { + "type": "function", + "name": "nonce", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "uint256", + "internalType": "uint256" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "seraiKey", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "smartContractNonce", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "uint256", + "internalType": "uint256" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "updateSeraiKey", + "inputs": [ + { + "name": "newSeraiKey", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "signature", + "type": "tuple", + "internalType": "struct 
Router.Signature", + "components": [ + { + "name": "c", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "s", + "type": "bytes32", + "internalType": "bytes32" + } + ] + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "event", + "name": "Executed", + "inputs": [ + { + "name": "nonce", + "type": "uint256", + "indexed": true, + "internalType": "uint256" + }, + { + "name": "batch", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "InInstruction", + "inputs": [ + { + "name": "from", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "coin", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "amount", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "instruction", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "SeraiKeyUpdated", + "inputs": [ + { + "name": "nonce", + "type": "uint256", + "indexed": true, + "internalType": "uint256" + }, + { + "name": "key", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "error", + "name": "FailedTransfer", + "inputs": [] + }, + { + "type": "error", + "name": "InvalidAmount", + "inputs": [] + }, + { + "type": "error", + "name": "InvalidSignature", + "inputs": [] + } +] +```*/ +#[allow(non_camel_case_types, non_snake_case, clippy::style)] +pub mod Router { + use super::*; + use ::alloy_sol_types as alloy_sol_types; + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct DestinationType(u8); + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for u8 { + #[inline] + fn stv_to_tokens( + &self, + ) -> <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::Token<'_> { + alloy_sol_types::private::SolTypeValue::< + ::alloy_sol_types::sol_data::Uint<8>, + >::stv_to_tokens(self) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::tokenize(self) + .0 + } + #[inline] + fn stv_abi_encode_packed_to( + &self, + out: &mut alloy_sol_types::private::Vec, + ) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::abi_encode_packed_to(self, out) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::abi_encoded_size(self) + } + } + #[automatically_derived] + impl DestinationType { + /// The Solidity type name. + pub const NAME: &'static str = stringify!(@ name); + /// Convert from the underlying value type. + #[inline] + pub const fn from(value: u8) -> Self { + Self(value) + } + /// Return the underlying value. + #[inline] + pub const fn into(self) -> u8 { + self.0 + } + /// Return the single encoding of this value, delegating to the + /// underlying type. + #[inline] + pub fn abi_encode(&self) -> alloy_sol_types::private::Vec { + ::abi_encode(&self.0) + } + /// Return the packed encoding of this value, delegating to the + /// underlying type. 
+ #[inline] + pub fn abi_encode_packed(&self) -> alloy_sol_types::private::Vec { + ::abi_encode_packed(&self.0) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for DestinationType { + type RustType = u8; + type Token<'a> = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = Self::NAME; + const ENCODED_SIZE: Option = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + Self::type_check(token).is_ok() + } + #[inline] + fn type_check(token: &Self::Token<'_>) -> alloy_sol_types::Result<()> { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::type_check(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::detokenize(token) + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for DestinationType { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::topic_preimage_length(rust) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic_preimage(rust, out) + } + #[inline] + fn encode_topic( + rust: &Self::RustType, + ) -> alloy_sol_types::abi::token::WordToken { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic(rust) + } + } + }; + /**```solidity +struct OutInstruction { DestinationType destinationType; bytes destination; address coin; uint256 value; } +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct OutInstruction { + pub destinationType: ::RustType, + pub destination: ::alloy_sol_types::private::Bytes, + pub coin: ::alloy_sol_types::private::Address, + pub value: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + DestinationType, + ::alloy_sol_types::sol_data::Bytes, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::RustType, + ::alloy_sol_types::private::Bytes, + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: OutInstruction) -> Self { + (value.destinationType, value.destination, value.coin, value.value) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for OutInstruction { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + destinationType: tuple.0, + destination: tuple.1, + coin: tuple.2, + value: tuple.3, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for OutInstruction { 
+ type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for OutInstruction { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + ::tokenize( + &self.destinationType, + ), + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.destination, + ), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.coin, + ), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.value), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to( + &self, + out: &mut alloy_sol_types::private::Vec, + ) { + let tuple = as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to(&tuple, out) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size(&tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for OutInstruction { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for OutInstruction { + const NAME: &'static str = "OutInstruction"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "OutInstruction(uint8 destinationType,bytes destination,address coin,uint256 value)", + ) + } + #[inline] + fn eip712_components() -> alloy_sol_types::private::Vec< + alloy_sol_types::private::Cow<'static, str>, + > { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + ::eip712_data_word( + &self.destinationType, + ) + .0, + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::eip712_data_word( + &self.destination, + ) + .0, + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::eip712_data_word( + &self.coin, + ) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.value) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for OutInstruction { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + ::topic_preimage_length( + &rust.destinationType, + ) + + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.destination, + ) + + <::alloy_sol_types::sol_data::Address as 
alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.coin, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.value) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve( + ::topic_preimage_length(rust), + ); + ::encode_topic_preimage( + &rust.destinationType, + out, + ); + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.destination, + out, + ); + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.coin, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.value, + out, + ); + } + #[inline] + fn encode_topic( + rust: &Self::RustType, + ) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage( + rust, + &mut out, + ); + alloy_sol_types::abi::token::WordToken( + alloy_sol_types::private::keccak256(out), + ) + } + } + }; + /**```solidity +struct Signature { bytes32 c; bytes32 s; } +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct Signature { + pub c: ::alloy_sol_types::private::FixedBytes<32>, + pub s: ::alloy_sol_types::private::FixedBytes<32>, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Signature) -> Self { + (value.c, value.s) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Signature { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { c: tuple.0, s: tuple.1 } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Signature { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Signature { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.c), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.s), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to( + &self, + out: &mut alloy_sol_types::private::Vec, + ) { + let tuple = as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to(&tuple, out) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return 
size; + } + let tuple = as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size(&tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Signature { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Signature { + const NAME: &'static str = "Signature"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed("Signature(bytes32 c,bytes32 s)") + } + #[inline] + fn eip712_components() -> alloy_sol_types::private::Vec< + alloy_sol_types::private::Cow<'static, str>, + > { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.c) + .0, + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.s) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Signature { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.c) + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.s) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve( + ::topic_preimage_length(rust), + ); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage(&rust.c, out); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage(&rust.s, out); + } + #[inline] + fn encode_topic( + rust: &Self::RustType, + ) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage( + rust, + &mut out, + ); + alloy_sol_types::abi::token::WordToken( + alloy_sol_types::private::keccak256(out), + ) + } + } + }; + /**Custom error with signature `FailedTransfer()` and selector `0xbfa871c5`. 
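Both `OutInstruction` and `Signature` receive `SolStruct` implementations above, which makes them EIP-712 hashable: the root type string is hashed into a type hash, each field is encoded (with dynamic `bytes` fields pre-hashed), and the struct hash is the keccak256 of the concatenation. A small sketch, assuming the generated `Router` module is in scope:

```rust
use alloy_sol_types::SolStruct;
use alloy_primitives::FixedBytes;

fn main() {
    let sig = Router::Signature { c: FixedBytes::ZERO, s: FixedBytes::ZERO };
    // The root type string is what gets hashed into the type hash.
    assert_eq!(
        Router::Signature::eip712_root_type(),
        "Signature(bytes32 c,bytes32 s)"
    );
    // hash_struct = keccak256(type_hash ++ encode_data).
    let _digest: FixedBytes<32> = sig.eip712_hash_struct();
}
```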
+```solidity +error FailedTransfer(); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct FailedTransfer {} + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: FailedTransfer) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for FailedTransfer { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + #[automatically_derived] + impl alloy_sol_types::SolError for FailedTransfer { + type Parameters<'a> = UnderlyingSolTuple<'a>; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "FailedTransfer()"; + const SELECTOR: [u8; 4] = [191u8, 168u8, 113u8, 197u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + } + }; + /**Custom error with signature `InvalidAmount()` and selector `0x2c5211c6`. +```solidity +error InvalidAmount(); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct InvalidAmount {} + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: InvalidAmount) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for InvalidAmount { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + #[automatically_derived] + impl alloy_sol_types::SolError for InvalidAmount { + type Parameters<'a> = UnderlyingSolTuple<'a>; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "InvalidAmount()"; + const SELECTOR: [u8; 4] = [44u8, 82u8, 17u8, 198u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + } + }; + /**Custom error with signature `InvalidSignature()` and selector `0x8baa579f`. 
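All three custom errors are field-less structs whose 4-byte selectors are derived the same way as function selectors (the first four bytes of keccak256 over the signature). Raw revert data from a Router call can therefore be classified by its selector prefix; a hedged sketch (the helper name is ours, not part of the bindings):

```rust
use alloy_sol_types::SolError;

// Hypothetical helper: map raw revert data to the Router error it encodes.
fn classify_revert(data: &[u8]) -> &'static str {
    match data.get(..4) {
        Some(s) if s == Router::FailedTransfer::SELECTOR.as_slice() => "FailedTransfer",
        Some(s) if s == Router::InvalidAmount::SELECTOR.as_slice() => "InvalidAmount",
        Some(s) if s == Router::InvalidSignature::SELECTOR.as_slice() => "InvalidSignature",
        _ => "unrecognized revert",
    }
}

fn main() {
    assert_eq!(classify_revert(&[0x2c, 0x52, 0x11, 0xc6]), "InvalidAmount");
}
```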
+```solidity +error InvalidSignature(); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct InvalidSignature {} + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: InvalidSignature) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for InvalidSignature { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + #[automatically_derived] + impl alloy_sol_types::SolError for InvalidSignature { + type Parameters<'a> = UnderlyingSolTuple<'a>; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "InvalidSignature()"; + const SELECTOR: [u8; 4] = [139u8, 170u8, 87u8, 159u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + } + }; + /**Event with signature `Executed(uint256,bytes32)` and selector `0xc218c77e54cac1162571e52b65bb27aa0cdfcc70b7c7296ad83933914b132091`. +```solidity +event Executed(uint256 indexed nonce, bytes32 indexed batch); +```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + #[derive(Clone)] + pub struct Executed { + #[allow(missing_docs)] + pub nonce: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub batch: ::alloy_sol_types::private::FixedBytes<32>, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for Executed { + type DataTuple<'a> = (); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ); + const SIGNATURE: &'static str = "Executed(uint256,bytes32)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ + 194u8, + 24u8, + 199u8, + 126u8, + 84u8, + 202u8, + 193u8, + 22u8, + 37u8, + 113u8, + 229u8, + 43u8, + 101u8, + 187u8, + 39u8, + 170u8, + 12u8, + 223u8, + 204u8, + 112u8, + 183u8, + 199u8, + 41u8, + 106u8, + 216u8, + 57u8, + 51u8, + 145u8, + 75u8, + 19u8, + 32u8, + 145u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + nonce: topics.1, + batch: topics.2, + } + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + () + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.nonce.clone(), self.batch.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken( + Self::SIGNATURE_HASH, + ); + out[1usize] = 
<::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic(&self.nonce); + out[2usize] = <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic(&self.batch); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for Executed { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&Executed> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &Executed) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `InInstruction(address,address,uint256,bytes)` and selector `0x346fd5cd6d19d26d3afd222f43033ecd0d5614ca64bec0aed101482cd87e922f`. +```solidity +event InInstruction(address indexed from, address indexed coin, uint256 amount, bytes instruction); +```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + #[derive(Clone)] + pub struct InInstruction { + #[allow(missing_docs)] + pub from: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub coin: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub amount: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub instruction: ::alloy_sol_types::private::Bytes, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for InInstruction { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Bytes, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "InInstruction(address,address,uint256,bytes)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ + 52u8, + 111u8, + 213u8, + 205u8, + 109u8, + 25u8, + 210u8, + 109u8, + 58u8, + 253u8, + 34u8, + 47u8, + 67u8, + 3u8, + 62u8, + 205u8, + 13u8, + 86u8, + 20u8, + 202u8, + 100u8, + 190u8, + 192u8, + 174u8, + 209u8, + 1u8, + 72u8, + 44u8, + 216u8, + 126u8, + 146u8, + 47u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + from: topics.1, + coin: topics.2, + amount: data.0, + instruction: data.1, + } + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.amount), + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.instruction, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.from.clone(), self.coin.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken( + Self::SIGNATURE_HASH, + ); + out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.from, + ); + out[2usize] = 
<::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.coin, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for InInstruction { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&InInstruction> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &InInstruction) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `SeraiKeyUpdated(uint256,bytes32)` and selector `0x1b9ff0164e811045a617ae783e807501a8e27762a7cb8f2fbd027851752570b5`. +```solidity +event SeraiKeyUpdated(uint256 indexed nonce, bytes32 indexed key); +```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + #[derive(Clone)] + pub struct SeraiKeyUpdated { + #[allow(missing_docs)] + pub nonce: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::FixedBytes<32>, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for SeraiKeyUpdated { + type DataTuple<'a> = (); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ); + const SIGNATURE: &'static str = "SeraiKeyUpdated(uint256,bytes32)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ + 27u8, + 159u8, + 240u8, + 22u8, + 78u8, + 129u8, + 16u8, + 69u8, + 166u8, + 23u8, + 174u8, + 120u8, + 62u8, + 128u8, + 117u8, + 1u8, + 168u8, + 226u8, + 119u8, + 98u8, + 167u8, + 203u8, + 143u8, + 47u8, + 189u8, + 2u8, + 120u8, + 81u8, + 117u8, + 37u8, + 112u8, + 181u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + nonce: topics.1, + key: topics.2, + } + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + () + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.nonce.clone(), self.key.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken( + Self::SIGNATURE_HASH, + ); + out[1usize] = <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic(&self.nonce); + out[2usize] = <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic(&self.key); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for SeraiKeyUpdated { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&SeraiKeyUpdated> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &SeraiKeyUpdated) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + 
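With `Executed`, `InInstruction`, and `SeraiKeyUpdated` all defined, an indexer typically routes a raw log by topic 0 and then decodes it through the generated `SolEvent` impl, mirroring what `decode_raw_log` does above. A minimal sketch (the helper name is assumed; error handling elided):

```rust
use alloy_sol_types::SolEvent;
use alloy_primitives::B256;

// Hypothetical helper: decode an InInstruction event from raw topics/data.
fn decode_in_instruction(topics: &[B256], data: &[u8]) -> Option<Router::InInstruction> {
    // Topic 0 is the event's signature hash; bail early on a mismatch.
    if topics.first() != Some(&Router::InInstruction::SIGNATURE_HASH) {
        return None;
    }
    Router::InInstruction::decode_raw_log(topics, data, true).ok()
}

fn main() {
    // An empty topic list can never match, so this decodes to None.
    assert!(decode_in_instruction(&[], b"").is_none());
}
```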
+    };
+    /**Constructor.
+```solidity
+constructor(bytes32 initialSeraiKey);
+```*/
+    #[allow(non_camel_case_types, non_snake_case)]
+    #[derive(Clone)]
+    pub struct constructorCall {
+        pub initialSeraiKey: ::alloy_sol_types::private::FixedBytes<32>,
+    }
+    const _: () = {
+        use ::alloy_sol_types as alloy_sol_types;
+        {
+            #[doc(hidden)]
+            type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,);
+            #[doc(hidden)]
+            type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::FixedBytes<32>,);
+            #[cfg(test)]
+            #[allow(dead_code, unreachable_patterns)]
+            fn _type_assertion(
+                _t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>,
+            ) {
+                match _t {
+                    alloy_sol_types::private::AssertTypeEq::<
+                        <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType,
+                    >(_) => {}
+                }
+            }
+            #[automatically_derived]
+            #[doc(hidden)]
+            impl ::core::convert::From<constructorCall> for UnderlyingRustTuple<'_> {
+                fn from(value: constructorCall) -> Self {
+                    (value.initialSeraiKey,)
+                }
+            }
+            #[automatically_derived]
+            #[doc(hidden)]
+            impl ::core::convert::From<UnderlyingRustTuple<'_>> for constructorCall {
+                fn from(tuple: UnderlyingRustTuple<'_>) -> Self {
+                    Self { initialSeraiKey: tuple.0 }
+                }
+            }
+        }
+        #[automatically_derived]
+        impl alloy_sol_types::SolConstructor for constructorCall {
+            type Parameters<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,);
+            type Token<'a> = <Self::Parameters<'a> as alloy_sol_types::SolType>::Token<'a>;
+            #[inline]
+            fn new<'a>(
+                tuple: <Self::Parameters<'a> as alloy_sol_types::SolType>::RustType,
+            ) -> Self {
+                tuple.into()
+            }
+            #[inline]
+            fn tokenize(&self) -> Self::Token<'_> {
+                (
+                    <::alloy_sol_types::sol_data::FixedBytes<
+                        32,
+                    > as alloy_sol_types::SolType>::tokenize(&self.initialSeraiKey),
+                )
+            }
+        }
+    };
+    /**Function with signature `arbitaryCallOut(bytes)` and selector `0x3cbd2bf6`.
+```solidity
+function arbitaryCallOut(bytes memory code) external;
+```*/
+    #[allow(non_camel_case_types, non_snake_case)]
+    #[derive(Clone)]
+    pub struct arbitaryCallOutCall {
+        pub code: ::alloy_sol_types::private::Bytes,
+    }
+    ///Container type for the return parameters of the [`arbitaryCallOut(bytes)`](arbitaryCallOutCall) function.
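Note that the signature string is hashed exactly as spelled in the Solidity source, so the `arbitaryCallOut` spelling is load-bearing: "correcting" it would change the selector and break dispatch against the deployed contract. A quick illustration of selector derivation using `alloy_primitives`:

```rust
use alloy_primitives::keccak256;

// First four bytes of keccak256 over the canonical signature.
fn selector(sig: &str) -> [u8; 4] {
    let h = keccak256(sig.as_bytes());
    [h[0], h[1], h[2], h[3]]
}

fn main() {
    assert_eq!(selector("arbitaryCallOut(bytes)"), [0x3c, 0xbd, 0x2b, 0xf6]);
    assert_eq!(selector("nonce()"), [0xaf, 0xfe, 0xd0, 0xe0]);
}
```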
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct arbitaryCallOutReturn {} + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Bytes,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: arbitaryCallOutCall) -> Self { + (value.code,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for arbitaryCallOutCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { code: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From + for UnderlyingRustTuple<'_> { + fn from(value: arbitaryCallOutReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> + for arbitaryCallOutReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for arbitaryCallOutCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Bytes,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = arbitaryCallOutReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "arbitaryCallOut(bytes)"; + const SELECTOR: [u8; 4] = [60u8, 189u8, 43u8, 246u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.code, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `execute((uint8,bytes,address,uint256)[],(bytes32,bytes32))` and selector `0xd5f22182`. +```solidity +function execute(OutInstruction[] memory transactions, Signature memory signature) external; +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct executeCall { + pub transactions: ::alloy_sol_types::private::Vec< + ::RustType, + >, + pub signature: ::RustType, + } + ///Container type for the return parameters of the [`execute((uint8,bytes,address,uint256)[],(bytes32,bytes32))`](executeCall) function. 
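Every call struct round-trips through its `SolCall` impl: `abi_encode` prepends the selector to the ABI-encoded arguments, and `abi_decode` reverses it. A sketch with placeholder bytecode, assuming the `Router` module is in scope:

```rust
use alloy_sol_types::SolCall;
use alloy_primitives::Bytes;

fn main() {
    let call = Router::arbitaryCallOutCall { code: Bytes::from(vec![0x60, 0x00]) };
    let calldata = call.abi_encode(); // selector ++ ABI-encoded args
    let back = Router::arbitaryCallOutCall::abi_decode(&calldata, true).unwrap();
    assert_eq!(back.code, call.code);
}
```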
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct executeReturn {} + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Array, + Signature, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Vec< + ::RustType, + >, + ::RustType, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: executeCall) -> Self { + (value.transactions, value.signature) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for executeCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + transactions: tuple.0, + signature: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: executeReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for executeReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for executeCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Array, + Signature, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = executeReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "execute((uint8,bytes,address,uint256)[],(bytes32,bytes32))"; + const SELECTOR: [u8; 4] = [213u8, 242u8, 33u8, 130u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Array< + OutInstruction, + > as alloy_sol_types::SolType>::tokenize(&self.transactions), + ::tokenize(&self.signature), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `inInstruction(address,uint256,bytes)` and selector `0x0759a1a4`. +```solidity +function inInstruction(address coin, uint256 amount, bytes memory instruction) external payable; +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct inInstructionCall { + pub coin: ::alloy_sol_types::private::Address, + pub amount: ::alloy_sol_types::private::primitives::aliases::U256, + pub instruction: ::alloy_sol_types::private::Bytes, + } + ///Container type for the return parameters of the [`inInstruction(address,uint256,bytes)`](inInstructionCall) function. 
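Constructing an `execute` call shows how the pieces compose: each `OutInstruction` carries a `DestinationType` (a plain `u8` at the Rust level), and the whole batch is authorized by a single `Signature`. A hedged sketch with placeholder values:

```rust
use alloy_sol_types::SolCall;
use alloy_primitives::{Address, Bytes, FixedBytes, U256};

fn main() {
    let out = Router::OutInstruction {
        destinationType: 0u8, // DestinationType's Rust representation is u8
        destination: Bytes::from(vec![0x11; 20]),
        coin: Address::ZERO,
        value: U256::from(1_000_000u64),
    };
    let call = Router::executeCall {
        transactions: vec![out],
        signature: Router::Signature { c: FixedBytes::ZERO, s: FixedBytes::ZERO },
    };
    let calldata = call.abi_encode();
    assert_eq!(&calldata[..4], Router::executeCall::SELECTOR.as_slice());
}
```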
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct inInstructionReturn {} + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Bytes, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::Bytes, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: inInstructionCall) -> Self { + (value.coin, value.amount, value.instruction) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for inInstructionCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + coin: tuple.0, + amount: tuple.1, + instruction: tuple.2, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: inInstructionReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for inInstructionReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for inInstructionCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Bytes, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = inInstructionReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "inInstruction(address,uint256,bytes)"; + const SELECTOR: [u8; 4] = [7u8, 89u8, 161u8, 164u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.coin, + ), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.amount), + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.instruction, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `nonce()` and selector `0xaffed0e0`. +```solidity +function nonce() external view returns (uint256); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct nonceCall {} + ///Container type for the return parameters of the [`nonce()`](nonceCall) function. 
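The `payable` marker on `inInstruction` suggests two deposit paths. As an assumption from the signature alone (the contract body is not part of this file), a native ETH deposit would pass the zero address as `coin` and carry the amount in `msg.value`, while an ERC20 deposit names the token contract. A sketch under that assumption:

```rust
use alloy_primitives::{Address, Bytes, U256};

// Assumed convention, not confirmed by this diff: zero address = ETH.
fn eth_in_instruction(amount: U256, instruction: Bytes) -> Router::inInstructionCall {
    Router::inInstructionCall { coin: Address::ZERO, amount, instruction }
}

fn main() {
    let call = eth_in_instruction(U256::from(1u64), Bytes::new());
    assert_eq!(call.coin, Address::ZERO);
}
```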
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct nonceReturn { + pub _0: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: nonceCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for nonceCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: nonceReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for nonceReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for nonceCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = nonceReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "nonce()"; + const SELECTOR: [u8; 4] = [175u8, 254u8, 208u8, 224u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `seraiKey()` and selector `0x9d6eea0a`. +```solidity +function seraiKey() external view returns (bytes32); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct seraiKeyCall {} + ///Container type for the return parameters of the [`seraiKey()`](seraiKeyCall) function. 
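View calls like `nonce()` return a single static word, which `abi_decode_returns` turns back into the typed `nonceReturn`. For example:

```rust
use alloy_sol_types::SolCall;
use alloy_primitives::U256;

fn main() {
    // The return data of nonce() is one 32-byte big-endian word.
    let raw = U256::from(42u64).to_be_bytes::<32>();
    let decoded = Router::nonceCall::abi_decode_returns(&raw, true).unwrap();
    assert_eq!(decoded._0, U256::from(42u64));
}
```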
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct seraiKeyReturn { + pub _0: ::alloy_sol_types::private::FixedBytes<32>, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: seraiKeyCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for seraiKeyCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::FixedBytes<32>,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: seraiKeyReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for seraiKeyReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for seraiKeyCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = seraiKeyReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "seraiKey()"; + const SELECTOR: [u8; 4] = [157u8, 110u8, 234u8, 10u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `smartContractNonce()` and selector `0xc3727534`. +```solidity +function smartContractNonce() external view returns (uint256); +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct smartContractNonceCall {} + ///Container type for the return parameters of the [`smartContractNonce()`](smartContractNonceCall) function. 
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct smartContractNonceReturn { + pub _0: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From + for UnderlyingRustTuple<'_> { + fn from(value: smartContractNonceCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> + for smartContractNonceCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From + for UnderlyingRustTuple<'_> { + fn from(value: smartContractNonceReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> + for smartContractNonceReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for smartContractNonceCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = smartContractNonceReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "smartContractNonce()"; + const SELECTOR: [u8; 4] = [195u8, 114u8, 117u8, 52u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + /**Function with signature `updateSeraiKey(bytes32,(bytes32,bytes32))` and selector `0xb5071c6a`. +```solidity +function updateSeraiKey(bytes32 newSeraiKey, Signature memory signature) external; +```*/ + #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct updateSeraiKeyCall { + pub newSeraiKey: ::alloy_sol_types::private::FixedBytes<32>, + pub signature: ::RustType, + } + ///Container type for the return parameters of the [`updateSeraiKey(bytes32,(bytes32,bytes32))`](updateSeraiKeyCall) function. 
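`updateSeraiKey` pairs the new key with a `Signature` whose `c`/`s` fields are consistent with a Schnorr challenge/response over 32-byte words (the same shape `execute` consumes). Since every component is static, the calldata layout is fully fixed; a sketch:

```rust
use alloy_sol_types::SolCall;
use alloy_primitives::FixedBytes;

fn main() {
    let call = Router::updateSeraiKeyCall {
        newSeraiKey: FixedBytes::ZERO,
        signature: Router::Signature { c: FixedBytes::ZERO, s: FixedBytes::ZERO },
    };
    // All three components are static 32-byte words, so the calldata is
    // exactly selector (4) + newSeraiKey (32) + c (32) + s (32).
    assert_eq!(call.abi_encode().len(), 4 + 96);
}
```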
+ #[allow(non_camel_case_types, non_snake_case)] + #[derive(Clone)] + pub struct updateSeraiKeyReturn {} + #[allow(non_camel_case_types, non_snake_case, clippy::style)] + const _: () = { + use ::alloy_sol_types as alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + Signature, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::FixedBytes<32>, + ::RustType, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: updateSeraiKeyCall) -> Self { + (value.newSeraiKey, value.signature) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for updateSeraiKeyCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + newSeraiKey: tuple.0, + signature: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion( + _t: alloy_sol_types::private::AssertTypeEq, + ) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From + for UnderlyingRustTuple<'_> { + fn from(value: updateSeraiKeyReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> + for updateSeraiKeyReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for updateSeraiKeyCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + Signature, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = updateSeraiKeyReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "updateSeraiKey(bytes32,(bytes32,bytes32))"; + const SELECTOR: [u8; 4] = [181u8, 7u8, 28u8, 106u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.newSeraiKey), + ::tokenize(&self.signature), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) + .map(Into::into) + } + } + }; + ///Container for all the [`Router`](self) function calls. + pub enum RouterCalls { + arbitaryCallOut(arbitaryCallOutCall), + execute(executeCall), + inInstruction(inInstructionCall), + nonce(nonceCall), + seraiKey(seraiKeyCall), + smartContractNonce(smartContractNonceCall), + updateSeraiKey(updateSeraiKeyCall), + } + #[automatically_derived] + impl RouterCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. 
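The `SELECTORS` table that follows is sorted bytewise, which is what lets `abi_decode_raw` binary-search it and then index a shim table laid out in that same sorted order (not declaration order: `inInstruction`'s `0x0759a1a4` comes first, `execute`'s `0xd5f22182` last). A sketch of the invariant the generated dispatch relies on:

```rust
fn main() {
    // Strictly ascending order is required for binary_search to find
    // every selector and for DECODE_SHIMS indices to line up.
    let sels = Router::RouterCalls::SELECTORS;
    assert!(sels.windows(2).all(|w| w[0] < w[1]));
    assert_eq!(sels.len(), 7);
}
```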
+ pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [7u8, 89u8, 161u8, 164u8], + [60u8, 189u8, 43u8, 246u8], + [157u8, 110u8, 234u8, 10u8], + [175u8, 254u8, 208u8, 224u8], + [181u8, 7u8, 28u8, 106u8], + [195u8, 114u8, 117u8, 52u8], + [213u8, 242u8, 33u8, 130u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for RouterCalls { + const NAME: &'static str = "RouterCalls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 7usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::arbitaryCallOut(_) => { + ::SELECTOR + } + Self::execute(_) => ::SELECTOR, + Self::inInstruction(_) => { + ::SELECTOR + } + Self::nonce(_) => ::SELECTOR, + Self::seraiKey(_) => ::SELECTOR, + Self::smartContractNonce(_) => { + ::SELECTOR + } + Self::updateSeraiKey(_) => { + ::SELECTOR + } + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(unsafe_code, non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) -> alloy_sol_types::Result] = &[ + { + fn inInstruction( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(RouterCalls::inInstruction) + } + inInstruction + }, + { + fn arbitaryCallOut( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(RouterCalls::arbitaryCallOut) + } + arbitaryCallOut + }, + { + fn seraiKey( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(RouterCalls::seraiKey) + } + seraiKey + }, + { + fn nonce( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(RouterCalls::nonce) + } + nonce + }, + { + fn updateSeraiKey( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(RouterCalls::updateSeraiKey) + } + updateSeraiKey + }, + { + fn smartContractNonce( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(RouterCalls::smartContractNonce) + } + smartContractNonce + }, + { + fn execute( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(RouterCalls::execute) + } + execute + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err( + alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + ), + ); + }; + (unsafe { DECODE_SHIMS.get_unchecked(idx) })(data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::arbitaryCallOut(inner) => { + ::abi_encoded_size( + inner, + ) + } + Self::execute(inner) => { + ::abi_encoded_size(inner) + } + Self::inInstruction(inner) => { + ::abi_encoded_size( + inner, + ) + } + Self::nonce(inner) => { + ::abi_encoded_size(inner) + } + Self::seraiKey(inner) => { + ::abi_encoded_size(inner) + } + Self::smartContractNonce(inner) => { + ::abi_encoded_size( + inner, + ) + } + Self::updateSeraiKey(inner) => { + ::abi_encoded_size( + inner, + ) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::arbitaryCallOut(inner) => { 
+ ::abi_encode_raw( + inner, + out, + ) + } + Self::execute(inner) => { + ::abi_encode_raw(inner, out) + } + Self::inInstruction(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + Self::nonce(inner) => { + ::abi_encode_raw(inner, out) + } + Self::seraiKey(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + Self::smartContractNonce(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + Self::updateSeraiKey(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + } + } + } + ///Container for all the [`Router`](self) custom errors. + pub enum RouterErrors { + FailedTransfer(FailedTransfer), + InvalidAmount(InvalidAmount), + InvalidSignature(InvalidSignature), + } + #[automatically_derived] + impl RouterErrors { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [44u8, 82u8, 17u8, 198u8], + [139u8, 170u8, 87u8, 159u8], + [191u8, 168u8, 113u8, 197u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for RouterErrors { + const NAME: &'static str = "RouterErrors"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 3usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::FailedTransfer(_) => { + ::SELECTOR + } + Self::InvalidAmount(_) => { + ::SELECTOR + } + Self::InvalidSignature(_) => { + ::SELECTOR + } + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(unsafe_code, non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) -> alloy_sol_types::Result] = &[ + { + fn InvalidAmount( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(RouterErrors::InvalidAmount) + } + InvalidAmount + }, + { + fn InvalidSignature( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(RouterErrors::InvalidSignature) + } + InvalidSignature + }, + { + fn FailedTransfer( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, + validate, + ) + .map(RouterErrors::FailedTransfer) + } + FailedTransfer + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err( + alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + ), + ); + }; + (unsafe { DECODE_SHIMS.get_unchecked(idx) })(data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::FailedTransfer(inner) => { + ::abi_encoded_size( + inner, + ) + } + Self::InvalidAmount(inner) => { + ::abi_encoded_size(inner) + } + Self::InvalidSignature(inner) => { + ::abi_encoded_size( + inner, + ) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::FailedTransfer(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + Self::InvalidAmount(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + Self::InvalidSignature(inner) => { + ::abi_encode_raw( + inner, + out, + ) + } + } + } + } + ///Container for all the [`Router`](self) events. 
+ pub enum RouterEvents { + Executed(Executed), + InInstruction(InInstruction), + SeraiKeyUpdated(SeraiKeyUpdated), + } + #[automatically_derived] + impl RouterEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 27u8, + 159u8, + 240u8, + 22u8, + 78u8, + 129u8, + 16u8, + 69u8, + 166u8, + 23u8, + 174u8, + 120u8, + 62u8, + 128u8, + 117u8, + 1u8, + 168u8, + 226u8, + 119u8, + 98u8, + 167u8, + 203u8, + 143u8, + 47u8, + 189u8, + 2u8, + 120u8, + 81u8, + 117u8, + 37u8, + 112u8, + 181u8, + ], + [ + 52u8, + 111u8, + 213u8, + 205u8, + 109u8, + 25u8, + 210u8, + 109u8, + 58u8, + 253u8, + 34u8, + 47u8, + 67u8, + 3u8, + 62u8, + 205u8, + 13u8, + 86u8, + 20u8, + 202u8, + 100u8, + 190u8, + 192u8, + 174u8, + 209u8, + 1u8, + 72u8, + 44u8, + 216u8, + 126u8, + 146u8, + 47u8, + ], + [ + 194u8, + 24u8, + 199u8, + 126u8, + 84u8, + 202u8, + 193u8, + 22u8, + 37u8, + 113u8, + 229u8, + 43u8, + 101u8, + 187u8, + 39u8, + 170u8, + 12u8, + 223u8, + 204u8, + 112u8, + 183u8, + 199u8, + 41u8, + 106u8, + 216u8, + 57u8, + 51u8, + 145u8, + 75u8, + 19u8, + 32u8, + 145u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for RouterEvents { + const NAME: &'static str = "RouterEvents"; + const COUNT: usize = 3usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, + data, + validate, + ) + .map(Self::Executed) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, + data, + validate, + ) + .map(Self::InInstruction) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, + data, + validate, + ) + .map(Self::SeraiKeyUpdated) + } + _ => { + alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }) + } + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for RouterEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::Executed(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::InInstruction(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::SeraiKeyUpdated(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::Executed(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::InInstruction(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::SeraiKeyUpdated(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/processor/ethereum/contracts/src/lib.rs b/processor/ethereum/contracts/src/lib.rs index d8de29b38..451760673 100644 --- a/processor/ethereum/contracts/src/lib.rs +++ b/processor/ethereum/contracts/src/lib.rs @@ -1,46 +1,21 @@ -use alloy_sol_types::sol; - #[rustfmt::skip] #[expect(warnings)] #[expect(needless_pass_by_value)] #[expect(clippy::all)] #[expect(clippy::ignored_unit_patterns)] 
#[expect(clippy::redundant_closure_for_method_calls)] -mod erc20_container { - use super::*; - sol!("contracts/IERC20.sol"); -} -pub mod erc20 { - pub const BYTECODE: &str = include_str!("../artifacts/Deployer.bin"); - pub use super::erc20_container::IERC20::*; -} +mod abigen; -#[rustfmt::skip] -#[expect(warnings)] -#[expect(needless_pass_by_value)] -#[expect(clippy::all)] -#[expect(clippy::ignored_unit_patterns)] -#[expect(clippy::redundant_closure_for_method_calls)] -mod deployer_container { - use super::*; - sol!("contracts/Deployer.sol"); +pub mod erc20 { + pub use super::abigen::erc20::IERC20::*; } pub mod deployer { - pub const BYTECODE: &str = include_str!("../artifacts/Deployer.bin"); - pub use super::deployer_container::Deployer::*; -} - -#[rustfmt::skip] -#[expect(warnings)] -#[expect(needless_pass_by_value)] -#[expect(clippy::all)] -#[expect(clippy::ignored_unit_patterns)] -#[expect(clippy::redundant_closure_for_method_calls)] -mod router_container { - use super::*; - sol!(Router, "artifacts/Router.abi"); + pub const BYTECODE: &str = + include_str!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-contracts/Deployer.bin")); + pub use super::abigen::deployer::Deployer::*; } pub mod router { - pub const BYTECODE: &str = include_str!("../artifacts/Router.bin"); - pub use super::router_container::Router::*; + pub const BYTECODE: &str = + include_str!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-contracts/Router.bin")); + pub use super::abigen::router::Router::*; } diff --git a/processor/ethereum/ethereum-serai/Cargo.toml b/processor/ethereum/ethereum-serai/Cargo.toml index f0ea323f9..a2bec4817 100644 --- a/processor/ethereum/ethereum-serai/Cargo.toml +++ b/processor/ethereum/ethereum-serai/Cargo.toml @@ -38,6 +38,7 @@ alloy-provider = { version = "0.3", default-features = false } alloy-node-bindings = { version = "0.3", default-features = false, optional = true } +ethereum-schnorr-contract = { path = "../../../networks/ethereum/schnorr", default-features = false } contracts = { package = "serai-processor-ethereum-contracts", path = "../contracts" } [dev-dependencies] diff --git a/processor/ethereum/ethereum-serai/src/crypto.rs b/processor/ethereum/ethereum-serai/src/crypto.rs index 3366b744c..d013eeffe 100644 --- a/processor/ethereum/ethereum-serai/src/crypto.rs +++ b/processor/ethereum/ethereum-serai/src/crypto.rs @@ -13,6 +13,8 @@ use frost::{ curve::{Ciphersuite, Secp256k1}, }; +pub use ethereum_schnorr_contract::*; + use alloy_core::primitives::{Parity, Signature as AlloySignature}; use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; @@ -77,11 +79,3 @@ impl Hram for EthereumHram { >::reduce_bytes(&keccak256(&data).into()) } } - -impl From<&Signature> for AbiSignature { - fn from(sig: &Signature) -> AbiSignature { - let c: [u8; 32] = sig.c.to_repr().into(); - let s: [u8; 32] = sig.s.to_repr().into(); - AbiSignature { c: c.into(), s: s.into() } - } -} diff --git a/processor/ethereum/ethereum-serai/src/machine.rs b/processor/ethereum/ethereum-serai/src/machine.rs index 0d5dc7a59..b9a0628e2 100644 --- a/processor/ethereum/ethereum-serai/src/machine.rs +++ b/processor/ethereum/ethereum-serai/src/machine.rs @@ -236,7 +236,7 @@ impl RouterCommand { writer.write_all(&[0])?; writer.write_all(&chain_id.as_le_bytes())?; writer.write_all(&nonce.as_le_bytes())?; - writer.write_all(&key.A.to_bytes()) + writer.write_all(&key.point().to_bytes()) } RouterCommand::Execute { chain_id, nonce, outs } => { writer.write_all(&[1])?; @@ -406,9 +406,9 @@ impl SignatureMachine for 
RouterCommandSignatureMachine {
     self,
     shares: HashMap,
   ) -> Result {
-    let sig = self.machine.complete(shares)?;
-    let signature = Signature::new(&self.key, &self.command.msg(), sig)
-      .expect("machine produced an invalid signature");
+    let signature = self.machine.complete(shares)?;
+    let signature = Signature::new(signature).expect("machine produced an invalid signature");
+    assert!(signature.verify(&self.key, &self.command.msg()));
     Ok(SignedRouterCommand { command: self.command, signature })
   }
 }
diff --git a/processor/ethereum/ethereum-serai/src/router.rs b/processor/ethereum/ethereum-serai/src/router.rs
index 95866e675..3dbd8fa86 100644
--- a/processor/ethereum/ethereum-serai/src/router.rs
+++ b/processor/ethereum/ethereum-serai/src/router.rs
@@ -127,7 +127,6 @@ impl InInstruction {
 pub struct Executed {
   pub tx_id: [u8; 32],
   pub nonce: u64,
-  pub signature: [u8; 64],
 }

 /// The contract Serai uses to manage its state.
@@ -142,7 +141,7 @@ impl Router {
   pub(crate) fn init_code(key: &PublicKey) -> Vec {
     let mut bytecode = Self::code();
     // Append the constructor arguments
-    bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode());
+    bytecode.extend((abi::constructorCall { initialSeraiKey: key.eth_repr().into() }).abi_encode());
     bytecode
   }

@@ -392,13 +391,9 @@ impl Router {
       let log =
         log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data;

-      let mut signature = [0; 64];
-      signature[.. 32].copy_from_slice(log.signature.c.as_ref());
-      signature[32 ..].copy_from_slice(log.signature.s.as_ref());
       res.push(Executed {
         tx_id,
         nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
-        signature,
       });
     }
   }
@@ -418,13 +413,9 @@ impl Router {
       let log =
         log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data;

-      let mut signature = [0; 64];
-      signature[.. 32].copy_from_slice(log.signature.c.as_ref());
-      signature[32 ..].copy_from_slice(log.signature.s.as_ref());
       res.push(Executed {
         tx_id,
         nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
-        signature,
       });
     }
   }
From 002cf1b1135266727c6ffddb60603ad17345f031 Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Sun, 15 Sep 2024 12:48:09 -0400
Subject: [PATCH 139/179] Remove OutInstruction's data field

It makes sense for networks which support arbitrary data to do so as part of
their address. This reduces the ability to perform DoSs, achieves better
performance, and better uses the type system (as networks we don't support
data on no longer have a data field).
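A sketch of the resulting call sites (hypothetical bindings; `gas`, `code`,
`balance`, and the address values are illustrative, not taken from this patch):

    // Networks supporting arbitrary data now express it through their address type,
    // e.g. the new Ethereum `Address::Contract` variant defined later in this patch
    let address = Address::Contract(ContractDeployment::new(gas, code).expect("code too long"));
    // Payments were previously `Payment::new(address, balance, data)`; they are now
    // solely an address and a balance
    let payment = Payment::new(address, balance);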
Updates the Ethereum address definition in serai-client accordingly --- .../ethereum/contracts/contracts/Router.sol | 2 +- processor/primitives/src/payment.rs | 16 +--- processor/scanner/src/scan/mod.rs | 11 --- processor/scanner/src/substrate/mod.rs | 2 +- processor/scheduler/smart-contract/src/lib.rs | 2 +- .../scheduler/utxo/primitives/src/tree.rs | 8 +- processor/scheduler/utxo/standard/src/lib.rs | 4 +- .../utxo/transaction-chaining/src/lib.rs | 4 +- processor/src/tests/signer.rs | 1 - processor/src/tests/wallet.rs | 2 - substrate/client/src/networks/ethereum.rs | 96 +++++++++++++++---- substrate/client/tests/burn.rs | 64 ++++++------- substrate/coins/primitives/src/lib.rs | 4 +- substrate/in-instructions/pallet/src/lib.rs | 6 +- substrate/primitives/src/lib.rs | 50 +--------- tests/coordinator/src/tests/sign.rs | 1 - tests/full-stack/src/tests/mint_and_burn.rs | 2 +- tests/processor/src/tests/send.rs | 2 +- 18 files changed, 124 insertions(+), 153 deletions(-) diff --git a/processor/ethereum/contracts/contracts/Router.sol b/processor/ethereum/contracts/contracts/Router.sol index 65541a108..1d0846983 100644 --- a/processor/ethereum/contracts/contracts/Router.sol +++ b/processor/ethereum/contracts/contracts/Router.sol @@ -192,7 +192,7 @@ contract Router { _transferOut(nextAddress, transactions[i].coin, transactions[i].value); // Perform the calls with a set gas budget - (uint24 gas, bytes memory code) = abi.decode(transactions[i].destination, (uint24, bytes)); + (uint32 gas, bytes memory code) = abi.decode(transactions[i].destination, (uint32, bytes)); address(this).call{ gas: gas }(abi.encodeWithSelector(Router.arbitaryCallOut.selector, code)); diff --git a/processor/primitives/src/payment.rs b/processor/primitives/src/payment.rs index 4c1e04f47..59b10f7f9 100644 --- a/processor/primitives/src/payment.rs +++ b/processor/primitives/src/payment.rs @@ -3,7 +3,7 @@ use std::io; use scale::{Encode, Decode, IoReader}; use borsh::{BorshSerialize, BorshDeserialize}; -use serai_primitives::{Balance, Data}; +use serai_primitives::Balance; use serai_coins_primitives::OutInstructionWithBalance; use crate::Address; @@ -13,7 +13,6 @@ use crate::Address; pub struct Payment { address: A, balance: Balance, - data: Option>, } impl TryFrom for Payment { @@ -22,15 +21,14 @@ impl TryFrom for Payment { Ok(Payment { address: out_instruction_with_balance.instruction.address.try_into().map_err(|_| ())?, balance: out_instruction_with_balance.balance, - data: out_instruction_with_balance.instruction.data.map(Data::consume), }) } } impl Payment { /// Create a new Payment. - pub fn new(address: A, balance: Balance, data: Option>) -> Self { - Payment { address, balance, data } + pub fn new(address: A, balance: Balance) -> Self { + Payment { address, balance } } /// The address to pay. @@ -41,24 +39,18 @@ impl Payment { pub fn balance(&self) -> Balance { self.balance } - /// The data to associate with this payment. - pub fn data(&self) -> &Option> { - &self.data - } /// Read a Payment. pub fn read(reader: &mut impl io::Read) -> io::Result { let address = A::deserialize_reader(reader)?; let reader = &mut IoReader(reader); let balance = Balance::decode(reader).map_err(io::Error::other)?; - let data = Option::>::decode(reader).map_err(io::Error::other)?; - Ok(Self { address, balance, data }) + Ok(Self { address, balance }) } /// Write the Payment. 
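   ///
   /// A hedged round-trip sketch (assuming some concrete address type `A` and a
   /// `Vec` buffer `buf`): `payment.write(&mut buf)?` followed by
   /// `Payment::<A>::read(&mut buf.as_slice())?` yields a `Payment` equal to the original.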
pub fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { self.address.serialize(writer)?; self.balance.encode_to(writer); - self.data.encode_to(writer); Ok(()) } } diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index c54dc3e05..b235ff154 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -4,7 +4,6 @@ use std::collections::HashMap; use scale::Decode; use serai_db::{Get, DbTxn, Db}; -use serai_primitives::MAX_DATA_LEN; use serai_in_instructions_primitives::{ Shorthand, RefundableInInstruction, InInstruction, InInstructionWithBalance, }; @@ -56,16 +55,6 @@ fn in_instruction_from_output( let presumed_origin = output.presumed_origin(); let mut data = output.data(); - let max_data_len = usize::try_from(MAX_DATA_LEN).unwrap(); - if data.len() > max_data_len { - log::info!( - "data in output {} exceeded MAX_DATA_LEN ({MAX_DATA_LEN}): {}. skipping", - hex::encode(output.id()), - data.len(), - ); - return (presumed_origin, None); - } - let shorthand = match Shorthand::decode(&mut data) { Ok(shorthand) => shorthand, Err(e) => { diff --git a/processor/scanner/src/substrate/mod.rs b/processor/scanner/src/substrate/mod.rs index a7302e5ce..89186c69f 100644 --- a/processor/scanner/src/substrate/mod.rs +++ b/processor/scanner/src/substrate/mod.rs @@ -142,7 +142,7 @@ impl ContinuallyRan for SubstrateTask { if let Some(report::ReturnInformation { address, balance }) = return_information { burns.push(OutInstructionWithBalance { - instruction: OutInstruction { address: address.into(), data: None }, + instruction: OutInstruction { address: address.into() }, balance, }); } diff --git a/processor/scheduler/smart-contract/src/lib.rs b/processor/scheduler/smart-contract/src/lib.rs index 7630a0267..0c9c690b4 100644 --- a/processor/scheduler/smart-contract/src/lib.rs +++ b/processor/scheduler/smart-contract/src/lib.rs @@ -130,7 +130,7 @@ impl> SchedulerTrait for S .returns() .iter() .map(|to_return| { - Payment::new(to_return.address().clone(), to_return.output().balance(), None) + Payment::new(to_return.address().clone(), to_return.output().balance()) }) .collect::>(), ), diff --git a/processor/scheduler/utxo/primitives/src/tree.rs b/processor/scheduler/utxo/primitives/src/tree.rs index b52f3ba3b..d5b47309e 100644 --- a/processor/scheduler/utxo/primitives/src/tree.rs +++ b/processor/scheduler/utxo/primitives/src/tree.rs @@ -115,11 +115,7 @@ impl TreeTransaction { .filter_map(|(payment, amount)| { amount.map(|amount| { // The existing payment, with the new amount - Payment::new( - payment.address().clone(), - Balance { coin, amount: Amount(amount) }, - payment.data().clone(), - ) + Payment::new(payment.address().clone(), Balance { coin, amount: Amount(amount) }) }) }) .collect() @@ -130,7 +126,7 @@ impl TreeTransaction { .filter_map(|amount| { amount.map(|amount| { // A branch output with the new amount - Payment::new(branch_address.clone(), Balance { coin, amount: Amount(amount) }, None) + Payment::new(branch_address.clone(), Balance { coin, amount: Amount(amount) }) }) }) .collect() diff --git a/processor/scheduler/utxo/standard/src/lib.rs b/processor/scheduler/utxo/standard/src/lib.rs index dc2ccb064..e826c300f 100644 --- a/processor/scheduler/utxo/standard/src/lib.rs +++ b/processor/scheduler/utxo/standard/src/lib.rs @@ -489,7 +489,7 @@ impl> SchedulerTrait for Schedul &mut 0, block, vec![forward.clone()], - vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)], + 
vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance())],
             None,
           )
           .await?
@@ -501,7 +501,7 @@ impl> SchedulerTrait for Schedul
     for to_return in update.returns() {
       let key = to_return.output().key();
       let out_instruction =
-        Payment::new(to_return.address().clone(), to_return.output().balance(), None);
+        Payment::new(to_return.address().clone(), to_return.output().balance());
       let Some(plan) = self
         .planner
         .plan_transaction_with_fee_amortization(
diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs
index 93bdf1f39..bb39dcd30 100644
--- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs
+++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs
@@ -507,7 +507,7 @@ impl>> Sched
         &mut 0,
         block,
         vec![forward.clone()],
-        vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)],
+        vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance())],
         None,
       )
       .await?
@@ -519,7 +519,7 @@ impl>> Sched
     for to_return in update.returns() {
       let key = to_return.output().key();
       let out_instruction =
-        Payment::new(to_return.address().clone(), to_return.output().balance(), None);
+        Payment::new(to_return.address().clone(), to_return.output().balance());
       let Some(plan) = self
         .planner
         .plan_transaction_with_fee_amortization(
diff --git a/processor/src/tests/signer.rs b/processor/src/tests/signer.rs
index 77307ef26..6b4456081 100644
--- a/processor/src/tests/signer.rs
+++ b/processor/src/tests/signer.rs
@@ -184,7 +184,6 @@ pub async fn test_signer(
   let mut scheduler = N::Scheduler::new::(&mut txn, key, N::NETWORK);
   let payments = vec![Payment {
     address: N::external_address(&network, key).await,
-    data: None,
     balance: Balance {
       coin: match N::NETWORK {
         NetworkId::Serai => panic!("test_signer called with Serai"),
diff --git a/processor/src/tests/wallet.rs b/processor/src/tests/wallet.rs
index 86a27349d..0451f30c3 100644
--- a/processor/src/tests/wallet.rs
+++ b/processor/src/tests/wallet.rs
@@ -88,7 +88,6 @@ pub async fn test_wallet(
     outputs.clone(),
     vec![Payment {
       address: N::external_address(&network, key).await,
-      data: None,
       balance: Balance {
         coin: match N::NETWORK {
           NetworkId::Serai => panic!("test_wallet called with Serai"),
@@ -116,7 +115,6 @@ pub async fn test_wallet(
     plans[0].payments,
     vec![Payment {
       address: N::external_address(&network, key).await,
-      data: None,
       balance: Balance {
         coin: match N::NETWORK {
           NetworkId::Serai => panic!("test_wallet called with Serai"),
diff --git a/substrate/client/src/networks/ethereum.rs b/substrate/client/src/networks/ethereum.rs
index 092851699..28ada6356 100644
--- a/substrate/client/src/networks/ethereum.rs
+++ b/substrate/client/src/networks/ethereum.rs
@@ -1,35 +1,93 @@
-use core::{str::FromStr, fmt};
+use core::str::FromStr;
+use std::io::Read;

 use borsh::{BorshSerialize, BorshDeserialize};

-use crate::primitives::ExternalAddress;
+use crate::primitives::{MAX_ADDRESS_LEN, ExternalAddress};

-/// A representation of an Ethereum address.
-#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
-pub struct Address([u8; 20]);
+#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
+pub struct ContractDeployment {
+  /// The gas limit to use for this contract's execution.
+  ///
+  /// This MUST be less than the Serai gas limit. The cost of it will be deducted from the amount
+  /// transferred.
+  gas: u32,
+  /// The initialization code of the contract to deploy.
+ /// + /// This contract will be deployed (executing the initialization code). No further calls will + /// be made. + code: Vec, +} -impl From<[u8; 20]> for Address { - fn from(address: [u8; 20]) -> Self { - Self(address) +/// A contract to deploy, enabling executing arbitrary code. +impl ContractDeployment { + pub fn new(gas: u32, code: Vec) -> Option { + // The max address length, minus the type byte, minus the size of the gas + const MAX_CODE_LEN: usize = (MAX_ADDRESS_LEN as usize) - (1 + core::mem::size_of::()); + if code.len() > MAX_CODE_LEN { + None?; + } + Some(Self { gas, code }) } } -impl From
for [u8; 20] { - fn from(address: Address) -> Self { - address.0 +/// A representation of an Ethereum address. +#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub enum Address { + /// A traditional address. + Address([u8; 20]), + /// A contract to deploy, enabling executing arbitrary code. + Contract(ContractDeployment), +} + +impl From<[u8; 20]> for Address { + fn from(address: [u8; 20]) -> Self { + Address::Address(address) } } impl TryFrom for Address { type Error = (); fn try_from(data: ExternalAddress) -> Result { - Ok(Self(data.as_ref().try_into().map_err(|_| ())?)) + let mut kind = [0xff]; + let mut reader: &[u8] = data.as_ref(); + reader.read_exact(&mut kind).map_err(|_| ())?; + Ok(match kind[0] { + 0 => { + let mut address = [0xff; 20]; + reader.read_exact(&mut address).map_err(|_| ())?; + Address::Address(address) + } + 1 => { + let mut gas = [0xff; 4]; + reader.read_exact(&mut gas).map_err(|_| ())?; + // The code is whatever's left since the ExternalAddress is a delimited container of + // appropriately bounded length + Address::Contract(ContractDeployment { + gas: u32::from_le_bytes(gas), + code: reader.to_vec(), + }) + } + _ => Err(())?, + }) } } impl From
for ExternalAddress { fn from(address: Address) -> ExternalAddress { - // This is 20 bytes which is less than MAX_ADDRESS_LEN - ExternalAddress::new(address.0.to_vec()).unwrap() + let mut res = Vec::with_capacity(1 + 20); + match address { + Address::Address(address) => { + res.push(0); + res.extend(&address); + } + Address::Contract(ContractDeployment { gas, code }) => { + res.push(1); + res.extend(&gas.to_le_bytes()); + res.extend(&code); + } + } + // We only construct addresses whose code is small enough this can safely be constructed + ExternalAddress::new(res).unwrap() } } @@ -40,12 +98,8 @@ impl FromStr for Address { if address.len() != 40 { Err(())? }; - Ok(Self(hex::decode(address.to_lowercase()).map_err(|_| ())?.try_into().unwrap())) - } -} - -impl fmt::Display for Address { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "0x{}", hex::encode(self.0)) + Ok(Address::Address( + hex::decode(address.to_lowercase()).map_err(|_| ())?.try_into().map_err(|_| ())?, + )) } } diff --git a/substrate/client/tests/burn.rs b/substrate/client/tests/burn.rs index a30dabec1..b8b849d3c 100644 --- a/substrate/client/tests/burn.rs +++ b/substrate/client/tests/burn.rs @@ -12,7 +12,7 @@ use sp_core::Pair; use serai_client::{ primitives::{ - Amount, NetworkId, Coin, Balance, BlockHash, SeraiAddress, Data, ExternalAddress, + Amount, NetworkId, Coin, Balance, BlockHash, SeraiAddress, ExternalAddress, insecure_pair_from_name, }, in_instructions::{ @@ -55,39 +55,35 @@ serai_test!( let block = provide_batch(&serai, batch.clone()).await; let instruction = { - let serai = serai.as_of(block); - let batches = serai.in_instructions().batch_events().await.unwrap(); - assert_eq!( - batches, - vec![InInstructionsEvent::Batch { - network, - id, - block: block_hash, - instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), - }] - ); - - assert_eq!( - serai.coins().mint_events().await.unwrap(), - vec![CoinsEvent::Mint { to: address, balance }] - ); - assert_eq!(serai.coins().coin_supply(coin).await.unwrap(), amount); - assert_eq!(serai.coins().coin_balance(coin, address).await.unwrap(), amount); - - // Now burn it - let mut rand_bytes = vec![0; 32]; - OsRng.fill_bytes(&mut rand_bytes); - let external_address = ExternalAddress::new(rand_bytes).unwrap(); - - let mut rand_bytes = vec![0; 32]; - OsRng.fill_bytes(&mut rand_bytes); - let data = Data::new(rand_bytes).unwrap(); - - OutInstructionWithBalance { - balance, - instruction: OutInstruction { address: external_address, data: Some(data) }, - } -}; + let serai = serai.as_of(block); + let batches = serai.in_instructions().batch_events().await.unwrap(); + assert_eq!( + batches, + vec![InInstructionsEvent::Batch { + network, + id, + block: block_hash, + instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), + }] + ); + + assert_eq!( + serai.coins().mint_events().await.unwrap(), + vec![CoinsEvent::Mint { to: address, balance }] + ); + assert_eq!(serai.coins().coin_supply(coin).await.unwrap(), amount); + assert_eq!(serai.coins().coin_balance(coin, address).await.unwrap(), amount); + + // Now burn it + let mut rand_bytes = vec![0; 32]; + OsRng.fill_bytes(&mut rand_bytes); + let external_address = ExternalAddress::new(rand_bytes).unwrap(); + + OutInstructionWithBalance { + balance, + instruction: OutInstruction { address: external_address }, + } + }; let block = publish_tx( &serai, diff --git a/substrate/coins/primitives/src/lib.rs b/substrate/coins/primitives/src/lib.rs index a7b45cf0c..53db73820 100644 --- 
a/substrate/coins/primitives/src/lib.rs +++ b/substrate/coins/primitives/src/lib.rs @@ -13,17 +13,17 @@ use serde::{Serialize, Deserialize}; use scale::{Encode, Decode, MaxEncodedLen}; use scale_info::TypeInfo; -use serai_primitives::{Balance, SeraiAddress, ExternalAddress, Data, system_address}; +use serai_primitives::{Balance, SeraiAddress, ExternalAddress, system_address}; pub const FEE_ACCOUNT: SeraiAddress = system_address(b"Coins-fees"); +// TODO: Replace entirely with just Address #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct OutInstruction { pub address: ExternalAddress, - pub data: Option, } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] diff --git a/substrate/in-instructions/pallet/src/lib.rs b/substrate/in-instructions/pallet/src/lib.rs index f90ae4122..1cb05c409 100644 --- a/substrate/in-instructions/pallet/src/lib.rs +++ b/substrate/in-instructions/pallet/src/lib.rs @@ -205,11 +205,7 @@ pub mod pallet { let coin_balance = Coins::::balance(IN_INSTRUCTION_EXECUTOR.into(), out_balance.coin); let instruction = OutInstructionWithBalance { - instruction: OutInstruction { - address: out_address.as_external().unwrap(), - // TODO: Properly pass data. Replace address with an OutInstruction entirely? - data: None, - }, + instruction: OutInstruction { address: out_address.as_external().unwrap() }, balance: Balance { coin: out_balance.coin, amount: coin_balance }, }; Coins::::burn_with_instruction(origin.into(), instruction)?; diff --git a/substrate/primitives/src/lib.rs b/substrate/primitives/src/lib.rs index 2cf37e009..b2515a7e3 100644 --- a/substrate/primitives/src/lib.rs +++ b/substrate/primitives/src/lib.rs @@ -59,10 +59,7 @@ pub fn borsh_deserialize_bounded_vec for ExternalAddress { } } -// Should be enough for a Uniswap v3 call -pub const MAX_DATA_LEN: u32 = 512; -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Data( - #[cfg_attr( - feature = "borsh", - borsh( - serialize_with = "borsh_serialize_bounded_vec", - deserialize_with = "borsh_deserialize_bounded_vec" - ) - )] - BoundedVec>, -); - -#[cfg(feature = "std")] -impl Zeroize for Data { - fn zeroize(&mut self) { - self.0.as_mut().zeroize() - } -} - -impl Data { - #[cfg(feature = "std")] - pub fn new(data: Vec) -> Result { - Ok(Data(data.try_into().map_err(|_| "data length exceeds {MAX_DATA_LEN}")?)) - } - - pub fn data(&self) -> &[u8] { - self.0.as_ref() - } - - #[cfg(feature = "std")] - pub fn consume(self) -> Vec { - self.0.into_inner() - } -} - -impl AsRef<[u8]> for Data { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - /// Lexicographically reverses a given byte array. 
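 ///
 /// A hedged example (assuming a byte-wise complement implementation): `[0x00, 0x01]`
 /// maps to `[0xff, 0xfe]`, so `a < b` lexicographically exactly when
 /// `reverse_lexicographic_order(a) > reverse_lexicographic_order(b)`.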
pub fn reverse_lexicographic_order(bytes: [u8; N]) -> [u8; N] { let mut res = [0u8; N]; diff --git a/tests/coordinator/src/tests/sign.rs b/tests/coordinator/src/tests/sign.rs index db8a72034..6e9142fec 100644 --- a/tests/coordinator/src/tests/sign.rs +++ b/tests/coordinator/src/tests/sign.rs @@ -247,7 +247,6 @@ async fn sign_test() { balance, instruction: OutInstruction { address: ExternalAddress::new(b"external".to_vec()).unwrap(), - data: None, }, }; serai diff --git a/tests/full-stack/src/tests/mint_and_burn.rs b/tests/full-stack/src/tests/mint_and_burn.rs index ce19808fd..8987facc7 100644 --- a/tests/full-stack/src/tests/mint_and_burn.rs +++ b/tests/full-stack/src/tests/mint_and_burn.rs @@ -493,7 +493,7 @@ async fn mint_and_burn_test() { move |nonce, coin, amount, address| async move { let out_instruction = OutInstructionWithBalance { balance: Balance { coin, amount: Amount(amount) }, - instruction: OutInstruction { address, data: None }, + instruction: OutInstruction { address }, }; serai diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs index 8dfb53535..4c811e2b5 100644 --- a/tests/processor/src/tests/send.rs +++ b/tests/processor/src/tests/send.rs @@ -246,7 +246,7 @@ fn send_test() { }, block: substrate_block_num, burns: vec![OutInstructionWithBalance { - instruction: OutInstruction { address: wallet.address(), data: None }, + instruction: OutInstruction { address: wallet.address() }, balance: Balance { coin: balance_sent.coin, amount: amount_minted }, }], batches: vec![batch.batch.id], From ae49e8e3bfdb509b6a2498abf0154d58e0f7841c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 15 Sep 2024 17:13:10 -0400 Subject: [PATCH 140/179] Break Ethereum Deployer into crate --- .github/workflows/tests.yml | 3 + Cargo.lock | 26 +++++ Cargo.toml | 2 + deny.toml | 4 +- .../ethereum/contracts/contracts/Deployer.sol | 52 --------- processor/ethereum/contracts/src/lib.rs | 5 - processor/ethereum/deployer/Cargo.toml | 34 ++++++ processor/ethereum/deployer/LICENSE | 15 +++ processor/ethereum/deployer/README.md | 23 ++++ processor/ethereum/deployer/build.rs | 5 + .../ethereum/deployer/contracts/Deployer.sol | 81 ++++++++++++++ processor/ethereum/deployer/src/lib.rs | 104 ++++++++++++++++++ processor/ethereum/ethereum-serai/Cargo.toml | 2 +- .../ethereum/ethereum-serai/src/crypto.rs | 23 ++-- processor/ethereum/ethereum-serai/src/lib.rs | 2 + .../ethereum/ethereum-serai/src/machine.rs | 13 +++ processor/ethereum/primitives/Cargo.toml | 24 ++++ processor/ethereum/primitives/LICENSE | 15 +++ processor/ethereum/primitives/README.md | 3 + processor/ethereum/primitives/src/lib.rs | 49 +++++++++ 20 files changed, 411 insertions(+), 74 deletions(-) delete mode 100644 processor/ethereum/contracts/contracts/Deployer.sol create mode 100644 processor/ethereum/deployer/Cargo.toml create mode 100644 processor/ethereum/deployer/LICENSE create mode 100644 processor/ethereum/deployer/README.md create mode 100644 processor/ethereum/deployer/build.rs create mode 100644 processor/ethereum/deployer/contracts/Deployer.sol create mode 100644 processor/ethereum/deployer/src/lib.rs create mode 100644 processor/ethereum/primitives/Cargo.toml create mode 100644 processor/ethereum/primitives/LICENSE create mode 100644 processor/ethereum/primitives/README.md create mode 100644 processor/ethereum/primitives/src/lib.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 9b90ee916..382d9a2f2 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ 
-53,6 +53,9 @@ jobs: -p serai-processor-bin \ -p serai-bitcoin-processor \ -p serai-processor-ethereum-contracts \ + -p serai-processor-ethereum-primitives \ + -p serai-processor-ethereum-deployer \ + -p ethereum-serai \ -p serai-ethereum-processor \ -p serai-monero-processor \ -p tendermint-machine \ diff --git a/Cargo.lock b/Cargo.lock index d62240930..0253cf32b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8721,6 +8721,32 @@ dependencies = [ "syn-solidity", ] +[[package]] +name = "serai-processor-ethereum-deployer" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-core", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-sol-macro", + "alloy-sol-types", + "alloy-transport", + "build-solidity-contracts", + "serai-processor-ethereum-primitives", +] + +[[package]] +name = "serai-processor-ethereum-primitives" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-core", + "group", + "k256", +] + [[package]] name = "serai-processor-frost-attempt-manager" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index b30112b2b..c00106594 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,6 +88,8 @@ members = [ "processor/bin", "processor/bitcoin", "processor/ethereum/contracts", + "processor/ethereum/primitives", + "processor/ethereum/deployer", "processor/ethereum/ethereum-serai", "processor/ethereum", "processor/monero", diff --git a/deny.toml b/deny.toml index ec948fef7..8b630fb9a 100644 --- a/deny.toml +++ b/deny.toml @@ -59,8 +59,10 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-signers" }, { allow = ["AGPL-3.0"], name = "serai-bitcoin-processor" }, - { allow = ["AGPL-3.0"], name = "ethereum-serai" }, { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-contracts" }, + { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-primitives" }, + { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-deployer" }, + { allow = ["AGPL-3.0"], name = "ethereum-serai" }, { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" }, { allow = ["AGPL-3.0"], name = "serai-monero-processor" }, diff --git a/processor/ethereum/contracts/contracts/Deployer.sol b/processor/ethereum/contracts/contracts/Deployer.sol deleted file mode 100644 index 1c05e38af..000000000 --- a/processor/ethereum/contracts/contracts/Deployer.sol +++ /dev/null @@ -1,52 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -pragma solidity ^0.8.26; - -/* -The expected deployment process of the Router is as follows: - -1) A transaction deploying Deployer is made. Then, a deterministic signature is - created such that an account with an unknown private key is the creator of - the contract. Anyone can fund this address, and once anyone does, the - transaction deploying Deployer can be published by anyone. No other - transaction may be made from that account. - -2) Anyone deploys the Router through the Deployer. This uses a sequential nonce - such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible. - While such attacks would still be feasible if the Deployer's address was - controllable, the usage of a deterministic signature with a NUMS method - prevents that. - -This doesn't have any denial-of-service risks and will resolve once anyone steps -forward as deployer. This does fail to guarantee an identical address across -every chain, though it enables letting anyone efficiently ask the Deployer for -the address (with the Deployer having an identical address on every chain). 
- -Unfortunately, guaranteeing identical addresses aren't feasible. We'd need the -Deployer contract to use a consistent salt for the Router, yet the Router must -be deployed with a specific public key for Serai. Since Ethereum isn't able to -determine a valid public key (one the result of a Serai DKG) from a dishonest -public key, we have to allow multiple deployments with Serai being the one to -determine which to use. - -The alternative would be to have a council publish the Serai key on-Ethereum, -with Serai verifying the published result. This would introduce a DoS risk in -the council not publishing the correct key/not publishing any key. -*/ - -contract Deployer { - event Deployment(bytes32 indexed init_code_hash, address created); - - error DeploymentFailed(); - - function deploy(bytes memory init_code) external { - address created; - assembly { - created := create(0, add(init_code, 0x20), mload(init_code)) - } - if (created == address(0)) { - revert DeploymentFailed(); - } - // These may be emitted out of order upon re-entrancy - emit Deployment(keccak256(init_code), created); - } -} diff --git a/processor/ethereum/contracts/src/lib.rs b/processor/ethereum/contracts/src/lib.rs index 451760673..d0a5c076e 100644 --- a/processor/ethereum/contracts/src/lib.rs +++ b/processor/ethereum/contracts/src/lib.rs @@ -9,11 +9,6 @@ mod abigen; pub mod erc20 { pub use super::abigen::erc20::IERC20::*; } -pub mod deployer { - pub const BYTECODE: &str = - include_str!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-contracts/Deployer.bin")); - pub use super::abigen::deployer::Deployer::*; -} pub mod router { pub const BYTECODE: &str = include_str!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-contracts/Router.bin")); diff --git a/processor/ethereum/deployer/Cargo.toml b/processor/ethereum/deployer/Cargo.toml new file mode 100644 index 000000000..9b0ed1464 --- /dev/null +++ b/processor/ethereum/deployer/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "serai-processor-ethereum-deployer" +version = "0.1.0" +description = "The deployer for Serai's Ethereum contracts" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/deployer" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +alloy-core = { version = "0.8", default-features = false } +alloy-consensus = { version = "0.3", default-features = false } + +alloy-sol-types = { version = "0.8", default-features = false } +alloy-sol-macro = { version = "0.8", default-features = false } + +alloy-rpc-types-eth = { version = "0.3", default-features = false } +alloy-transport = { version = "0.3", default-features = false } +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-provider = { version = "0.3", default-features = false } + +ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false } + +[build-dependencies] +build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts", default-features = false } diff --git a/processor/ethereum/deployer/LICENSE b/processor/ethereum/deployer/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/ethereum/deployer/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke 
Parker
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License Version 3 as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see .
diff --git a/processor/ethereum/deployer/README.md b/processor/ethereum/deployer/README.md
new file mode 100644
index 000000000..6b4396506
--- /dev/null
+++ b/processor/ethereum/deployer/README.md
@@ -0,0 +1,23 @@
+# Ethereum Smart Contracts Deployer
+
+The deployer for Serai's Ethereum contracts.
+
+## Goals
+
+It should be possible to efficiently locate the Serai Router on a blockchain with the EVM, without
+relying on any centralized (or even federated) entities. While deploying and locating an instance of
+the Router would be trivial by using a fixed signature for the deployment transaction, the Router
+must be constructed with the correct key for the Serai network (or set to have the correct key
+post-construction). Since this cannot be guaranteed to occur, the process must be retryable and the
+first successful invocation must be efficiently findable.
+
+## Methodology
+
+We define a contract, the Deployer, to deploy the router. This contract could use `CREATE2` with the
+key representing Serai as the salt, yet this would be open to collision attacks with just 2**80
+complexity. Instead, we use `CREATE`, which would require 2**80 on-chain transactions (infeasible)
+to use as the basis of a collision.
+
+In order to efficiently find the contract for a key, the Deployer contract saves the addresses of
+deployed contracts (indexed by the initialization code hash). This allows using a single call to a
+contract with a known address to find the proper Router.
diff --git a/processor/ethereum/deployer/build.rs b/processor/ethereum/deployer/build.rs
new file mode 100644
index 000000000..1906f1df5
--- /dev/null
+++ b/processor/ethereum/deployer/build.rs
@@ -0,0 +1,5 @@
+fn main() {
+  let artifacts_path =
+    std::env::var("OUT_DIR").unwrap().to_string() + "/serai-processor-ethereum-deployer";
+  build_solidity_contracts::build(&[], "contracts", &artifacts_path).unwrap();
+}
diff --git a/processor/ethereum/deployer/contracts/Deployer.sol b/processor/ethereum/deployer/contracts/Deployer.sol
new file mode 100644
index 000000000..24ea1cb40
--- /dev/null
+++ b/processor/ethereum/deployer/contracts/Deployer.sol
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+pragma solidity ^0.8.26;
+
+/*
+  The expected deployment process of the Router is as follows:
+
+  1) A transaction deploying Deployer is made. Then, a deterministic signature is
+     created such that an account with an unknown private key is the creator of
+     the contract. Anyone can fund this address, and once anyone does, the
+     transaction deploying Deployer can be published by anyone. No other
+     transaction may be made from that account.
+
+  2) Anyone deploys the Router through the Deployer. This uses a sequential nonce
+     such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible.
+     While such attacks would still be feasible if the Deployer's address was
+     controllable, the usage of a deterministic signature with a NUMS method
+     prevents that.
+
+  This doesn't have any denial-of-service risks and will resolve once anyone steps
+  forward as deployer. This does fail to guarantee an identical address across
+  every chain, though it enables letting anyone efficiently ask the Deployer for
+  the address (with the Deployer having an identical address on every chain).
+
+  Unfortunately, guaranteeing identical addresses isn't feasible. We'd need the
+  Deployer contract to use a consistent salt for the Router, yet the Router must
+  be deployed with a specific public key for Serai. Since Ethereum isn't able to
+  determine a valid public key (one that is the result of a Serai DKG) from a
+  dishonest public key, we have to allow multiple deployments with Serai being
+  the one to determine which to use.
+
+  The alternative would be to have a council publish the Serai key on-Ethereum,
+  with Serai verifying the published result. This would introduce a DoS risk in
+  the council not publishing the correct key/not publishing any key.
+*/
+
+contract Deployer {
+  struct Deployment {
+    uint64 block_number;
+    address created_contract;
+  }
+  mapping(bytes32 => Deployment) public deployments;
+
+  error Reentrancy();
+  error PriorDeployed();
+  error DeploymentFailed();
+
+  function deploy(bytes memory init_code) external {
+    // Prevent re-entrancy
+    // If we did allow it, one could deploy the same contract multiple times (with one overwriting
+    // the other's set value in storage)
+    bool called;
+    // This contract doesn't have any other use of transient storage, nor is it to be inherited,
+    // making this usage of the zero address safe
+    assembly { called := tload(0) }
+    if (called) {
+      revert Reentrancy();
+    }
+    assembly { tstore(0, 1) }
+
+    // Check this wasn't prior deployed (the mapping's default value has the zero address)
+    bytes32 init_code_hash = keccak256(init_code);
+    Deployment memory deployment = deployments[init_code_hash];
+    if (deployment.created_contract != address(0)) {
+      revert PriorDeployed();
+    }
+
+    // Deploy the contract
+    address created_contract;
+    assembly {
+      created_contract := create(0, add(init_code, 0x20), mload(init_code))
+    }
+    if (created_contract == address(0)) {
+      revert DeploymentFailed();
+    }
+
+    // Set the deployment to storage
+    deployment.block_number = uint64(block.number);
+    deployment.created_contract = created_contract;
+    deployments[init_code_hash] = deployment;
+  }
+}
diff --git a/processor/ethereum/deployer/src/lib.rs b/processor/ethereum/deployer/src/lib.rs
new file mode 100644
index 000000000..bf2d1a9cf
--- /dev/null
+++ b/processor/ethereum/deployer/src/lib.rs
@@ -0,0 +1,104 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+
+use std::sync::Arc;
+
+use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};
+use alloy_consensus::{Signed, TxLegacy};
+
+use alloy_sol_types::SolCall;
+
+use alloy_rpc_types_eth::{TransactionInput, TransactionRequest};
+use alloy_transport::{TransportErrorKind, RpcError};
+use alloy_simple_request_transport::SimpleRequest;
+use alloy_provider::{Provider, RootProvider};
+
+#[rustfmt::skip]
+#[expect(warnings)]
+#[expect(needless_pass_by_value)]
+#[expect(clippy::all)]
+#[expect(clippy::ignored_unit_patterns)]
+#[expect(clippy::redundant_closure_for_method_calls)]
+mod abi {
+  alloy_sol_macro::sol!("contracts/Deployer.sol");
+}
+
+/// The Deployer contract for the Serai Router contract.
+///
+/// This Deployer has a deterministic address, letting it be immediately identified on any
It then supports retrieving the Router contract's address (which isn't +/// deterministic) using a single call. +#[derive(Clone, Debug)] +pub struct Deployer; +impl Deployer { + /// Obtain the transaction to deploy this contract, already signed. + /// + /// The account this transaction is sent from (which is populated in `from`) must be sufficiently + /// funded for this transaction to be submitted. This account has no known private key to anyone + /// so ETH sent can be neither misappropriated nor returned. + pub fn deployment_tx() -> Signed { + pub const BYTECODE: &str = + include_str!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-deployer/Deployer.bin")); + let bytecode = + Bytes::from_hex(BYTECODE).expect("compiled-in Deployer bytecode wasn't valid hex"); + + let tx = TxLegacy { + chain_id: None, + nonce: 0, + // 100 gwei + gas_price: 100_000_000_000u128, + // TODO: Use a more accurate gas limit + gas_limit: 1_000_000u128, + to: TxKind::Create, + value: U256::ZERO, + input: bytecode, + }; + + ethereum_primitives::deterministically_sign(&tx) + } + + /// Obtain the deterministic address for this contract. + pub(crate) fn address() -> Address { + let deployer_deployer = + Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature"); + Address::create(&deployer_deployer, 0) + } + + /// Construct a new view of the Deployer. + pub async fn new( + provider: Arc>, + ) -> Result, RpcError> { + let address = Self::address(); + let code = provider.get_code_at(address).await?; + // Contract has yet to be deployed + if code.is_empty() { + return Ok(None); + } + Ok(Some(Self)) + } + + /// Find the deployment of a contract. + pub async fn find_deployment( + &self, + provider: Arc>, + init_code_hash: [u8; 32], + ) -> Result, RpcError> { + let call = TransactionRequest::default().to(Self::address()).input(TransactionInput::new( + abi::Deployer::deploymentsCall::new((init_code_hash.into(),)).abi_encode().into(), + )); + let bytes = provider.call(&call).await?; + let deployment = abi::Deployer::deploymentsCall::abi_decode_returns(&bytes, true) + .map_err(|e| { + TransportErrorKind::Custom( + format!("node returned a non-Deployment for function returning Deployment: {e:?}").into(), + ) + })? 
+ ._0; + + if deployment.created_contract == [0; 20] { + return Ok(None); + } + Ok(Some(deployment)) + } +} diff --git a/processor/ethereum/ethereum-serai/Cargo.toml b/processor/ethereum/ethereum-serai/Cargo.toml index a2bec4817..73c5b2672 100644 --- a/processor/ethereum/ethereum-serai/Cargo.toml +++ b/processor/ethereum/ethereum-serai/Cargo.toml @@ -3,7 +3,7 @@ name = "ethereum-serai" version = "0.1.0" description = "An Ethereum library supporting Schnorr signing and on-chain verification" license = "AGPL-3.0-only" -repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/ethereum-serai" authors = ["Luke Parker ", "Elizabeth Binks "] edition = "2021" publish = false diff --git a/processor/ethereum/ethereum-serai/src/crypto.rs b/processor/ethereum/ethereum-serai/src/crypto.rs index d013eeffe..fc51ae6bc 100644 --- a/processor/ethereum/ethereum-serai/src/crypto.rs +++ b/processor/ethereum/ethereum-serai/src/crypto.rs @@ -15,11 +15,9 @@ use frost::{ pub use ethereum_schnorr_contract::*; -use alloy_core::primitives::{Parity, Signature as AlloySignature}; +use alloy_core::primitives::{Parity, Signature as AlloySignature, Address}; use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; -use crate::abi::router::{Signature as AbiSignature}; - pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] { alloy_core::primitives::keccak256(data).into() } @@ -28,11 +26,9 @@ pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar { >::reduce_bytes(&keccak256(data).into()) } -pub fn address(point: &ProjectivePoint) -> [u8; 20] { +pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] { let encoded_point = point.to_encoded_point(false); - // Last 20 bytes of the hash of the concatenated x and y coordinates - // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point - keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap() + **Address::from_raw_public_key(&encoded_point.as_ref()[1 .. 65]) } /// Deterministically sign a transaction. @@ -64,18 +60,15 @@ pub fn deterministically_sign(tx: &TxLegacy) -> Signed { } } -/// The HRAm to use for the Schnorr contract. +/// The HRAm to use for the Schnorr Solidity library. +/// +/// This will panic if the public key being signed for is not representable within the Schnorr +/// Solidity library. 
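+///
+/// As a hedged sketch (assuming this is a direct move of the inline implementation removed
+/// below): the challenge is `reduce(keccak256(address(R) || A.x || m))`, where `address(R)`
+/// is the 20-byte Ethereum address of the nonce commitment and `A.x` is the public key's
+/// x-coordinate.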
#[derive(Clone, Default)] pub struct EthereumHram {} impl Hram for EthereumHram { #[allow(non_snake_case)] fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { - let x_coord = A.to_affine().x(); - - let mut data = address(R).to_vec(); - data.extend(x_coord.as_slice()); - data.extend(m); - - >::reduce_bytes(&keccak256(&data).into()) + Signature::challenge(*R, &PublicKey::new(*A).unwrap(), m) } } diff --git a/processor/ethereum/ethereum-serai/src/lib.rs b/processor/ethereum/ethereum-serai/src/lib.rs index 761214018..1a013ddf4 100644 --- a/processor/ethereum/ethereum-serai/src/lib.rs +++ b/processor/ethereum/ethereum-serai/src/lib.rs @@ -15,6 +15,7 @@ pub mod alloy { pub mod crypto; +/* pub(crate) mod abi { pub use contracts::erc20; pub use contracts::deployer; @@ -37,3 +38,4 @@ pub enum Error { #[error("couldn't make call/send TX")] ConnectionError, } +*/ diff --git a/processor/ethereum/ethereum-serai/src/machine.rs b/processor/ethereum/ethereum-serai/src/machine.rs index b9a0628e2..404922f56 100644 --- a/processor/ethereum/ethereum-serai/src/machine.rs +++ b/processor/ethereum/ethereum-serai/src/machine.rs @@ -25,6 +25,19 @@ use crate::{ }, }; +/// The HRAm to use for the Schnorr Solidity library. +/// +/// This will panic if the public key being signed for is not representable within the Schnorr +/// Solidity library. +#[derive(Clone, Default)] +pub struct EthereumHram {} +impl Hram for EthereumHram { + #[allow(non_snake_case)] + fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { + Signature::challenge(*R, &PublicKey::new(*A).unwrap(), m) + } +} + #[derive(Clone, PartialEq, Eq, Debug)] pub struct Call { pub to: [u8; 20], diff --git a/processor/ethereum/primitives/Cargo.toml b/processor/ethereum/primitives/Cargo.toml new file mode 100644 index 000000000..6c6ff886c --- /dev/null +++ b/processor/ethereum/primitives/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "serai-processor-ethereum-primitives" +version = "0.1.0" +description = "Primitives for Serai's Ethereum Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/primitives" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } +k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic"] } + +alloy-core = { version = "0.8", default-features = false } +alloy-consensus = { version = "0.3", default-features = false, features = ["k256"] } diff --git a/processor/ethereum/primitives/LICENSE b/processor/ethereum/primitives/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/ethereum/primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
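The crypto.rs hunk above replaces the hand-rolled HRAm with `Signature::challenge` from the ethereum-schnorr-contract crate. For reference, a minimal sketch of the challenge the deleted lines computed, assuming the `address` and `keccak256` helpers defined in that same file, could look like:

  use k256::{
    elliptic_curve::{ops::Reduce, point::AffineCoordinates},
    ProjectivePoint, Scalar, U256,
  };

  // Sketch of the replaced HRAm: c = keccak256(address(R) || A.x || m), reduced into a scalar.
  // `address` and `keccak256` are assumed to be the crypto.rs helpers shown above.
  #[allow(non_snake_case)]
  fn challenge_sketch(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
    let mut data = address(R).to_vec();
    data.extend(A.to_affine().x().as_slice());
    data.extend(m);
    <Scalar as Reduce<U256>>::reduce_bytes(&keccak256(&data).into())
  }

`Signature::challenge` is presumed to bind the same values (the nonce's address, the key's x-coordinate, and the message), which is why the swap preserves behavior for valid keys; per the new doc comment, it panics for keys not representable within the Schnorr Solidity library.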
diff --git a/processor/ethereum/primitives/README.md b/processor/ethereum/primitives/README.md
new file mode 100644
index 000000000..90da68c68
--- /dev/null
+++ b/processor/ethereum/primitives/README.md
@@ -0,0 +1,3 @@
+# Ethereum Processor Primitives
+
+This library contains miscellaneous primitives and helper functions.
diff --git a/processor/ethereum/primitives/src/lib.rs b/processor/ethereum/primitives/src/lib.rs
new file mode 100644
index 000000000..ccf413445
--- /dev/null
+++ b/processor/ethereum/primitives/src/lib.rs
@@ -0,0 +1,49 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+
+use group::ff::PrimeField;
+use k256::{elliptic_curve::ops::Reduce, U256, Scalar};
+
+use alloy_core::primitives::{Parity, Signature};
+use alloy_consensus::{SignableTransaction, Signed, TxLegacy};
+
+/// The Keccak256 hash function.
+pub fn keccak256(data: impl AsRef<[u8]>) -> [u8; 32] {
+  alloy_core::primitives::keccak256(data.as_ref()).into()
+}
+
+/// Deterministically sign a transaction.
+///
+/// This function panics if passed a transaction with a non-None chain ID.
+pub fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
+  pub fn hash_to_scalar(data: impl AsRef<[u8]>) -> Scalar {
+    <Scalar as Reduce<U256>>::reduce_bytes(&keccak256(data).into())
+  }
+
+  assert!(
+    tx.chain_id.is_none(),
+    "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)"
+  );
+
+  let sig_hash = tx.signature_hash().0;
+  let mut r = hash_to_scalar([sig_hash.as_slice(), b"r"].concat());
+  let mut s = hash_to_scalar([sig_hash.as_slice(), b"s"].concat());
+  loop {
+    // Create the signature
+    let r_bytes: [u8; 32] = r.to_repr().into();
+    let s_bytes: [u8; 32] = s.to_repr().into();
+    let v = Parity::NonEip155(false);
+    let signature = Signature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap();
+
+    // Check if this is a valid signature
+    let tx = tx.clone().into_signed(signature);
+    if tx.recover_signer().is_ok() {
+      return tx;
+    }
+
+    // Re-hash until valid
+    r = hash_to_scalar(r_bytes);
+    s = hash_to_scalar(s_bytes);
+  }
+}

From b162125d71b9fd5a0787d793cbe3df31837bd5db Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Mon, 16 Sep 2024 21:34:59 -0400
Subject: [PATCH 141/179] forge fmt

---
 .github/workflows/lint.yml                    |  9 +++++
 .../ethereum/schnorr/contracts/Schnorr.sol    | 16 ++++-----
 .../schnorr/contracts/tests/Schnorr.sol       | 11 +++---
 .../ethereum/contracts/contracts/Router.sol   | 36 ++++++++-----------
 .../contracts/contracts/tests/ERC20.sol       |  5 +++
 .../ethereum/deployer/contracts/Deployer.sol  |  9 +++--
 6 files changed, 47 insertions(+), 39 deletions(-)

diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index da0bdcfa1..63a676498 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -73,6 +73,15 @@ jobs:
       - name: Run rustfmt
         run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check

+      - name: Install foundry
+        uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
+        with:
+          version: nightly-41d4e5437107f6f42c7711123890147bc736a609
+          cache: false
+
+      - name: Run forge fmt
+        run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TABLE_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find .
-iname "*.sol") + machete: runs-on: ubuntu-latest steps: diff --git a/networks/ethereum/schnorr/contracts/Schnorr.sol b/networks/ethereum/schnorr/contracts/Schnorr.sol index 182e90e35..69dc208a2 100644 --- a/networks/ethereum/schnorr/contracts/Schnorr.sol +++ b/networks/ethereum/schnorr/contracts/Schnorr.sol @@ -4,24 +4,22 @@ pragma solidity ^0.8.26; // See https://github.com/noot/schnorr-verify for implementation details library Schnorr { // secp256k1 group order - uint256 constant private Q = - 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; + uint256 private constant Q = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; // We fix the key to have: // 1) An even y-coordinate // 2) An x-coordinate < Q - uint8 constant private KEY_PARITY = 27; + uint8 private constant KEY_PARITY = 27; // px := public key x-coordinate, where the public key has an even y-coordinate // message := the message signed // c := Schnorr signature challenge // s := Schnorr signature solution - function verify( - bytes32 px, - bytes memory message, - bytes32 c, - bytes32 s - ) internal pure returns (bool) { + function verify(bytes32 px, bytes memory message, bytes32 c, bytes32 s) + internal + pure + returns (bool) + { // ecrecover = (m, v, r, s) -> key // We instead pass the following to obtain the nonce (not the key) // Then we hash it and verify it matches the challenge diff --git a/networks/ethereum/schnorr/contracts/tests/Schnorr.sol b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol index 26be683dc..11a3c3bc9 100644 --- a/networks/ethereum/schnorr/contracts/tests/Schnorr.sol +++ b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol @@ -4,12 +4,11 @@ pragma solidity ^0.8.26; import "../Schnorr.sol"; contract TestSchnorr { - function verify( - bytes32 public_key, - bytes calldata message, - bytes32 c, - bytes32 s - ) external pure returns (bool) { + function verify(bytes32 public_key, bytes calldata message, bytes32 c, bytes32 s) + external + pure + returns (bool) + { return Schnorr.verify(public_key, message, c, s); } } diff --git a/processor/ethereum/contracts/contracts/Router.sol b/processor/ethereum/contracts/contracts/Router.sol index 1d0846983..136c1e629 100644 --- a/processor/ethereum/contracts/contracts/Router.sol +++ b/processor/ethereum/contracts/contracts/Router.sol @@ -35,7 +35,9 @@ contract Router { } event SeraiKeyUpdated(uint256 indexed nonce, bytes32 indexed key); - event InInstruction(address indexed from, address indexed coin, uint256 amount, bytes instruction); + event InInstruction( + address indexed from, address indexed coin, uint256 amount, bytes instruction + ); event Executed(uint256 indexed nonce, bytes32 indexed batch); error InvalidSignature(); @@ -62,10 +64,10 @@ contract Router { // updateSeraiKey validates the given Schnorr signature against the current public key, and if // successful, updates the contract's public key to the one specified. 
- function updateSeraiKey( - bytes32 newSeraiKey, - Signature calldata signature - ) external _updateSeraiKeyAtEndOfFn(_nonce, newSeraiKey) { + function updateSeraiKey(bytes32 newSeraiKey, Signature calldata signature) + external + _updateSeraiKeyAtEndOfFn(_nonce, newSeraiKey) + { bytes memory message = abi.encodePacked("updateSeraiKey", block.chainid, _nonce, newSeraiKey); _nonce++; @@ -74,25 +76,15 @@ contract Router { } } - function inInstruction( - address coin, - uint256 amount, - bytes memory instruction - ) external payable { + function inInstruction(address coin, uint256 amount, bytes memory instruction) external payable { if (coin == address(0)) { if (amount != msg.value) { revert InvalidAmount(); } } else { - (bool success, bytes memory res) = - address(coin).call( - abi.encodeWithSelector( - IERC20.transferFrom.selector, - msg.sender, - address(this), - amount - ) - ); + (bool success, bytes memory res) = address(coin).call( + abi.encodeWithSelector(IERC20.transferFrom.selector, msg.sender, address(this), amount) + ); // Require there was nothing returned, which is done by some non-standard tokens, or that the // ERC20 contract did in fact return true @@ -193,9 +185,9 @@ contract Router { // Perform the calls with a set gas budget (uint32 gas, bytes memory code) = abi.decode(transactions[i].destination, (uint32, bytes)); - address(this).call{ - gas: gas - }(abi.encodeWithSelector(Router.arbitaryCallOut.selector, code)); + address(this).call{ gas: gas }( + abi.encodeWithSelector(Router.arbitaryCallOut.selector, code) + ); } } } diff --git a/processor/ethereum/contracts/contracts/tests/ERC20.sol b/processor/ethereum/contracts/contracts/tests/ERC20.sol index f38bfea40..9ce4bad77 100644 --- a/processor/ethereum/contracts/contracts/tests/ERC20.sol +++ b/processor/ethereum/contracts/contracts/tests/ERC20.sol @@ -8,9 +8,11 @@ contract TestERC20 { function name() public pure returns (string memory) { return "Test ERC20"; } + function symbol() public pure returns (string memory) { return "TEST"; } + function decimals() public pure returns (uint8) { return 18; } @@ -29,11 +31,13 @@ contract TestERC20 { function balanceOf(address owner) public view returns (uint256) { return balances[owner]; } + function transfer(address to, uint256 value) public returns (bool) { balances[msg.sender] -= value; balances[to] += value; return true; } + function transferFrom(address from, address to, uint256 value) public returns (bool) { allowances[from][msg.sender] -= value; balances[from] -= value; @@ -45,6 +49,7 @@ contract TestERC20 { allowances[msg.sender][spender] = value; return true; } + function allowance(address owner, address spender) public view returns (uint256) { return allowances[owner][spender]; } diff --git a/processor/ethereum/deployer/contracts/Deployer.sol b/processor/ethereum/deployer/contracts/Deployer.sol index 24ea1cb40..ad217fdc5 100644 --- a/processor/ethereum/deployer/contracts/Deployer.sol +++ b/processor/ethereum/deployer/contracts/Deployer.sol @@ -38,6 +38,7 @@ contract Deployer { uint64 block_number; address created_contract; } + mapping(bytes32 => Deployment) public deployments; error Reentrancy(); @@ -51,11 +52,15 @@ contract Deployer { bool called; // This contract doesn't have any other use of transient storage, nor is to be inherited, making // this usage of the zero address safe - assembly { called := tload(0) } + assembly { + called := tload(0) + } if (called) { revert Reentrancy(); } - assembly { tstore(0, 1) } + assembly { + tstore(0, 1) + } // Check this wasn't prior 
deployed bytes32 init_code_hash = keccak256(init_code); From 8c79b78359f1484fa96fc4836d3a0b52e2371b44 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 16 Sep 2024 21:59:12 -0400 Subject: [PATCH 142/179] Smash ERC20 into its own library --- .github/workflows/tests.yml | 1 + Cargo.lock | 13 +++++ Cargo.toml | 1 + deny.toml | 1 + processor/ethereum/erc20/Cargo.toml | 28 ++++++++++ processor/ethereum/erc20/LICENSE | 15 +++++ processor/ethereum/erc20/README.md | 3 + .../{contracts => erc20}/contracts/IERC20.sol | 0 .../src/erc20.rs => erc20/src/lib.rs} | 56 +++++++++++++++---- 9 files changed, 108 insertions(+), 10 deletions(-) create mode 100644 processor/ethereum/erc20/Cargo.toml create mode 100644 processor/ethereum/erc20/LICENSE create mode 100644 processor/ethereum/erc20/README.md rename processor/ethereum/{contracts => erc20}/contracts/IERC20.sol (100%) rename processor/ethereum/{ethereum-serai/src/erc20.rs => erc20/src/lib.rs} (61%) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 382d9a2f2..f08b457bf 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -55,6 +55,7 @@ jobs: -p serai-processor-ethereum-contracts \ -p serai-processor-ethereum-primitives \ -p serai-processor-ethereum-deployer \ + -p serai-processor-ethereum-erc20 \ -p ethereum-serai \ -p serai-ethereum-processor \ -p serai-monero-processor \ diff --git a/Cargo.lock b/Cargo.lock index 0253cf32b..b52aca053 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8737,6 +8737,19 @@ dependencies = [ "serai-processor-ethereum-primitives", ] +[[package]] +name = "serai-processor-ethereum-erc20" +version = "0.1.0" +dependencies = [ + "alloy-core", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-sol-macro", + "alloy-sol-types", + "alloy-transport", +] + [[package]] name = "serai-processor-ethereum-primitives" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index c00106594..e2de489db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,6 +90,7 @@ members = [ "processor/ethereum/contracts", "processor/ethereum/primitives", "processor/ethereum/deployer", + "processor/ethereum/erc20", "processor/ethereum/ethereum-serai", "processor/ethereum", "processor/monero", diff --git a/deny.toml b/deny.toml index 8b630fb9a..1091d1034 100644 --- a/deny.toml +++ b/deny.toml @@ -62,6 +62,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-contracts" }, { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-primitives" }, { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-deployer" }, + { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-erc20" }, { allow = ["AGPL-3.0"], name = "ethereum-serai" }, { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" }, { allow = ["AGPL-3.0"], name = "serai-monero-processor" }, diff --git a/processor/ethereum/erc20/Cargo.toml b/processor/ethereum/erc20/Cargo.toml new file mode 100644 index 000000000..85bc83c32 --- /dev/null +++ b/processor/ethereum/erc20/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "serai-processor-ethereum-erc20" +version = "0.1.0" +description = "A library for the Serai Processor to interact with ERC20s" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/erc20" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +alloy-core = { version = "0.8", 
default-features = false } + +alloy-sol-types = { version = "0.8", default-features = false } +alloy-sol-macro = { version = "0.8", default-features = false } + +alloy-rpc-types-eth = { version = "0.3", default-features = false } +alloy-transport = { version = "0.3", default-features = false } +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-provider = { version = "0.3", default-features = false } diff --git a/processor/ethereum/erc20/LICENSE b/processor/ethereum/erc20/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/ethereum/erc20/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/ethereum/erc20/README.md b/processor/ethereum/erc20/README.md new file mode 100644 index 000000000..f1e447b06 --- /dev/null +++ b/processor/ethereum/erc20/README.md @@ -0,0 +1,3 @@ +# ERC20 + +A library for the Serai Processor to interact with ERC20s. diff --git a/processor/ethereum/contracts/contracts/IERC20.sol b/processor/ethereum/erc20/contracts/IERC20.sol similarity index 100% rename from processor/ethereum/contracts/contracts/IERC20.sol rename to processor/ethereum/erc20/contracts/IERC20.sol diff --git a/processor/ethereum/ethereum-serai/src/erc20.rs b/processor/ethereum/erc20/src/lib.rs similarity index 61% rename from processor/ethereum/ethereum-serai/src/erc20.rs rename to processor/ethereum/erc20/src/lib.rs index 6a32f7cc1..560ea86c1 100644 --- a/processor/ethereum/ethereum-serai/src/erc20.rs +++ b/processor/ethereum/erc20/src/lib.rs @@ -1,3 +1,7 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + use std::{sync::Arc, collections::HashSet}; use alloy_core::primitives::{Address, B256, U256}; @@ -5,18 +9,31 @@ use alloy_core::primitives::{Address, B256, U256}; use alloy_sol_types::{SolInterface, SolEvent}; use alloy_rpc_types_eth::Filter; +use alloy_transport::{TransportErrorKind, RpcError}; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; -use crate::Error; -pub use crate::abi::erc20 as abi; -use abi::{IERC20Calls, Transfer, transferCall, transferFromCall}; +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod abi { + alloy_sol_macro::sol!("contracts/IERC20.sol"); +} +use abi::IERC20::{IERC20Calls, Transfer, transferCall, transferFromCall}; +/// A top-level ERC20 transfer #[derive(Clone, Debug)] pub struct TopLevelErc20Transfer { + /// The transaction ID which effected this transfer. pub id: [u8; 32], + /// The address which made the transfer. pub from: [u8; 20], + /// The amount transferred. pub amount: U256, + /// The data appended after the call itself. 
pub data: Vec, } @@ -29,30 +46,43 @@ impl Erc20 { Self(provider, Address::from(&address)) } + /// Fetch all top-level transfers to the specified ERC20. pub async fn top_level_transfers( &self, block: u64, to: [u8; 20], - ) -> Result, Error> { + ) -> Result, RpcError> { let filter = Filter::new().from_block(block).to_block(block).address(self.1); let filter = filter.event_signature(Transfer::SIGNATURE_HASH); let mut to_topic = [0; 32]; to_topic[12 ..].copy_from_slice(&to); let filter = filter.topic2(B256::from(to_topic)); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + let logs = self.0.get_logs(&filter).await?; + /* + A set of all transactions we've handled a transfer from. This handles the edge case where a + top-level transfer T somehow triggers another transfer T', with equivalent contents, within + the same transaction. We only want to report one transfer as only one is top-level. + */ let mut handled = HashSet::new(); let mut top_level_transfers = vec![]; for log in logs { // Double check the address which emitted this log if log.address() != self.1 { - Err(Error::ConnectionError)?; + Err(TransportErrorKind::Custom( + "node returned logs for a different address than requested".to_string().into(), + ))?; } - let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?; - let tx = - self.0.get_transaction_by_hash(tx_id).await.ok().flatten().ok_or(Error::ConnectionError)?; + let tx_id = log.transaction_hash.ok_or_else(|| { + TransportErrorKind::Custom("log didn't specify its transaction hash".to_string().into()) + })?; + let tx = self.0.get_transaction_by_hash(tx_id).await?.ok_or_else(|| { + TransportErrorKind::Custom( + "node didn't have the transaction which emitted a log it had".to_string().into(), + ) + })?; // If this is a top-level call... if tx.to == Some(self.1) { @@ -70,7 +100,13 @@ impl Erc20 { _ => continue, }; - let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + let log = log + .log_decode::() + .map_err(|e| { + TransportErrorKind::Custom(format!("failed to decode Transfer log: {e:?}").into()) + })? 
+ .inner + .data; // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an // internal transfer From 3899db8f9dac743605597032977a8b43bb3511aa Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 17 Sep 2024 01:04:08 -0400 Subject: [PATCH 143/179] Smash out the router library --- .github/workflows/tests.yml | 1 + Cargo.lock | 26 + Cargo.toml | 1 + deny.toml | 1 + .../ethereum/ethereum-serai/src/router.rs | 434 ------------- processor/ethereum/router/Cargo.toml | 49 ++ processor/ethereum/router/LICENSE | 15 + processor/ethereum/router/README.md | 1 + processor/ethereum/router/build.rs | 42 ++ .../contracts/Router.sol | 33 +- processor/ethereum/router/src/lib.rs | 582 ++++++++++++++++++ substrate/client/Cargo.toml | 2 +- substrate/client/src/networks/ethereum.rs | 7 + 13 files changed, 749 insertions(+), 445 deletions(-) delete mode 100644 processor/ethereum/ethereum-serai/src/router.rs create mode 100644 processor/ethereum/router/Cargo.toml create mode 100644 processor/ethereum/router/LICENSE create mode 100644 processor/ethereum/router/README.md create mode 100644 processor/ethereum/router/build.rs rename processor/ethereum/{contracts => router}/contracts/Router.sol (86%) create mode 100644 processor/ethereum/router/src/lib.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index f08b457bf..e374d4f13 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -55,6 +55,7 @@ jobs: -p serai-processor-ethereum-contracts \ -p serai-processor-ethereum-primitives \ -p serai-processor-ethereum-deployer \ + -p serai-processor-ethereum-router \ -p serai-processor-ethereum-erc20 \ -p ethereum-serai \ -p serai-ethereum-processor \ diff --git a/Cargo.lock b/Cargo.lock index b52aca053..981275fba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8760,6 +8760,32 @@ dependencies = [ "k256", ] +[[package]] +name = "serai-processor-ethereum-router" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-core", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-sol-macro", + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "alloy-sol-types", + "alloy-transport", + "build-solidity-contracts", + "ethereum-schnorr-contract", + "group", + "k256", + "serai-client", + "serai-processor-ethereum-deployer", + "serai-processor-ethereum-erc20", + "serai-processor-ethereum-primitives", + "syn 2.0.77", + "syn-solidity", +] + [[package]] name = "serai-processor-frost-attempt-manager" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index e2de489db..3c203cedb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,6 +90,7 @@ members = [ "processor/ethereum/contracts", "processor/ethereum/primitives", "processor/ethereum/deployer", + "processor/ethereum/router", "processor/ethereum/erc20", "processor/ethereum/ethereum-serai", "processor/ethereum", diff --git a/deny.toml b/deny.toml index 1091d1034..9ee16043a 100644 --- a/deny.toml +++ b/deny.toml @@ -62,6 +62,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-contracts" }, { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-primitives" }, { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-deployer" }, + { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-router" }, { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-erc20" }, { allow = ["AGPL-3.0"], name = "ethereum-serai" }, { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" }, diff --git a/processor/ethereum/ethereum-serai/src/router.rs 
b/processor/ethereum/ethereum-serai/src/router.rs deleted file mode 100644 index 3dbd8fa86..000000000 --- a/processor/ethereum/ethereum-serai/src/router.rs +++ /dev/null @@ -1,434 +0,0 @@ -use std::{sync::Arc, io, collections::HashSet}; - -use k256::{ - elliptic_curve::{group::GroupEncoding, sec1}, - ProjectivePoint, -}; - -use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind}; -#[cfg(test)] -use alloy_core::primitives::B256; -use alloy_consensus::TxLegacy; - -use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; - -use alloy_rpc_types_eth::Filter; -#[cfg(test)] -use alloy_rpc_types_eth::{BlockId, TransactionRequest, TransactionInput}; -use alloy_simple_request_transport::SimpleRequest; -use alloy_provider::{Provider, RootProvider}; - -pub use crate::{ - Error, - crypto::{PublicKey, Signature}, - abi::{erc20::Transfer, router as abi}, -}; -use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum Coin { - Ether, - Erc20([u8; 20]), -} - -impl Coin { - pub fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - Ok(match kind[0] { - 0 => Coin::Ether, - 1 => { - let mut address = [0; 20]; - reader.read_exact(&mut address)?; - Coin::Erc20(address) - } - _ => Err(io::Error::other("unrecognized Coin type"))?, - }) - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - Coin::Ether => writer.write_all(&[0]), - Coin::Erc20(token) => { - writer.write_all(&[1])?; - writer.write_all(token) - } - } - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct InInstruction { - pub id: ([u8; 32], u64), - pub from: [u8; 20], - pub coin: Coin, - pub amount: U256, - pub data: Vec, - pub key_at_end_of_block: ProjectivePoint, -} - -impl InInstruction { - pub fn read(reader: &mut R) -> io::Result { - let id = { - let mut id_hash = [0; 32]; - reader.read_exact(&mut id_hash)?; - let mut id_pos = [0; 8]; - reader.read_exact(&mut id_pos)?; - let id_pos = u64::from_le_bytes(id_pos); - (id_hash, id_pos) - }; - - let mut from = [0; 20]; - reader.read_exact(&mut from)?; - - let coin = Coin::read(reader)?; - let mut amount = [0; 32]; - reader.read_exact(&mut amount)?; - let amount = U256::from_le_slice(&amount); - - let mut data_len = [0; 4]; - reader.read_exact(&mut data_len)?; - let data_len = usize::try_from(u32::from_le_bytes(data_len)) - .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?; - let mut data = vec![0; data_len]; - reader.read_exact(&mut data)?; - - let mut key_at_end_of_block = ::Repr::default(); - reader.read_exact(&mut key_at_end_of_block)?; - let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block)) - .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?; - - Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block }) - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&self.id.0)?; - writer.write_all(&self.id.1.to_le_bytes())?; - - writer.write_all(&self.from)?; - - self.coin.write(writer)?; - writer.write_all(&self.amount.as_le_bytes())?; - - writer.write_all( - &u32::try_from(self.data.len()) - .map_err(|_| { - io::Error::other("InInstruction being written had data exceeding 2**32 in length") - })? 
- .to_le_bytes(), - )?; - writer.write_all(&self.data)?; - - writer.write_all(&self.key_at_end_of_block.to_bytes()) - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Executed { - pub tx_id: [u8; 32], - pub nonce: u64, -} - -/// The contract Serai uses to manage its state. -#[derive(Clone, Debug)] -pub struct Router(Arc>, Address); -impl Router { - pub(crate) fn code() -> Vec { - let bytecode = contracts::router::BYTECODE; - Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec() - } - - pub(crate) fn init_code(key: &PublicKey) -> Vec { - let mut bytecode = Self::code(); - // Append the constructor arguments - bytecode.extend((abi::constructorCall { initialSeraiKey: key.eth_repr().into() }).abi_encode()); - bytecode - } - - // This isn't pub in order to force users to use `Deployer::find_router`. - pub(crate) fn new(provider: Arc>, address: Address) -> Self { - Self(provider, address) - } - - pub fn address(&self) -> [u8; 20] { - **self.1 - } - - /// Get the key for Serai at the specified block. - #[cfg(test)] - pub async fn serai_key(&self, at: [u8; 32]) -> Result { - let call = TransactionRequest::default() - .to(self.1) - .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into())); - let bytes = self - .0 - .call(&call) - .block(BlockId::Hash(B256::from(at).into())) - .await - .map_err(|_| Error::ConnectionError)?; - let res = - abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; - PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError) - } - - /// Get the message to be signed in order to update the key for Serai. - pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec { - let mut buffer = b"updateSeraiKey".to_vec(); - buffer.extend(&chain_id.to_be_bytes::<32>()); - buffer.extend(&nonce.to_be_bytes::<32>()); - buffer.extend(&key.eth_repr()); - buffer - } - - /// Update the key representing Serai. - pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy { - // TODO: Set a more accurate gas - TxLegacy { - to: TxKind::Call(self.1), - input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into())) - .abi_encode() - .into(), - gas_limit: 100_000, - ..Default::default() - } - } - - /// Get the current nonce for the published batches. - #[cfg(test)] - pub async fn nonce(&self, at: [u8; 32]) -> Result { - let call = TransactionRequest::default() - .to(self.1) - .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into())); - let bytes = self - .0 - .call(&call) - .block(BlockId::Hash(B256::from(at).into())) - .await - .map_err(|_| Error::ConnectionError)?; - let res = - abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; - Ok(res._0) - } - - /// Get the message to be signed in order to update the key for Serai. - pub(crate) fn execute_message( - chain_id: U256, - nonce: U256, - outs: Vec, - ) -> Vec { - ("execute".to_string(), chain_id, nonce, outs).abi_encode_params() - } - - /// Execute a batch of `OutInstruction`s. 
- pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy { - TxLegacy { - to: TxKind::Call(self.1), - input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(), - // TODO - gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()), - ..Default::default() - } - } - - pub async fn key_at_end_of_block(&self, block: u64) -> Result, Error> { - let filter = Filter::new().from_block(0).to_block(block).address(self.1); - let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); - let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - if all_keys.is_empty() { - return Ok(None); - }; - - let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?; - let last_key_x_coordinate = last_key_x_coordinate_log - .log_decode::() - .map_err(|_| Error::ConnectionError)? - .inner - .data - .key; - - let mut compressed_point = ::Repr::default(); - compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY); - compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice()); - - let key = - Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)?; - Ok(Some(key)) - } - - pub async fn in_instructions( - &self, - block: u64, - allowed_tokens: &HashSet<[u8; 20]>, - ) -> Result, Error> { - let Some(key_at_end_of_block) = self.key_at_end_of_block(block).await? else { - return Ok(vec![]); - }; - - let filter = Filter::new().from_block(block).to_block(block).address(self.1); - let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - let mut transfer_check = HashSet::new(); - let mut in_instructions = vec![]; - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(Error::ConnectionError)?; - } - - let id = ( - log.block_hash.ok_or(Error::ConnectionError)?.into(), - log.log_index.ok_or(Error::ConnectionError)?, - ); - - let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?; - let tx = self - .0 - .get_transaction_by_hash(tx_hash) - .await - .ok() - .flatten() - .ok_or(Error::ConnectionError)?; - - let log = - log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; - - let coin = if log.coin.0 == [0; 20] { - Coin::Ether - } else { - let token = *log.coin.0; - - if !allowed_tokens.contains(&token) { - continue; - } - - // If this also counts as a top-level transfer via the token, drop it - // - // Necessary in order to handle a potential edge case with some theoretical token - // implementations - // - // This will either let it be handled by the top-level transfer hook or will drop it - // entirely on the side of caution - if tx.to == Some(token.into()) { - continue; - } - - // Get all logs for this TX - let receipt = self - .0 - .get_transaction_receipt(tx_hash) - .await - .map_err(|_| Error::ConnectionError)? 
- .ok_or(Error::ConnectionError)?; - let tx_logs = receipt.inner.logs(); - - // Find a matching transfer log - let mut found_transfer = false; - for tx_log in tx_logs { - let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?; - // Ensure we didn't already use this transfer to check a distinct InInstruction event - if transfer_check.contains(&log_index) { - continue; - } - - // Check if this log is from the token we expected to be transferred - if tx_log.address().0 != token { - continue; - } - // Check if this is a transfer log - // https://github.com/alloy-rs/core/issues/589 - if tx_log.topics()[0] != Transfer::SIGNATURE_HASH { - continue; - } - let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue }; - // Check if this is a transfer to us for the expected amount - if (transfer.to == self.1) && (transfer.value == log.amount) { - transfer_check.insert(log_index); - found_transfer = true; - break; - } - } - if !found_transfer { - // This shouldn't be a ConnectionError - // This is an exploit, a non-conforming ERC20, or an invalid connection - // This should halt the process which is sufficient, yet this is sub-optimal - // TODO - Err(Error::ConnectionError)?; - } - - Coin::Erc20(token) - }; - - in_instructions.push(InInstruction { - id, - from: *log.from.0, - coin, - amount: log.amount, - data: log.instruction.as_ref().to_vec(), - key_at_end_of_block, - }); - } - - Ok(in_instructions) - } - - pub async fn executed_commands(&self, block: u64) -> Result, Error> { - let mut res = vec![]; - - { - let filter = Filter::new().from_block(block).to_block(block).address(self.1); - let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(Error::ConnectionError)?; - } - - let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); - - let log = - log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; - - res.push(Executed { - tx_id, - nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, - }); - } - } - - { - let filter = Filter::new().from_block(block).to_block(block).address(self.1); - let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(Error::ConnectionError)?; - } - - let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); - - let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; - - res.push(Executed { - tx_id, - nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, - }); - } - } - - Ok(res) - } - - #[cfg(feature = "tests")] - pub fn key_updated_filter(&self) -> Filter { - Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH) - } - #[cfg(feature = "tests")] - pub fn executed_filter(&self) -> Filter { - Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH) - } -} diff --git a/processor/ethereum/router/Cargo.toml b/processor/ethereum/router/Cargo.toml new file mode 100644 index 000000000..ed5417c0f --- /dev/null +++ b/processor/ethereum/router/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "serai-processor-ethereum-router" +version = "0.1.0" +description = "The Router used by the Serai Processor for Ethereum" 
+license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/router" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } +k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] } + +alloy-core = { version = "0.8", default-features = false } +alloy-consensus = { version = "0.3", default-features = false } + +alloy-sol-types = { version = "0.8", default-features = false } +alloy-sol-macro = { version = "0.8", default-features = false } + +alloy-rpc-types-eth = { version = "0.3", default-features = false } +alloy-transport = { version = "0.3", default-features = false } +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-provider = { version = "0.3", default-features = false } + +ethereum-schnorr = { package = "ethereum-schnorr-contract", path = "../../../networks/ethereum/schnorr", default-features = false } + +ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false } +ethereum-deployer = { package = "serai-processor-ethereum-deployer", path = "../deployer", default-features = false } +erc20 = { package = "serai-processor-ethereum-erc20", path = "../erc20", default-features = false } + +serai-client = { path = "../../../substrate/client", default-features = false, features = ["ethereum"] } + +[build-dependencies] +build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts", default-features = false } + +syn = { version = "2", default-features = false, features = ["proc-macro"] } + +syn-solidity = { version = "0.8", default-features = false } +alloy-sol-macro-input = { version = "0.8", default-features = false } +alloy-sol-macro-expander = { version = "0.8", default-features = false } diff --git a/processor/ethereum/router/LICENSE b/processor/ethereum/router/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/ethereum/router/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
diff --git a/processor/ethereum/router/README.md b/processor/ethereum/router/README.md new file mode 100644 index 000000000..b93c32195 --- /dev/null +++ b/processor/ethereum/router/README.md @@ -0,0 +1 @@ +# Ethereum Router diff --git a/processor/ethereum/router/build.rs b/processor/ethereum/router/build.rs new file mode 100644 index 000000000..1ce6d4f57 --- /dev/null +++ b/processor/ethereum/router/build.rs @@ -0,0 +1,42 @@ +use std::{env, fs}; + +use alloy_sol_macro_input::SolInputKind; + +fn write(sol: syn_solidity::File, file: &str) { + let sol = alloy_sol_macro_expander::expand::expand(sol).unwrap(); + fs::write(file, sol.to_string()).unwrap(); +} + +fn sol(sol_files: &[&str], file: &str) { + let mut sol = String::new(); + for sol_file in sol_files { + sol += &fs::read_to_string(sol_file).unwrap(); + } + let SolInputKind::Sol(sol) = syn::parse_str(&sol).unwrap() else { + panic!("parsed .sols file wasn't SolInputKind::Sol"); + }; + write(sol, file); +} + +fn main() { + let artifacts_path = + env::var("OUT_DIR").unwrap().to_string() + "/serai-processor-ethereum-router"; + + if !fs::exists(&artifacts_path).unwrap() { + fs::create_dir(&artifacts_path).unwrap(); + } + + build_solidity_contracts::build( + &["../../../networks/ethereum/schnorr/contracts", "../erc20/contracts"], + "contracts", + &artifacts_path, + ) + .unwrap(); + + // This cannot be handled with the sol! macro. The Solidity requires an import + // https://github.com/alloy-rs/core/issues/602 + sol( + &["../../../networks/ethereum/schnorr/contracts/Schnorr.sol", "contracts/Router.sol"], + &(artifacts_path + "/router.rs"), + ); +} diff --git a/processor/ethereum/contracts/contracts/Router.sol b/processor/ethereum/router/contracts/Router.sol similarity index 86% rename from processor/ethereum/contracts/contracts/Router.sol rename to processor/ethereum/router/contracts/Router.sol index 136c1e629..e5a5c53fc 100644 --- a/processor/ethereum/contracts/contracts/Router.sol +++ b/processor/ethereum/router/contracts/Router.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-only pragma solidity ^0.8.26; -import "./IERC20.sol"; +import "IERC20.sol"; import "Schnorr.sol"; @@ -22,6 +22,15 @@ contract Router { Code } + struct AddressDestination { + address destination; + } + + struct CodeDestination { + uint32 gas; + bytes code; + } + struct OutInstruction { DestinationType destinationType; bytes destination; @@ -38,7 +47,7 @@ contract Router { event InInstruction( address indexed from, address indexed coin, uint256 amount, bytes instruction ); - event Executed(uint256 indexed nonce, bytes32 indexed batch); + event Executed(uint256 indexed nonce, bytes32 indexed message_hash); error InvalidSignature(); error InvalidAmount(); @@ -68,7 +77,7 @@ contract Router { external _updateSeraiKeyAtEndOfFn(_nonce, newSeraiKey) { - bytes memory message = abi.encodePacked("updateSeraiKey", block.chainid, _nonce, newSeraiKey); + bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", block.chainid, _nonce, newSeraiKey)); _nonce++; if (!Schnorr.verify(_seraiKey, message, signature.c, signature.s)) { @@ -132,6 +141,7 @@ contract Router { */ if (coin == address(0)) { // Enough gas to service the transfer and a minimal amount of logic + // TODO: If we're constructing a contract, we can do this at the same time as construction to.call{ value: value, gas: 5_000 }(""); } else { coin.call{ gas: 100_000 }(abi.encodeWithSelector(IERC20.transfer.selector, msg.sender, value)); @@ -156,13 +166,16 @@ contract Router { // Execute a list of transactions if 
they were signed by the current key with the current nonce function execute(OutInstruction[] calldata transactions, Signature calldata signature) external { // Verify the signature - bytes memory message = abi.encode("execute", block.chainid, _nonce, transactions); + // We hash the message here as we need the message's hash for the Executed event + // Since we're already going to hash it, hashing it prior to verifying the signature reduces the + // amount of words hashed by its challenge function (reducing our gas costs) + bytes32 message = keccak256(abi.encode("execute", block.chainid, _nonce, transactions)); if (!Schnorr.verify(_seraiKey, message, signature.c, signature.s)) { revert InvalidSignature(); } // Since the signature was verified, perform execution - emit Executed(_nonce, keccak256(message)); + emit Executed(_nonce, message); // While this is sufficient to prevent replays, it's still technically possible for instructions // from later batches to be executed before these instructions upon re-entrancy _nonce++; @@ -172,8 +185,8 @@ contract Router { if (transactions[i].destinationType == DestinationType.Address) { // This may cause a panic and the contract to become stuck if the destination isn't actually // 20 bytes. Serai is trusted to not pass a malformed destination - (address destination) = abi.decode(transactions[i].destination, (address)); - _transferOut(destination, transactions[i].coin, transactions[i].value); + (AddressDestination memory destination) = abi.decode(transactions[i].destination, (AddressDestination)); + _transferOut(destination.destination, transactions[i].coin, transactions[i].value); } else { // The destination is a piece of initcode. We calculate the hash of the will-be contract, // transfer to it, and then run the initcode @@ -184,9 +197,9 @@ contract Router { _transferOut(nextAddress, transactions[i].coin, transactions[i].value); // Perform the calls with a set gas budget - (uint32 gas, bytes memory code) = abi.decode(transactions[i].destination, (uint32, bytes)); - address(this).call{ gas: gas }( - abi.encodeWithSelector(Router.arbitaryCallOut.selector, code) + (CodeDestination memory destination) = abi.decode(transactions[i].destination, (CodeDestination)); + address(this).call{ gas: destination.gas }( + abi.encodeWithSelector(Router.arbitaryCallOut.selector, destination.code) ); } } diff --git a/processor/ethereum/router/src/lib.rs b/processor/ethereum/router/src/lib.rs new file mode 100644 index 000000000..4e4abec84 --- /dev/null +++ b/processor/ethereum/router/src/lib.rs @@ -0,0 +1,582 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::{sync::Arc, io, collections::HashSet}; + +use group::ff::PrimeField; + +/* +use k256::{ + elliptic_curve::{group::GroupEncoding, sec1}, + ProjectivePoint, +}; +*/ + +use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind}; +use alloy_consensus::TxLegacy; + +use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; + +use alloy_rpc_types_eth::Filter; +use alloy_transport::{TransportErrorKind, RpcError}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use ethereum_schnorr::{PublicKey, Signature}; +use ethereum_deployer::Deployer; +use erc20::Transfer; + +use serai_client::{primitives::Amount, networks::ethereum::Address as SeraiAddress}; + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] 
+#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod _abi { + include!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-router/router.rs")); +} +use _abi::Router as abi; +use abi::{ + SeraiKeyUpdated as SeraiKeyUpdatedEvent, InInstruction as InInstructionEvent, + Executed as ExecutedEvent, +}; + +impl From<&Signature> for abi::Signature { + fn from(signature: &Signature) -> Self { + Self { + c: <[u8; 32]>::from(signature.c().to_repr()).into(), + s: <[u8; 32]>::from(signature.s().to_repr()).into(), + } + } +} + +/// A coin on Ethereum. +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum Coin { + /// Ether, the native coin of Ethereum. + Ether, + /// An ERC20 token. + Erc20([u8; 20]), +} + +impl Coin { + /// Read a `Coin`. + pub fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + Ok(match kind[0] { + 0 => Coin::Ether, + 1 => { + let mut address = [0; 20]; + reader.read_exact(&mut address)?; + Coin::Erc20(address) + } + _ => Err(io::Error::other("unrecognized Coin type"))?, + }) + } + + /// Write the `Coin`. + pub fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Coin::Ether => writer.write_all(&[0]), + Coin::Erc20(token) => { + writer.write_all(&[1])?; + writer.write_all(token) + } + } + } +} + +/// An InInstruction from the Router. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct InInstruction { + /// The ID for this `InInstruction`. + pub id: ([u8; 32], u64), + /// The address which transferred these coins to Serai. + pub from: [u8; 20], + /// The coin transferred. + pub coin: Coin, + /// The amount transferred. + pub amount: U256, + /// The data associated with the transfer. + pub data: Vec, +} + +impl InInstruction { + /// Read an `InInstruction`. + pub fn read(reader: &mut R) -> io::Result { + let id = { + let mut id_hash = [0; 32]; + reader.read_exact(&mut id_hash)?; + let mut id_pos = [0; 8]; + reader.read_exact(&mut id_pos)?; + let id_pos = u64::from_le_bytes(id_pos); + (id_hash, id_pos) + }; + + let mut from = [0; 20]; + reader.read_exact(&mut from)?; + + let coin = Coin::read(reader)?; + let mut amount = [0; 32]; + reader.read_exact(&mut amount)?; + let amount = U256::from_le_slice(&amount); + + let mut data_len = [0; 4]; + reader.read_exact(&mut data_len)?; + let data_len = usize::try_from(u32::from_le_bytes(data_len)) + .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?; + let mut data = vec![0; data_len]; + reader.read_exact(&mut data)?; + + Ok(InInstruction { id, from, coin, amount, data }) + } + + /// Write the `InInstruction`. + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&self.id.0)?; + writer.write_all(&self.id.1.to_le_bytes())?; + + writer.write_all(&self.from)?; + + self.coin.write(writer)?; + writer.write_all(&self.amount.as_le_bytes())?; + + writer.write_all( + &u32::try_from(self.data.len()) + .map_err(|_| { + io::Error::other("InInstruction being written had data exceeding 2**32 in length") + })? + .to_le_bytes(), + )?; + writer.write_all(&self.data) + } +} + +/// Executed an command. +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum Executed { + /// Set a new key. + SetKey { + /// The nonce this was done with. + nonce: u64, + /// The key set. + key: [u8; 32], + }, + /// Executed Batch. + Batch { + /// The nonce this was done with. + nonce: u64, + /// The hash of the signed message for the Batch executed. 
+ message_hash: [u8; 32], + }, +} + +impl Executed { + /// The nonce consumed by this executed event. + pub fn nonce(&self) -> u64 { + match self { + Executed::SetKey { nonce, .. } | Executed::Batch { nonce, .. } => *nonce, + } + } +} + +/// A view of the Router for Serai. +#[derive(Clone, Debug)] +pub struct Router(Arc>, Address); +impl Router { + pub(crate) fn code() -> Vec { + const BYTECODE: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-router/Router.bin")); + Bytes::from_hex(BYTECODE).expect("compiled-in Router bytecode wasn't valid hex").to_vec() + } + + pub(crate) fn init_code(key: &PublicKey) -> Vec { + let mut bytecode = Self::code(); + // Append the constructor arguments + bytecode.extend((abi::constructorCall { initialSeraiKey: key.eth_repr().into() }).abi_encode()); + bytecode + } + + /// Create a new view of the Router. + /// + /// This performs an on-chain lookup for the first deployed Router constructed with this public + /// key. This lookup is of a constant amount of calls and does not read any logs. + pub async fn new( + provider: Arc>, + initial_serai_key: &PublicKey, + ) -> Result, RpcError> { + let Some(deployer) = Deployer::new(provider.clone()).await? else { + return Ok(None); + }; + let Some(deployment) = deployer + .find_deployment(ethereum_primitives::keccak256(Self::init_code(initial_serai_key))) + .await? + else { + return Ok(None); + }; + Ok(Some(Self(provider, deployment))) + } + + /// The address of the router. + pub fn address(&self) -> Address { + self.1 + } + + /// Construct a transaction to update the key representing Serai. + pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy { + // TODO: Set a more accurate gas + TxLegacy { + to: TxKind::Call(self.1), + input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into())) + .abi_encode() + .into(), + gas_limit: 100_000, + ..Default::default() + } + } + + /// Construct a transaction to execute a batch of `OutInstruction`s. + pub fn execute(&self, outs: &[(SeraiAddress, (Coin, Amount))], sig: &Signature) -> TxLegacy { + TxLegacy { + to: TxKind::Call(self.1), + input: abi::executeCall::new(( + outs + .iter() + .map(|(address, (coin, amount))| { + #[allow(non_snake_case)] + let (destinationType, destination) = match address { + SeraiAddress::Address(address) => ( + abi::DestinationType::Address, + (abi::AddressDestination { destination: Address::from(address) }).abi_encode(), + ), + SeraiAddress::Contract(contract) => ( + abi::DestinationType::Code, + (abi::CodeDestination { + gas: contract.gas(), + code: contract.code().to_vec().into(), + }) + .abi_encode(), + ), + }; + abi::OutInstruction { + destinationType, + destination: destination.into(), + coin: match coin { + Coin::Ether => [0; 20].into(), + Coin::Erc20(address) => address.into(), + }, + value: amount.0.try_into().expect("couldn't convert u64 to u256"), + } + }) + .collect(), + sig.into(), + )) + .abi_encode() + .into(), + // TODO + gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()), + ..Default::default() + } + } + + /* + /// Get the key for Serai at the specified block. 
+ #[cfg(test)] + pub async fn serai_key(&self, at: [u8; 32]) -> Result> { + let call = TransactionRequest::default() + .to(self.1) + .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into())); + let bytes = self + .0 + .call(&call) + .block(BlockId::Hash(B256::from(at).into())) + .await + ?; + let res = + abi::seraiKeyCall::abi_decode_returns(&bytes, true)?; + PublicKey::from_eth_repr(res._0.0).ok_or_else(|| TransportErrorKind::Custom( + "TODO".to_string().into())) + } + */ + + /* + /// Get the message to be signed in order to update the key for Serai. + pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec { + let mut buffer = b"updateSeraiKey".to_vec(); + buffer.extend(&chain_id.to_be_bytes::<32>()); + buffer.extend(&nonce.to_be_bytes::<32>()); + buffer.extend(&key.eth_repr()); + buffer + } + */ + + /* + /// Get the current nonce for the published batches. + #[cfg(test)] + pub async fn nonce(&self, at: [u8; 32]) -> Result> { + let call = TransactionRequest::default() + .to(self.1) + .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into())); + let bytes = self + .0 + .call(&call) + .block(BlockId::Hash(B256::from(at).into())) + .await + ?; + let res = + abi::nonceCall::abi_decode_returns(&bytes, true)?; + Ok(res._0) + } + */ + + /* + /// Get the message to be signed in order to update the key for Serai. + pub(crate) fn execute_message( + chain_id: U256, + nonce: U256, + outs: Vec, + ) -> Vec { + ("execute".to_string(), chain_id, nonce, outs).abi_encode_params() + } + */ + + /// Fetch the `InInstruction`s emitted by the Router from this block. + pub async fn in_instructions( + &self, + block: u64, + allowed_tokens: &HashSet<[u8; 20]>, + ) -> Result, RpcError> { + // The InInstruction events for this block + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await?; + + /* + We check that for all InInstructions for ERC20s emitted, a corresponding transfer occurred. + In order to prevent a transfer from being used to justify multiple distinct InInstructions, + we insert the transfer's log index into this HashSet. + */ + let mut transfer_check = HashSet::new(); + + let mut in_instructions = vec![]; + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(TransportErrorKind::Custom( + "node returned a log from a different address than requested".to_string().into(), + ))?; + } + + let id = ( + log + .block_hash + .ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its block hash set".to_string().into()) + })? + .into(), + log.log_index.ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its index set".to_string().into()) + })?, + ); + + let tx_hash = log.transaction_hash.ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its transaction hash set".to_string().into()) + })?; + let tx = self.0.get_transaction_by_hash(tx_hash).await?.ok_or_else(|| { + TransportErrorKind::Custom( + "node didn't have a transaction it had the logs of".to_string().into(), + ) + })?; + + let log = log + .log_decode::() + .map_err(|e| { + TransportErrorKind::Custom( + format!("filtered to InInstructionEvent yet couldn't decode log: {e:?}").into(), + ) + })? 
+ .inner + .data; + + let coin = if log.coin.0 == [0; 20] { + Coin::Ether + } else { + let token = *log.coin.0; + + if !allowed_tokens.contains(&token) { + continue; + } + + /* + If this also counts as a top-level transfer of a token, drop it. + + This event will only exist if there's an ERC20 which has some form of programmability + (`onTransferFrom`), and when a top-level transfer was made, that hook made its own call + into the Serai router. + + If such an ERC20 exists, Serai would parse it as a top-level transfer and as a router + InInstruction. While no such ERC20 is planned to be integrated, this ensures we don't + allow a double-spend on that premise. + + TODO: See below note. + */ + if tx.to == Some(token.into()) { + continue; + } + + // Get all logs for this TX + let receipt = self.0.get_transaction_receipt(tx_hash).await?.ok_or_else(|| { + TransportErrorKind::Custom( + "node didn't have the receipt for a transaction it had".to_string().into(), + ) + })?; + let tx_logs = receipt.inner.logs(); + + /* + TODO: If this is also a top-level transfer, drop the log from the top-level transfer and + only iterate over the rest of the logs. + */ + + // Find a matching transfer log + let mut found_transfer = false; + for tx_log in tx_logs { + let log_index = tx_log.log_index.ok_or_else(|| { + TransportErrorKind::Custom( + "log in transaction receipt didn't have its log index set".to_string().into(), + ) + })?; + // Ensure we didn't already use this transfer to check a distinct InInstruction event + if transfer_check.contains(&log_index) { + continue; + } + + // Check if this log is from the token we expected to be transferred + if tx_log.address().0 != token { + continue; + } + // Check if this is a transfer log + // https://github.com/alloy-rs/core/issues/589 + if tx_log.topics()[0] != Transfer::SIGNATURE_HASH { + continue; + } + let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue }; + // Check if this is a transfer to us for the expected amount + if (transfer.to == self.1) && (transfer.value == log.amount) { + transfer_check.insert(log_index); + found_transfer = true; + break; + } + } + if !found_transfer { + // This shouldn't be a simple error + // This is an exploit, a non-conforming ERC20, or a malicious connection + // This should halt the process. While this is sufficient, it's sub-optimal + // TODO + Err(TransportErrorKind::Custom( + "ERC20 InInstruction with no matching transfer log".to_string().into(), + ))?; + } + + Coin::Erc20(token) + }; + + in_instructions.push(InInstruction { + id, + from: *log.from.0, + coin, + amount: log.amount, + data: log.instruction.as_ref().to_vec(), + }); + } + + Ok(in_instructions) + } + + /// Fetch the executed actions from this block. + pub async fn executed(&self, block: u64) -> Result<Vec<Executed>, RpcError<TransportErrorKind>> { + let mut res = vec![]; + + { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(SeraiKeyUpdatedEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await?; + + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(TransportErrorKind::Custom( + "node returned a log from a different address than requested".to_string().into(), + ))?; + } + + let log = log + .log_decode::<SeraiKeyUpdatedEvent>() + .map_err(|e| { + TransportErrorKind::Custom( + format!("filtered to SeraiKeyUpdatedEvent yet couldn't decode log: {e:?}").into(), + ) + })?
+ .inner + .data; + + res.push(Executed::SetKey { + nonce: log.nonce.try_into().map_err(|e| { + TransportErrorKind::Custom(format!("failed to convert nonce to u64: {e:?}").into()) + })?, + key: log.key.into(), + }); + } + } + + { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await?; + + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(TransportErrorKind::Custom( + "node returned a log from a different address than requested".to_string().into(), + ))?; + } + + let log = log + .log_decode::<ExecutedEvent>() + .map_err(|e| { + TransportErrorKind::Custom( + format!("filtered to ExecutedEvent yet couldn't decode log: {e:?}").into(), + ) + })? + .inner + .data; + + res.push(Executed::Batch { + nonce: log.nonce.try_into().map_err(|e| { + TransportErrorKind::Custom(format!("failed to convert nonce to u64: {e:?}").into()) + })?, + message_hash: log.message_hash.into(), + }); + } + } + + res.sort_by_key(Executed::nonce); + + Ok(res) + } + + /* + #[cfg(feature = "tests")] + pub fn key_updated_filter(&self) -> Filter { + Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH) + } + #[cfg(feature = "tests")] + pub fn executed_filter(&self) -> Filter { + Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH) + } + */ +} diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml index 33bfabf9a..f59c70feb 100644 --- a/substrate/client/Cargo.toml +++ b/substrate/client/Cargo.toml @@ -24,7 +24,7 @@ bitvec = { version = "1", default-features = false, features = ["alloc", "serde" hex = "0.4" scale = { package = "parity-scale-codec", version = "3" } -borsh = { version = "1" } +borsh = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"], optional = true } serde_json = { version = "1", optional = true } diff --git a/substrate/client/src/networks/ethereum.rs b/substrate/client/src/networks/ethereum.rs index 28ada6356..ddf15480c 100644 --- a/substrate/client/src/networks/ethereum.rs +++ b/substrate/client/src/networks/ethereum.rs @@ -29,6 +29,13 @@ impl ContractDeployment { } Some(Self { gas, code }) } + + pub fn gas(&self) -> u32 { + self.gas + } + pub fn code(&self) -> &[u8] { + &self.code + } } /// A representation of an Ethereum address. From d74e6e70f0f994007b73a38c881f4072df8dec17 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 17 Sep 2024 01:04:22 -0400 Subject: [PATCH 144/179] Hash the message before the challenge function in the Schnorr contract Slightly more efficient.
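For reference, the resulting challenge derivation can be sketched as the following self-contained Rust (illustrative only, using k256 and sha3 directly; it mirrors the signature.rs hunk below and is not literal patch code):

    use k256::{
      elliptic_curve::{ops::Reduce, sec1::ToEncodedPoint},
      ProjectivePoint, Scalar, U256,
    };
    use sha3::{Digest, Keccak256};

    // The challenge preimage is now fixed-size: the Ethereum address of the
    // nonce commitment R, the public key's x-coordinate, and the keccak256 of
    // the message, rather than the arbitrary-length message itself.
    fn challenge(r: ProjectivePoint, key_x_coordinate: [u8; 32], message: &[u8]) -> Scalar {
      // An address is the last 20 bytes of the keccak256 of the uncompressed
      // point, less its SEC1 prefix byte
      let r_encoded = r.to_encoded_point(false);
      let r_address = &Keccak256::digest(&r_encoded.as_bytes()[1 .. 65])[12 ..];

      let mut hash = Keccak256::new();
      hash.update(r_address);
      hash.update(key_x_coordinate);
      // The change made here: pre-hash the message before the challenge
      hash.update(Keccak256::digest(message));
      <Scalar as Reduce<U256>>::reduce_bytes(&hash.finalize())
    }

This keeps the preimage hashed by the Solidity verifier at a constant 84 bytes, no matter the message's length.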
--- networks/ethereum/schnorr/contracts/Schnorr.sol | 2 +- networks/ethereum/schnorr/contracts/tests/Schnorr.sol | 2 +- networks/ethereum/schnorr/src/signature.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/networks/ethereum/schnorr/contracts/Schnorr.sol b/networks/ethereum/schnorr/contracts/Schnorr.sol index 69dc208a2..247e0fbe9 100644 --- a/networks/ethereum/schnorr/contracts/Schnorr.sol +++ b/networks/ethereum/schnorr/contracts/Schnorr.sol @@ -15,7 +15,7 @@ library Schnorr { // message := the message signed // c := Schnorr signature challenge // s := Schnorr signature solution - function verify(bytes32 px, bytes memory message, bytes32 c, bytes32 s) + function verify(bytes32 px, bytes32 message, bytes32 c, bytes32 s) internal pure returns (bool) diff --git a/networks/ethereum/schnorr/contracts/tests/Schnorr.sol b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol index 11a3c3bc9..412786a33 100644 --- a/networks/ethereum/schnorr/contracts/tests/Schnorr.sol +++ b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol @@ -9,6 +9,6 @@ contract TestSchnorr { pure returns (bool) { - return Schnorr.verify(public_key, message, c, s); + return Schnorr.verify(public_key, keccak256(message), c, s); } } diff --git a/networks/ethereum/schnorr/src/signature.rs b/networks/ethereum/schnorr/src/signature.rs index cd467cea6..1af1d60f8 100644 --- a/networks/ethereum/schnorr/src/signature.rs +++ b/networks/ethereum/schnorr/src/signature.rs @@ -38,7 +38,7 @@ impl Signature { &Keccak256::digest(x_and_y_coordinates)[12 ..] }); hash.update(key.eth_repr()); - hash.update(message); + hash.update(Keccak256::digest(message)); >::reduce_bytes(&hash.finalize()) } From 6057a9da8a614458963f360ae6eefb6a335d0792 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 17 Sep 2024 01:05:31 -0400 Subject: [PATCH 145/179] Don't have the Deployer store the deployment block Also updates how re-entrancy is handled to a more efficient and portable mechanism. 
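The new check works because the deployment is performed first: if the init code re-enters deploy with the same init code, the inner call writes its deployment to the mapping before the outer call performs its check, so the outer call reverts with PriorDeployed. This also drops the use of transient storage, keeping the contract portable to chains without EIP-1153.

For callers, resolving a deployment is now a single eth_call against the deployments mapping. A rough usage sketch (assuming this series' crate aliases, ethereum_deployer and ethereum_primitives; transport setup elided, not literal patch code):

    use std::sync::Arc;

    use alloy_core::primitives::Address;
    use alloy_transport::{RpcError, TransportErrorKind};
    use alloy_simple_request_transport::SimpleRequest;
    use alloy_provider::RootProvider;

    use ethereum_deployer::Deployer;

    // Resolve the address some init code was deployed to, if it was deployed
    async fn resolve(
      provider: Arc<RootProvider<SimpleRequest>>,
      init_code: Vec<u8>,
    ) -> Result<Option<Address>, RpcError<TransportErrorKind>> {
      // `Deployer::new` solely confirms code exists at the Deployer's fixed address
      let Some(deployer) = Deployer::new(provider).await? else { return Ok(None) };
      // A single eth_call against the `deployments` mapping; no logs are read
      deployer.find_deployment(ethereum_primitives::keccak256(init_code)).await
    }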
--- .../ethereum/deployer/contracts/Deployer.sol | 45 +++++-------------- processor/ethereum/deployer/src/lib.rs | 17 ++++--- 2 files changed, 19 insertions(+), 43 deletions(-) diff --git a/processor/ethereum/deployer/contracts/Deployer.sol b/processor/ethereum/deployer/contracts/Deployer.sol index ad217fdc5..2d4904e40 100644 --- a/processor/ethereum/deployer/contracts/Deployer.sol +++ b/processor/ethereum/deployer/contracts/Deployer.sol @@ -34,41 +34,12 @@ pragma solidity ^0.8.26; */ contract Deployer { - struct Deployment { - uint64 block_number; - address created_contract; - } - - mapping(bytes32 => Deployment) public deployments; + mapping(bytes32 => address) public deployments; - error Reentrancy(); error PriorDeployed(); error DeploymentFailed(); function deploy(bytes memory init_code) external { - // Prevent re-entrancy - // If we did allow it, one could deploy the same contract multiple times (with one overwriting - // the other's set value in storage) - bool called; - // This contract doesn't have any other use of transient storage, nor is to be inherited, making - // this usage of the zero address safe - assembly { - called := tload(0) - } - if (called) { - revert Reentrancy(); - } - assembly { - tstore(0, 1) - } - - // Check this wasn't prior deployed - bytes32 init_code_hash = keccak256(init_code); - Deployment memory deployment = deployments[init_code_hash]; - if (deployment.created_contract == address(0)) { - revert PriorDeployed(); - } - // Deploy the contract address created_contract; assembly { @@ -78,9 +49,15 @@ revert DeploymentFailed(); } - // Set the dpeloyment to storage - deployment.block_number = uint64(block.number); - deployment.created_contract = created_contract; - deployments[init_code_hash] = deployment; + bytes32 init_code_hash = keccak256(init_code); + + // Check this wasn't prior deployed + // We check this *after* deploying (in violation of CEI) to handle re-entrancy-related bugs + if (deployments[init_code_hash] != address(0)) { + revert PriorDeployed(); + } + + // Write the deployment to storage + deployments[init_code_hash] = created_contract; } } diff --git a/processor/ethereum/deployer/src/lib.rs b/processor/ethereum/deployer/src/lib.rs index bf2d1a9cf..6fa59ee3c 100644 --- a/processor/ethereum/deployer/src/lib.rs +++ b/processor/ethereum/deployer/src/lib.rs @@ -30,7 +30,7 @@ mod abi { /// compatible chain. It then supports retrieving the Router contract's address (which isn't /// deterministic) using a single call. #[derive(Clone, Debug)] -pub struct Deployer; +pub struct Deployer(Arc<RootProvider<SimpleRequest>>); impl Deployer { /// Obtain the transaction to deploy this contract, already signed. /// /// The account this transaction is sent from (which is populated in `from`) must be sufficiently /// funded for this transaction to be submitted. This account has no known private key to anyone /// so ETH sent can be neither misappropriated nor returned. pub fn deployment_tx() -> Signed<TxLegacy> { - pub const BYTECODE: &str = - include_str!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-deployer/Deployer.bin")); + pub const BYTECODE: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-deployer/Deployer.bin")); let bytecode = Bytes::from_hex(BYTECODE).expect("compiled-in Deployer bytecode wasn't valid hex"); @@ -75,28 +75,27 @@ impl Deployer { if code.is_empty() { return Ok(None); } - Ok(Some(Self)) + Ok(Some(Self(provider))) } /// Find the deployment of a contract.
pub async fn find_deployment( &self, - provider: Arc>, init_code_hash: [u8; 32], - ) -> Result, RpcError> { + ) -> Result, RpcError> { let call = TransactionRequest::default().to(Self::address()).input(TransactionInput::new( abi::Deployer::deploymentsCall::new((init_code_hash.into(),)).abi_encode().into(), )); - let bytes = provider.call(&call).await?; + let bytes = self.0.call(&call).await?; let deployment = abi::Deployer::deploymentsCall::abi_decode_returns(&bytes, true) .map_err(|e| { TransportErrorKind::Custom( - format!("node returned a non-Deployment for function returning Deployment: {e:?}").into(), + format!("node returned a non-address for function returning address: {e:?}").into(), ) })? ._0; - if deployment.created_contract == [0; 20] { + if **deployment == [0; 20] { return Ok(None); } Ok(Some(deployment)) From 6692679720524a0290784c04521e0ff26144a171 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 17 Sep 2024 01:07:08 -0400 Subject: [PATCH 146/179] Trim dead code --- processor/ethereum/contracts/src/lib.rs | 4 +- processor/ethereum/erc20/src/lib.rs | 7 +- .../ethereum/ethereum-serai/src/crypto.rs | 42 ------- .../ethereum/ethereum-serai/src/deployer.rs | 113 ------------------ 4 files changed, 6 insertions(+), 160 deletions(-) delete mode 100644 processor/ethereum/ethereum-serai/src/deployer.rs diff --git a/processor/ethereum/contracts/src/lib.rs b/processor/ethereum/contracts/src/lib.rs index d0a5c076e..9087eaed2 100644 --- a/processor/ethereum/contracts/src/lib.rs +++ b/processor/ethereum/contracts/src/lib.rs @@ -10,7 +10,7 @@ pub mod erc20 { pub use super::abigen::erc20::IERC20::*; } pub mod router { - pub const BYTECODE: &str = - include_str!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-contracts/Router.bin")); + pub const BYTECODE: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-contracts/Router.bin")); pub use super::abigen::router::Router::*; } diff --git a/processor/ethereum/erc20/src/lib.rs b/processor/ethereum/erc20/src/lib.rs index 560ea86c1..51f68d0ec 100644 --- a/processor/ethereum/erc20/src/lib.rs +++ b/processor/ethereum/erc20/src/lib.rs @@ -22,7 +22,8 @@ use alloy_provider::{Provider, RootProvider}; mod abi { alloy_sol_macro::sol!("contracts/IERC20.sol"); } -use abi::IERC20::{IERC20Calls, Transfer, transferCall, transferFromCall}; +use abi::IERC20::{IERC20Calls, transferCall, transferFromCall}; +pub use abi::IERC20::Transfer; /// A top-level ERC20 transfer #[derive(Clone, Debug)] @@ -50,12 +51,12 @@ impl Erc20 { pub async fn top_level_transfers( &self, block: u64, - to: [u8; 20], + to: Address, ) -> Result, RpcError> { let filter = Filter::new().from_block(block).to_block(block).address(self.1); let filter = filter.event_signature(Transfer::SIGNATURE_HASH); let mut to_topic = [0; 32]; - to_topic[12 ..].copy_from_slice(&to); + to_topic[12 ..].copy_from_slice(to.as_ref()); let filter = filter.topic2(B256::from(to_topic)); let logs = self.0.get_logs(&filter).await?; diff --git a/processor/ethereum/ethereum-serai/src/crypto.rs b/processor/ethereum/ethereum-serai/src/crypto.rs index fc51ae6bc..3b9dc58a1 100644 --- a/processor/ethereum/ethereum-serai/src/crypto.rs +++ b/processor/ethereum/ethereum-serai/src/crypto.rs @@ -18,48 +18,6 @@ pub use ethereum_schnorr_contract::*; use alloy_core::primitives::{Parity, Signature as AlloySignature, Address}; use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; -pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] { - alloy_core::primitives::keccak256(data).into() -} - -pub(crate) fn 
hash_to_scalar(data: &[u8]) -> Scalar { - >::reduce_bytes(&keccak256(data).into()) -} - -pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] { - let encoded_point = point.to_encoded_point(false); - **Address::from_raw_public_key(&encoded_point.as_ref()[1 .. 65]) -} - -/// Deterministically sign a transaction. -/// -/// This function panics if passed a transaction with a non-None chain ID. -pub fn deterministically_sign(tx: &TxLegacy) -> Signed { - assert!( - tx.chain_id.is_none(), - "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)" - ); - - let sig_hash = tx.signature_hash().0; - let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat()); - let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat()); - loop { - let r_bytes: [u8; 32] = r.to_repr().into(); - let s_bytes: [u8; 32] = s.to_repr().into(); - let v = Parity::NonEip155(false); - let signature = - AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap(); - let tx = tx.clone().into_signed(signature); - if tx.recover_signer().is_ok() { - return tx; - } - - // Re-hash until valid - r = hash_to_scalar(r_bytes.as_ref()); - s = hash_to_scalar(s_bytes.as_ref()); - } -} - /// The HRAm to use for the Schnorr Solidity library. /// /// This will panic if the public key being signed for is not representable within the Schnorr diff --git a/processor/ethereum/ethereum-serai/src/deployer.rs b/processor/ethereum/ethereum-serai/src/deployer.rs deleted file mode 100644 index 88f4a5fb9..000000000 --- a/processor/ethereum/ethereum-serai/src/deployer.rs +++ /dev/null @@ -1,113 +0,0 @@ -use std::sync::Arc; - -use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind}; -use alloy_consensus::{Signed, TxLegacy}; - -use alloy_sol_types::{SolCall, SolEvent}; - -use alloy_rpc_types_eth::{BlockNumberOrTag, Filter}; -use alloy_simple_request_transport::SimpleRequest; -use alloy_provider::{Provider, RootProvider}; - -use crate::{ - Error, - crypto::{self, keccak256, PublicKey}, - router::Router, -}; -pub use crate::abi::deployer as abi; - -/// The Deployer contract for the Router contract. -/// -/// This Deployer has a deterministic address, letting it be immediately identified on any -/// compatible chain. It then supports retrieving the Router contract's address (which isn't -/// deterministic) using a single log query. -#[derive(Clone, Debug)] -pub struct Deployer; -impl Deployer { - /// Obtain the transaction to deploy this contract, already signed. - /// - /// The account this transaction is sent from (which is populated in `from`) must be sufficiently - /// funded for this transaction to be submitted. This account has no known private key to anyone, - /// so ETH sent can be neither misappropriated nor returned. - pub fn deployment_tx() -> Signed { - let bytecode = contracts::deployer::BYTECODE; - let bytecode = - Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex"); - - let tx = TxLegacy { - chain_id: None, - nonce: 0, - gas_price: 100_000_000_000u128, - // TODO: Use a more accurate gas limit - gas_limit: 1_000_000u128, - to: TxKind::Create, - value: U256::ZERO, - input: bytecode, - }; - - crypto::deterministically_sign(&tx) - } - - /// Obtain the deterministic address for this contract. 
- pub fn address() -> [u8; 20] { - let deployer_deployer = - Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature"); - **Address::create(&deployer_deployer, 0) - } - - /// Construct a new view of the `Deployer`. - pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result<Option<Self>, Error> { - let address = Self::address(); - let code = provider.get_code_at(address.into()).await.map_err(|_| Error::ConnectionError)?; - // Contract has yet to be deployed - if code.is_empty() { - return Ok(None); - } - Ok(Some(Self)) - } - - /// Yield the `ContractCall` necessary to deploy the Router. - pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy { - TxLegacy { - to: TxKind::Call(Self::address().into()), - input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(), - gas_limit: 1_000_000, - ..Default::default() - } - } - - /// Find the first Router deployed with the specified key as its first key. - /// - /// This is the Router Serai will use, and is the only way to construct a `Router`. - pub async fn find_router( - &self, - provider: Arc<RootProvider<SimpleRequest>>, - key: &PublicKey, - ) -> Result<Option<Router>, Error> { - let init_code = Router::init_code(key); - let init_code_hash = keccak256(&init_code); - - #[cfg(not(test))] - let to_block = BlockNumberOrTag::Finalized; - #[cfg(test)] - let to_block = BlockNumberOrTag::Latest; - - // Find the first log using this init code (where the init code is binding to the key) - // TODO: Make an abstraction for event filtering (de-duplicating common code) - let filter = - Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address())); - let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH); - let filter = filter.topic1(B256::from(init_code_hash)); - let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - let Some(first_log) = logs.first() else { return Ok(None) }; - let router = first_log - .log_decode::<abi::Deployment>() - .map_err(|_| Error::ConnectionError)? - .inner - .data - .created; - - Ok(Some(Router::new(provider, router))) - } -} From b1b7b6d7f30f933ef29cd8a95fe6826b6341e8bf Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 17 Sep 2024 01:26:37 -0400 Subject: [PATCH 147/179] Add calls to get the messages to sign for the router --- .../ethereum/router/contracts/Router.sol | 2 + processor/ethereum/router/src/lib.rs | 169 ++++++------------ 2 files changed, 61 insertions(+), 110 deletions(-) diff --git a/processor/ethereum/router/contracts/Router.sol b/processor/ethereum/router/contracts/Router.sol index e5a5c53fc..bc0debde5 100644 --- a/processor/ethereum/router/contracts/Router.sol +++ b/processor/ethereum/router/contracts/Router.sol @@ -77,6 +77,8 @@ contract Router { external _updateSeraiKeyAtEndOfFn(_nonce, newSeraiKey) { + // This DST needs a length prefix as well to prevent DSTs potentially being substrings of each + // other, yet this is fine for our very well-defined, limited use bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", block.chainid, _nonce, newSeraiKey)); _nonce++; diff --git a/processor/ethereum/router/src/lib.rs b/processor/ethereum/router/src/lib.rs index 4e4abec84..ef1dfd000 100644 --- a/processor/ethereum/router/src/lib.rs +++ b/processor/ethereum/router/src/lib.rs @@ -156,6 +156,42 @@ impl InInstruction { } } +/// A list of `OutInstruction`s.
+#[derive(Clone)] +pub struct OutInstructions(Vec); +impl From<&[(SeraiAddress, (Coin, Amount))]> for OutInstructions { + fn from(outs: &[(SeraiAddress, (Coin, Amount))]) -> Self { + Self( + outs + .iter() + .map(|(address, (coin, amount))| { + #[allow(non_snake_case)] + let (destinationType, destination) = match address { + SeraiAddress::Address(address) => ( + abi::DestinationType::Address, + (abi::AddressDestination { destination: Address::from(address) }).abi_encode(), + ), + SeraiAddress::Contract(contract) => ( + abi::DestinationType::Code, + (abi::CodeDestination { gas: contract.gas(), code: contract.code().to_vec().into() }) + .abi_encode(), + ), + }; + abi::OutInstruction { + destinationType, + destination: destination.into(), + coin: match coin { + Coin::Ether => [0; 20].into(), + Coin::Erc20(address) => address.into(), + }, + value: amount.0.try_into().expect("couldn't convert u64 to u256"), + } + }) + .collect(), + ) + } +} + /// Executed an command. #[derive(Clone, PartialEq, Eq, Debug)] pub enum Executed { @@ -188,13 +224,13 @@ impl Executed { #[derive(Clone, Debug)] pub struct Router(Arc>, Address); impl Router { - pub(crate) fn code() -> Vec { + fn code() -> Vec { const BYTECODE: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-router/Router.bin")); Bytes::from_hex(BYTECODE).expect("compiled-in Router bytecode wasn't valid hex").to_vec() } - pub(crate) fn init_code(key: &PublicKey) -> Vec { + fn init_code(key: &PublicKey) -> Vec { let mut bytecode = Self::code(); // Append the constructor arguments bytecode.extend((abi::constructorCall { initialSeraiKey: key.eth_repr().into() }).abi_encode()); @@ -226,6 +262,17 @@ impl Router { self.1 } + /// Get the message to be signed in order to update the key for Serai. + pub fn update_serai_key_message(chain_id: U256, nonce: u64, key: &PublicKey) -> Vec { + ( + "updateSeraiKey", + chain_id, + U256::try_from(nonce).expect("couldn't convert u64 to u256"), + key.eth_repr(), + ) + .abi_encode_packed() + } + /// Construct a transaction to update the key representing Serai. pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy { // TODO: Set a more accurate gas @@ -239,111 +286,24 @@ impl Router { } } + /// Get the message to be signed in order to execute a series of `OutInstruction`s. + pub fn execute_message(chain_id: U256, nonce: u64, outs: OutInstructions) -> Vec { + ("execute", chain_id, U256::try_from(nonce).expect("couldn't convert u64 to u256"), outs.0) + .abi_encode() + } + /// Construct a transaction to execute a batch of `OutInstruction`s. 
- pub fn execute(&self, outs: &[(SeraiAddress, (Coin, Amount))], sig: &Signature) -> TxLegacy { + pub fn execute(&self, outs: OutInstructions, sig: &Signature) -> TxLegacy { + let outs_len = outs.0.len(); TxLegacy { to: TxKind::Call(self.1), - input: abi::executeCall::new(( - outs - .iter() - .map(|(address, (coin, amount))| { - #[allow(non_snake_case)] - let (destinationType, destination) = match address { - SeraiAddress::Address(address) => ( - abi::DestinationType::Address, - (abi::AddressDestination { destination: Address::from(address) }).abi_encode(), - ), - SeraiAddress::Contract(contract) => ( - abi::DestinationType::Code, - (abi::CodeDestination { - gas: contract.gas(), - code: contract.code().to_vec().into(), - }) - .abi_encode(), - ), - }; - abi::OutInstruction { - destinationType, - destination: destination.into(), - coin: match coin { - Coin::Ether => [0; 20].into(), - Coin::Erc20(address) => address.into(), - }, - value: amount.0.try_into().expect("couldn't convert u64 to u256"), - } - }) - .collect(), - sig.into(), - )) - .abi_encode() - .into(), + input: abi::executeCall::new((outs.0, sig.into())).abi_encode().into(), // TODO - gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()), + gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs_len).unwrap()), ..Default::default() } } - /* - /// Get the key for Serai at the specified block. - #[cfg(test)] - pub async fn serai_key(&self, at: [u8; 32]) -> Result> { - let call = TransactionRequest::default() - .to(self.1) - .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into())); - let bytes = self - .0 - .call(&call) - .block(BlockId::Hash(B256::from(at).into())) - .await - ?; - let res = - abi::seraiKeyCall::abi_decode_returns(&bytes, true)?; - PublicKey::from_eth_repr(res._0.0).ok_or_else(|| TransportErrorKind::Custom( - "TODO".to_string().into())) - } - */ - - /* - /// Get the message to be signed in order to update the key for Serai. - pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec { - let mut buffer = b"updateSeraiKey".to_vec(); - buffer.extend(&chain_id.to_be_bytes::<32>()); - buffer.extend(&nonce.to_be_bytes::<32>()); - buffer.extend(&key.eth_repr()); - buffer - } - */ - - /* - /// Get the current nonce for the published batches. - #[cfg(test)] - pub async fn nonce(&self, at: [u8; 32]) -> Result> { - let call = TransactionRequest::default() - .to(self.1) - .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into())); - let bytes = self - .0 - .call(&call) - .block(BlockId::Hash(B256::from(at).into())) - .await - ?; - let res = - abi::nonceCall::abi_decode_returns(&bytes, true)?; - Ok(res._0) - } - */ - - /* - /// Get the message to be signed in order to update the key for Serai. - pub(crate) fn execute_message( - chain_id: U256, - nonce: U256, - outs: Vec, - ) -> Vec { - ("execute".to_string(), chain_id, nonce, outs).abi_encode_params() - } - */ - /// Fetch the `InInstruction`s emitted by the Router from this block. 
pub async fn in_instructions( &self, @@ -568,15 +528,4 @@ impl Router { Ok(res) } - - /* - #[cfg(feature = "tests")] - pub fn key_updated_filter(&self) -> Filter { - Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH) - } - #[cfg(feature = "tests")] - pub fn executed_filter(&self) -> Filter { - Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH) - } - */ } From 3f78af1c9156fc3e3248efc5dad9e44937282fef Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 17 Sep 2024 02:59:01 -0400 Subject: [PATCH 148/179] Don't have the router drop transactions which may have top-level transfers The router will now match the top-level transfer so it isn't used as the justification for the InInstruction it's handling. This allows the theoretical case where a top-level transfer occurs (to any entity) and an internal call performs a transfer to Serai. Also uses a JoinSet for fetching transactions' top-level transfers in the ERC20 crate. This does add a dependency on tokio yet improves performance, and it's scoped under serai-processor (which is always presumed to be tokio-based). While we could instead import futures for join_all, https://github.com/smol-rs/futures-lite/issues/6 summarizes why that wouldn't be a good idea. While we could prefer async-executor over tokio's JoinSet, JoinSet doesn't share the same issues as FuturesUnordered. That means our question is solely if we want the async-executor executor or the tokio executor, when we've already established the Serai processor is always presumed to be tokio-based. --- Cargo.lock | 1 + processor/ethereum/erc20/Cargo.toml | 2 + processor/ethereum/erc20/src/lib.rs | 213 +++++++++++++++++---------- processor/ethereum/router/src/lib.rs | 36 ++--- 4 files changed, 152 insertions(+), 100 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 981275fba..f928e57e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8748,6 +8748,7 @@ dependencies = [ "alloy-sol-macro", "alloy-sol-types", "alloy-transport", + "tokio", ] [[package]] diff --git a/processor/ethereum/erc20/Cargo.toml b/processor/ethereum/erc20/Cargo.toml index 85bc83c32..3c7f51017 100644 --- a/processor/ethereum/erc20/Cargo.toml +++ b/processor/ethereum/erc20/Cargo.toml @@ -26,3 +26,5 @@ alloy-rpc-types-eth = { version = "0.3", default-features = false } alloy-transport = { version = "0.3", default-features = false } alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } alloy-provider = { version = "0.3", default-features = false } + +tokio = { version = "1", default-features = false, features = ["rt"] } diff --git a/processor/ethereum/erc20/src/lib.rs b/processor/ethereum/erc20/src/lib.rs index 51f68d0ec..920915e93 100644 --- a/processor/ethereum/erc20/src/lib.rs +++ b/processor/ethereum/erc20/src/lib.rs @@ -13,6 +13,8 @@ use alloy_transport::{TransportErrorKind, RpcError}; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; +use tokio::task::JoinSet; + #[rustfmt::skip] #[expect(warnings)] #[expect(needless_pass_by_value)] @@ -27,7 +29,7 @@ pub use abi::IERC20::Transfer; /// A top-level ERC20 transfer #[derive(Clone, Debug)] -pub struct TopLevelErc20Transfer { +pub struct TopLevelTransfer { /// The transaction ID which effected this transfer. pub id: [u8; 32], /// The address which made the transfer. 
@@ -38,6 +40,14 @@ pub struct TopLevelErc20Transfer { pub data: Vec, } +/// A transaction with a top-level transfer, matched to the log index of the transfer. +pub struct MatchedTopLevelTransfer { + /// The transfer. + pub transfer: TopLevelTransfer, + /// The log index of the transfer. + pub log_index: u64, +} + /// A view for an ERC20 contract. #[derive(Clone, Debug)] pub struct Erc20(Arc>, Address); @@ -47,12 +57,104 @@ impl Erc20 { Self(provider, Address::from(&address)) } - /// Fetch all top-level transfers to the specified ERC20. + /// Match a transaction for its top-level transfer to the specified address (if one exists). + pub async fn match_top_level_transfer( + provider: impl AsRef>, + transaction_id: B256, + to: Address, + ) -> Result, RpcError> { + // Fetch the transaction + let transaction = + provider.as_ref().get_transaction_by_hash(transaction_id).await?.ok_or_else(|| { + TransportErrorKind::Custom( + "node didn't have the transaction which emitted a log it had".to_string().into(), + ) + })?; + + // If this is a top-level call... + // Don't validate the encoding as this can't be re-encoded to an identical bytestring due + // to the `InInstruction` appended after the call itself + if let Ok(call) = IERC20Calls::abi_decode(&transaction.input, false) { + // Extract the top-level call's from/to/value + let (from, call_to, value) = match call { + IERC20Calls::transfer(transferCall { to, value }) => (transaction.from, to, value), + IERC20Calls::transferFrom(transferFromCall { from, to, value }) => (from, to, value), + // Treat any other function selectors as unrecognized + _ => return Ok(None), + }; + // If this isn't a transfer to the expected address, return None + if call_to != to { + return Ok(None); + } + + // Fetch the transaction's logs + let receipt = + provider.as_ref().get_transaction_receipt(transaction_id).await?.ok_or_else(|| { + TransportErrorKind::Custom( + "node didn't have receipt for a transaction we were matching for a top-level transfer" + .to_string() + .into(), + ) + })?; + + // Find the log for this transfer + for log in receipt.inner.logs() { + // If this log was emitted by a different contract, continue + if Some(log.address()) != transaction.to { + continue; + } + + // Check if this is actually a transfer log + // https://github.com/alloy-rs/core/issues/589 + if log.topics().first() != Some(&Transfer::SIGNATURE_HASH) { + continue; + } + + let log_index = log.log_index.ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its index set".to_string().into()) + })?; + let log = log + .log_decode::() + .map_err(|e| { + TransportErrorKind::Custom(format!("failed to decode Transfer log: {e:?}").into()) + })? + .inner + .data; + + // Ensure the top-level transfer is equivalent to the transfer this log represents. Since + // we can't find the exact top-level transfer without tracing the call, we just rule the + // first equivalent transfer as THE top-level transfer + if !((log.from == from) && (log.to == to) && (log.value == value)) { + continue; + } + + // Read the data appended after + let encoded = call.abi_encode(); + let data = transaction.input.as_ref()[encoded.len() ..].to_vec(); + + return Ok(Some(MatchedTopLevelTransfer { + transfer: TopLevelTransfer { + // Since there's only one top-level transfer per TX, set the ID to the TX ID + id: *transaction_id, + from: *log.from.0, + amount: log.value, + data, + }, + log_index, + })); + } + } + + Ok(None) + } + + /// Fetch all top-level transfers to the specified address. 
pub async fn top_level_transfers( &self, block: u64, to: Address, - ) -> Result, RpcError> { + ) -> Result, RpcError> { + // Get all transfers within this block let filter = Filter::new().from_block(block).to_block(block).address(self.1); let filter = filter.event_signature(Transfer::SIGNATURE_HASH); let mut to_topic = [0; 32]; @@ -60,83 +162,46 @@ impl Erc20 { let filter = filter.topic2(B256::from(to_topic)); let logs = self.0.get_logs(&filter).await?; - /* - A set of all transactions we've handled a transfer from. This handles the edge case where a - top-level transfer T somehow triggers another transfer T', with equivalent contents, within - the same transaction. We only want to report one transfer as only one is top-level. - */ - let mut handled = HashSet::new(); + // These logs are for all transactions which performed any transfer + // We now check each transaction for having a top-level transfer to the specified address + let tx_ids = logs + .into_iter() + .map(|log| { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(TransportErrorKind::Custom( + "node returned logs for a different address than requested".to_string().into(), + ))?; + } - let mut top_level_transfers = vec![]; - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(TransportErrorKind::Custom( - "node returned logs for a different address than requested".to_string().into(), - ))?; - } + log.transaction_hash.ok_or_else(|| { + TransportErrorKind::Custom("log didn't specify its transaction hash".to_string().into()) + }) + }) + .collect::, _>>()?; - let tx_id = log.transaction_hash.ok_or_else(|| { - TransportErrorKind::Custom("log didn't specify its transaction hash".to_string().into()) - })?; - let tx = self.0.get_transaction_by_hash(tx_id).await?.ok_or_else(|| { - TransportErrorKind::Custom( - "node didn't have the transaction which emitted a log it had".to_string().into(), - ) - })?; + let mut join_set = JoinSet::new(); + for tx_id in tx_ids { + join_set.spawn(Self::match_top_level_transfer(self.0.clone(), tx_id, to)); + } - // If this is a top-level call... - if tx.to == Some(self.1) { - // And we recognize the call... - // Don't validate the encoding as this can't be re-encoded to an identical bytestring due - // to the InInstruction appended - if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) { - // Extract the top-level call's from/to/value - let (from, call_to, value) = match call { - IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value), - IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => { - (from, call_to, value) - } - // Treat any other function selectors as unrecognized - _ => continue, - }; - - let log = log - .log_decode::() - .map_err(|e| { - TransportErrorKind::Custom(format!("failed to decode Transfer log: {e:?}").into()) - })? 
- .inner - .data; - - // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an - // internal transfer - if (log.from != from) || (call_to != to) || (value != log.value) { - continue; - } - - // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's - // the only log we handle - if handled.contains(&tx_id) { - continue; - } - handled.insert(tx_id); - - // Read the data appended after - let encoded = call.abi_encode(); - let data = tx.input.as_ref()[encoded.len() ..].to_vec(); - - // Push the transfer - top_level_transfers.push(TopLevelErc20Transfer { - // Since we'll only handle one log for this TX, set the ID to the TX ID - id: *tx_id, - from: *log.from.0, - amount: log.value, - data, - }); + let mut top_level_transfers = vec![]; + while let Some(top_level_transfer) = join_set.join_next().await { + // This is an error if a task panics or aborts + // Panicking on a task panic is desired behavior, and we haven't aborted any tasks + match top_level_transfer.unwrap() { + // Top-level transfer + Ok(Some(top_level_transfer)) => top_level_transfers.push(top_level_transfer.transfer), + // Not a top-level transfer + Ok(None) => continue, + // Failed to get this transaction's information so abort + Err(e) => { + join_set.abort_all(); + Err(e)? } } } + Ok(top_level_transfers) } } diff --git a/processor/ethereum/router/src/lib.rs b/processor/ethereum/router/src/lib.rs index ef1dfd000..18bc3d4b6 100644 --- a/processor/ethereum/router/src/lib.rs +++ b/processor/ethereum/router/src/lib.rs @@ -25,7 +25,7 @@ use alloy_provider::{Provider, RootProvider}; use ethereum_schnorr::{PublicKey, Signature}; use ethereum_deployer::Deployer; -use erc20::Transfer; +use erc20::{Transfer, Erc20}; use serai_client::{primitives::Amount, networks::ethereum::Address as SeraiAddress}; @@ -346,11 +346,6 @@ impl Router { let tx_hash = log.transaction_hash.ok_or_else(|| { TransportErrorKind::Custom("log didn't have its transaction hash set".to_string().into()) })?; - let tx = self.0.get_transaction_by_hash(tx_hash).await?.ok_or_else(|| { - TransportErrorKind::Custom( - "node didn't have a transaction it had the logs of".to_string().into(), - ) - })?; let log = log .log_decode::() @@ -371,23 +366,6 @@ impl Router { continue; } - /* - If this also counts as a top-level transfer of a token, drop it. - - This event will only exist if there's an ERC20 which has some form of programmability - (`onTransferFrom`), and when a top-level transfer was made, that hook made its own call - into the Serai router. - - If such an ERC20 exists, Serai would parse it as a top-level transfer and as a router - InInstruction. While no such ERC20 is planned to be integrated, this enures we don't - allow a double-spend on that premise. - - TODO: See below note. - */ - if tx.to == Some(token.into()) { - continue; - } - // Get all logs for this TX let receipt = self.0.get_transaction_receipt(tx_hash).await?.ok_or_else(|| { TransportErrorKind::Custom( @@ -397,9 +375,14 @@ impl Router { let tx_logs = receipt.inner.logs(); /* - TODO: If this is also a top-level transfer, drop the log from the top-level transfer and - only iterate over the rest of the logs. + The transfer which causes an InInstruction event won't be a top-level transfer. + Accordingly, when looking for the matching transfer, disregard the top-level transfer (if + one exists). */ + if let Some(matched) = Erc20::match_top_level_transfer(&self.0, tx_hash, self.1).await? 
{ + // Mark this log index as used so it isn't used again + transfer_check.insert(matched.log_index); + } // Find a matching transfer log let mut found_transfer = false; @@ -409,6 +392,7 @@ impl Router { "log in transaction receipt didn't have its log index set".to_string().into(), ) })?; + // Ensure we didn't already use this transfer to check a distinct InInstruction event if transfer_check.contains(&log_index) { continue; @@ -420,7 +404,7 @@ impl Router { } // Check if this is a transfer log // https://github.com/alloy-rs/core/issues/589 - if tx_log.topics()[0] != Transfer::SIGNATURE_HASH { + if tx_log.topics().first() != Some(&Transfer::SIGNATURE_HASH) { continue; } let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue }; From 62da845bb70b1fa481ff1ee719ba54f282523525 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 18 Sep 2024 00:54:20 -0400 Subject: [PATCH 149/179] Ethereum SignableTransaction, Eventuality --- processor/ethereum/Cargo.toml | 17 +- processor/ethereum/router/Cargo.toml | 1 - processor/ethereum/router/src/lib.rs | 48 ++- processor/ethereum/src/key_gen.rs | 2 +- processor/ethereum/src/main.rs | 10 +- processor/ethereum/src/primitives/block.rs | 39 ++- processor/ethereum/src/primitives/mod.rs | 6 + processor/ethereum/src/primitives/output.rs | 14 +- .../ethereum/src/primitives/transaction.rs | 297 +++++++++++++++--- processor/ethereum/src/rpc.rs | 10 +- processor/ethereum/src/scheduler.rs | 75 ++--- 11 files changed, 390 insertions(+), 129 deletions(-) diff --git a/processor/ethereum/Cargo.toml b/processor/ethereum/Cargo.toml index dfed2f9d4..9a3b264cc 100644 --- a/processor/ethereum/Cargo.toml +++ b/processor/ethereum/Cargo.toml @@ -26,10 +26,18 @@ borsh = { version = "1", default-features = false, features = ["std", "derive", ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } -frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] } k256 = { version = "^0.13.1", default-features = false, features = ["std"] } -ethereum-serai = { path = "./ethereum-serai", default-features = false, optional = true } + +alloy-core = { version = "0.8", default-features = false } +alloy-rlp = { version = "0.3", default-features = false } +alloy-consensus = { version = "0.3", default-features = false } + +alloy-rpc-types-eth = { version = "0.3", default-features = false } +alloy-simple-request-transport = { path = "../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-rpc-client = { version = "0.3", default-features = false } +alloy-provider = { version = "0.3", default-features = false } serai-client = { path = "../../substrate/client", default-features = false, features = ["ethereum"] } @@ -48,6 +56,11 @@ scanner = { package = "serai-processor-scanner", path = "../scanner" } smart-contract-scheduler = { package = "serai-processor-smart-contract-scheduler", path = "../scheduler/smart-contract" } signers = { package = "serai-processor-signers", path = "../signers" } +ethereum-schnorr = { package = "ethereum-schnorr-contract", path = "../../networks/ethereum/schnorr" } +ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "./primitives" } +ethereum-router = { package = 
"serai-processor-ethereum-router", path = "./router" } +ethereum-erc20 = { package = "serai-processor-ethereum-erc20", path = "./erc20" } + bin = { package = "serai-processor-bin", path = "../bin" } [features] diff --git a/processor/ethereum/router/Cargo.toml b/processor/ethereum/router/Cargo.toml index ed5417c0f..e8884eae6 100644 --- a/processor/ethereum/router/Cargo.toml +++ b/processor/ethereum/router/Cargo.toml @@ -24,7 +24,6 @@ alloy-core = { version = "0.8", default-features = false } alloy-consensus = { version = "0.3", default-features = false } alloy-sol-types = { version = "0.8", default-features = false } -alloy-sol-macro = { version = "0.8", default-features = false } alloy-rpc-types-eth = { version = "0.3", default-features = false } alloy-transport = { version = "0.3", default-features = false } diff --git a/processor/ethereum/router/src/lib.rs b/processor/ethereum/router/src/lib.rs index 18bc3d4b6..344e2beed 100644 --- a/processor/ethereum/router/src/lib.rs +++ b/processor/ethereum/router/src/lib.rs @@ -27,7 +27,7 @@ use ethereum_schnorr::{PublicKey, Signature}; use ethereum_deployer::Deployer; use erc20::{Transfer, Erc20}; -use serai_client::{primitives::Amount, networks::ethereum::Address as SeraiAddress}; +use serai_client::networks::ethereum::Address as SeraiAddress; #[rustfmt::skip] #[expect(warnings)] @@ -159,8 +159,8 @@ impl InInstruction { /// A list of `OutInstruction`s. #[derive(Clone)] pub struct OutInstructions(Vec); -impl From<&[(SeraiAddress, (Coin, Amount))]> for OutInstructions { - fn from(outs: &[(SeraiAddress, (Coin, Amount))]) -> Self { +impl From<&[(SeraiAddress, (Coin, U256))]> for OutInstructions { + fn from(outs: &[(SeraiAddress, (Coin, U256))]) -> Self { Self( outs .iter() @@ -184,7 +184,7 @@ impl From<&[(SeraiAddress, (Coin, Amount))]> for OutInstructions { Coin::Ether => [0; 20].into(), Coin::Erc20(address) => address.into(), }, - value: amount.0.try_into().expect("couldn't convert u64 to u256"), + value: *amount, } }) .collect(), @@ -192,7 +192,7 @@ impl From<&[(SeraiAddress, (Coin, Amount))]> for OutInstructions { } } -/// Executed an command. +/// An action which was executed by the Router. #[derive(Clone, PartialEq, Eq, Debug)] pub enum Executed { /// Set a new key. @@ -218,6 +218,44 @@ impl Executed { Executed::SetKey { nonce, .. } | Executed::Batch { nonce, .. } => *nonce, } } + + /// Write the Executed. + pub fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + match self { + Self::SetKey { nonce, key } => { + writer.write_all(&[0])?; + writer.write_all(&nonce.to_le_bytes())?; + writer.write_all(key) + } + Self::Batch { nonce, message_hash } => { + writer.write_all(&[1])?; + writer.write_all(&nonce.to_le_bytes())?; + writer.write_all(message_hash) + } + } + } + + /// Read an Executed. + pub fn read(reader: &mut impl io::Read) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + if kind[0] >= 2 { + Err(io::Error::other("unrecognized type of Executed"))?; + } + + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + let nonce = u64::from_le_bytes(nonce); + + let mut payload = [0; 32]; + reader.read_exact(&mut payload)?; + + Ok(match kind[0] { + 0 => Self::SetKey { nonce, key: payload }, + 1 => Self::Batch { nonce, message_hash: payload }, + _ => unreachable!(), + }) + } } /// A view of the Router for Serai. 
diff --git a/processor/ethereum/src/key_gen.rs b/processor/ethereum/src/key_gen.rs index 73b7c1e16..581684efc 100644 --- a/processor/ethereum/src/key_gen.rs +++ b/processor/ethereum/src/key_gen.rs @@ -1,7 +1,7 @@ use ciphersuite::{Ciphersuite, Secp256k1}; use dkg::ThresholdKeys; -use ethereum_serai::crypto::PublicKey; +use ethereum_schnorr::PublicKey; pub(crate) struct KeyGenParams; impl key_gen::KeyGenParams for KeyGenParams { diff --git a/processor/ethereum/src/main.rs b/processor/ethereum/src/main.rs index e4ec37013..06c0bc98f 100644 --- a/processor/ethereum/src/main.rs +++ b/processor/ethereum/src/main.rs @@ -8,12 +8,10 @@ static ALLOCATOR: zalloc::ZeroizingAlloc = use std::sync::Arc; -use ethereum_serai::alloy::{ - primitives::U256, - simple_request_transport::SimpleRequest, - rpc_client::ClientBuilder, - provider::{Provider, RootProvider}, -}; +use alloy_core::primitives::U256; +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, RootProvider}; use serai_env as env; diff --git a/processor/ethereum/src/primitives/block.rs b/processor/ethereum/src/primitives/block.rs index e947e8513..2c0e0505f 100644 --- a/processor/ethereum/src/primitives/block.rs +++ b/processor/ethereum/src/primitives/block.rs @@ -5,6 +5,9 @@ use ciphersuite::{Ciphersuite, Secp256k1}; use serai_client::networks::ethereum::Address; use primitives::{ReceivedOutput, EventualityTracker}; + +use ethereum_router::Executed; + use crate::{output::Output, transaction::Eventuality}; // We interpret 32-block Epochs as singular blocks. @@ -37,9 +40,11 @@ impl primitives::BlockHeader for Epoch { } } -#[derive(Clone, Copy, PartialEq, Eq, Debug)] +#[derive(Clone, PartialEq, Eq, Debug)] pub(crate) struct FullEpoch { epoch: Epoch, + outputs: Vec, + executed: Vec, } impl primitives::Block for FullEpoch { @@ -54,7 +59,8 @@ impl primitives::Block for FullEpoch { self.epoch.end_hash } - fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec { + fn scan_for_outputs_unordered(&self, _key: Self::Key) -> Vec { + // Only return these outputs for the latest key todo!("TODO") } @@ -66,6 +72,33 @@ impl primitives::Block for FullEpoch { >::TransactionId, Self::Eventuality, > { - todo!("TODO") + let mut res = HashMap::new(); + for executed in &self.executed { + let Some(expected) = + eventualities.active_eventualities.remove(executed.nonce().to_le_bytes().as_slice()) + else { + continue; + }; + assert_eq!( + executed, + &expected.0, + "Router emitted distinct event for nonce {}", + executed.nonce() + ); + /* + The transaction ID is used to determine how internal outputs from this transaction should + be handled (if they were actually internal or if they were just to an internal address). + The Ethereum integration doesn't have internal addresses, and this transaction wasn't made + by Serai. It was simply authorized by Serai yet may or may not be associated with other + actions we don't want to flag as our own. + + Accordingly, we set the transaction ID to the nonce. This is unique barring someone finding + the preimage which hashes to this nonce, and won't cause any other data to be associated. + */ + let mut tx_id = [0; 32]; + tx_id[.. 
8].copy_from_slice(executed.nonce().to_le_bytes().as_slice()); + res.insert(tx_id, expected); + } + res } } diff --git a/processor/ethereum/src/primitives/mod.rs b/processor/ethereum/src/primitives/mod.rs index fba52dd96..8d2a9118a 100644 --- a/processor/ethereum/src/primitives/mod.rs +++ b/processor/ethereum/src/primitives/mod.rs @@ -1,3 +1,9 @@ pub(crate) mod output; pub(crate) mod transaction; pub(crate) mod block; + +pub(crate) const DAI: [u8; 20] = + match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") { + Ok(res) => res, + Err(_) => panic!("invalid non-test DAI hex address"), + }; diff --git a/processor/ethereum/src/primitives/output.rs b/processor/ethereum/src/primitives/output.rs index 4dadb1474..843f22f6b 100644 --- a/processor/ethereum/src/primitives/output.rs +++ b/processor/ethereum/src/primitives/output.rs @@ -2,10 +2,7 @@ use std::io; use ciphersuite::{Ciphersuite, Secp256k1}; -use ethereum_serai::{ - alloy::primitives::U256, - router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction}, -}; +use alloy_core::primitives::U256; use scale::{Encode, Decode}; use borsh::{BorshSerialize, BorshDeserialize}; @@ -16,12 +13,9 @@ use serai_client::{ }; use primitives::{OutputType, ReceivedOutput}; +use ethereum_router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction}; -const DAI: [u8; 20] = - match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") { - Ok(res) => res, - Err(_) => panic!("invalid non-test DAI hex address"), - }; +use crate::DAI; fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { match coin { @@ -87,7 +81,7 @@ impl ReceivedOutput<::G, Address> for Output { } fn key(&self) -> ::G { - self.0.key_at_end_of_block + todo!("TODO") } fn presumed_origin(&self) -> Option
{ diff --git a/processor/ethereum/src/primitives/transaction.rs b/processor/ethereum/src/primitives/transaction.rs index 908358ecb..f77153ffa 100644 --- a/processor/ethereum/src/primitives/transaction.rs +++ b/processor/ethereum/src/primitives/transaction.rs @@ -1,101 +1,304 @@ -use std::io; +use std::{io, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; -use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; -use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; +use ciphersuite::{Ciphersuite, Secp256k1}; +use frost::{ + dkg::{Participant, ThresholdKeys}, + FrostError, + algorithm::*, + sign::*, +}; -use ethereum_serai::{crypto::PublicKey, machine::*}; +use alloy_core::primitives::U256; + +use serai_client::networks::ethereum::Address; + +use scheduler::SignableTransaction; + +use ethereum_primitives::keccak256; +use ethereum_schnorr::{PublicKey, Signature}; +use ethereum_router::{Coin, OutInstructions, Executed, Router}; use crate::output::OutputId; -#[derive(Clone, Debug)] -pub(crate) struct Transaction(pub(crate) SignedRouterCommand); +#[derive(Clone, PartialEq, Debug)] +pub(crate) enum Action { + SetKey { chain_id: U256, nonce: u64, key: PublicKey }, + Batch { chain_id: U256, nonce: u64, outs: Vec<(Address, (Coin, U256))> }, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Eventuality(pub(crate) Executed); + +impl Action { + fn nonce(&self) -> u64 { + match self { + Action::SetKey { nonce, .. } | Action::Batch { nonce, .. } => *nonce, + } + } + + fn message(&self) -> Vec { + match self { + Action::SetKey { chain_id, nonce, key } => Router::update_serai_key_message(*chain_id, *nonce, key), + Action::Batch { chain_id, nonce, outs } => Router::execute_message(*chain_id, *nonce, OutInstructions::from(outs.as_ref())), + } + } -impl From for Transaction { - fn from(signed_router_command: SignedRouterCommand) -> Self { - Self(signed_router_command) + pub(crate) fn eventuality(&self) -> Eventuality { + Eventuality(match self { + Self::SetKey { chain_id: _, nonce, key } => { + Executed::SetKey { nonce: *nonce, key: key.eth_repr() } + } + Self::Batch { chain_id, nonce, outs } => Executed::Batch { + nonce: *nonce, + message_hash: keccak256(Router::execute_message( + *chain_id, + *nonce, + OutInstructions::from(outs.as_ref()), + )), + }, + }) } } +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct Transaction(Action, Signature); impl scheduler::Transaction for Transaction { fn read(reader: &mut impl io::Read) -> io::Result { - SignedRouterCommand::read(reader).map(Self) + /* + let buf: Vec = borsh::from_reader(reader)?; + // We can only read this from a &[u8], hence prior reading into a Vec + ::decode(&mut buf.as_slice()) + .map(Self) + .map_err(io::Error::other) + */ + let action = Action::read(reader)?; + let signature = Signature::read(reader)?; + Ok(Transaction(action, signature)) } fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { - self.0.write(writer) + /* + let mut buf = Vec::with_capacity(256); + ::encode(&self.0, &mut buf); + borsh::BorshSerialize::serialize(&buf, writer) + */ + self.0.write(writer)?; + self.1.write(writer)?; + Ok(()) } } -#[derive(Clone, Debug)] -pub(crate) struct SignableTransaction(pub(crate) RouterCommand); +/// The HRAm to use for the Schnorr Solidity library. +/// +/// This will panic if the public key being signed for is not representable within the Schnorr +/// Solidity library. 
+#[derive(Clone, Default, Debug)] +pub struct EthereumHram; +impl Hram for EthereumHram { + #[allow(non_snake_case)] + fn hram( + R: &::G, + A: &::G, + m: &[u8], + ) -> ::F { + Signature::challenge(*R, &PublicKey::new(*A).unwrap(), m) + } +} #[derive(Clone)] -pub(crate) struct ClonableTransctionMachine(RouterCommand, ThresholdKeys); +pub(crate) struct ClonableTransctionMachine(ThresholdKeys, Action); + +type LiteralAlgorithmMachine = AlgorithmMachine>; +type LiteralAlgorithmSignMachine = + AlgorithmSignMachine>; + +pub(crate) struct ActionSignMachine(PublicKey, Action, LiteralAlgorithmSignMachine); + +type LiteralAlgorithmSignatureMachine = + AlgorithmSignatureMachine>; + +pub(crate) struct ActionSignatureMachine(PublicKey, Action, LiteralAlgorithmSignatureMachine); + impl PreprocessMachine for ClonableTransctionMachine { - type Preprocess = ::Preprocess; - type Signature = ::Signature; - type SignMachine = ::SignMachine; + type Preprocess = ::Preprocess; + type Signature = Transaction; + type SignMachine = ActionSignMachine; fn preprocess( self, rng: &mut R, ) -> (Self::SignMachine, Self::Preprocess) { - // TODO: Use a proper error here, not an Option - RouterCommandMachine::new(self.1.clone(), self.0.clone()).unwrap().preprocess(rng) + let (machine, preprocess) = AlgorithmMachine::new(IetfSchnorr::::ietf(), self.0.clone()) + .preprocess(rng); + (ActionSignMachine(PublicKey::new(self.0.group_key()).expect("signing with non-representable key"), self.1, machine), preprocess) + } +} + +impl SignMachine for ActionSignMachine { + type Params = ::Signature, + >>::Params; + type Keys = ::Signature, + >>::Keys; + type Preprocess = ::Signature, + >>::Preprocess; + type SignatureShare = ::Signature, + >>::SignatureShare; + type SignatureMachine = ActionSignatureMachine; + + fn cache(self) -> CachedPreprocess { + unimplemented!() + } + fn from_cache( + params: Self::Params, + keys: Self::Keys, + cache: CachedPreprocess, +) -> (Self, Self::Preprocess) { + unimplemented!() + } + + fn read_preprocess(&self, reader: &mut R) -> io::Result { + self.2.read_preprocess(reader) + } + fn sign( + self, + commitments: HashMap, + msg: &[u8], + ) -> Result<(Self::SignatureMachine, Self::SignatureShare), FrostError> { + assert!(msg.is_empty()); + self + .2 + .sign(commitments, &self.1.message()) + .map(|(machine, shares)| (ActionSignatureMachine(self.0, self.1, machine), shares)) + } +} + +impl SignatureMachine for ActionSignatureMachine { + type SignatureShare = ::Signature, + >>::SignatureShare; + + fn read_share(&self, reader: &mut R) -> io::Result { + self.2.read_share(reader) + } + + fn complete( + self, + shares: HashMap, + ) -> Result { + /* + match self.1 { + Action::SetKey { chain_id: _, nonce: _, key } => self.0.update_serai_key(key, signature), + Action::Batch { chain_id: _, nonce: _, outs } => self.0.execute(outs, signature), + } + */ + self.2.complete(shares).map(|signature| { + let s = signature.s; + let c = Signature::challenge(signature.R, &self.0, &self.1.message()); + Transaction(self.1, Signature::new(c, s)) + }) } } -impl scheduler::SignableTransaction for SignableTransaction { +impl SignableTransaction for Action { type Transaction = Transaction; type Ciphersuite = Secp256k1; type PreprocessMachine = ClonableTransctionMachine; fn read(reader: &mut impl io::Read) -> io::Result { - RouterCommand::read(reader).map(Self) + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + if kind[0] >= 2 { + Err(io::Error::other("unrecognized Action type"))?; + } + + let mut chain_id = [0; 32]; + 
+    let mut chain_id = [0; 32];
+    reader.read_exact(&mut chain_id)?;
+    let chain_id = U256::from_le_bytes(chain_id);
+
+    let mut nonce = [0; 8];
+    reader.read_exact(&mut nonce)?;
+    let nonce = u64::from_le_bytes(nonce);
+
+    Ok(match kind[0] {
+      0 => {
+        let mut key = [0; 32];
+        reader.read_exact(&mut key)?;
+        let key =
+          PublicKey::from_eth_repr(key).ok_or_else(|| io::Error::other("invalid key in Action"))?;
+
+        Action::SetKey { chain_id, nonce, key }
+      }
+      1 => {
+        let mut outs_len = [0; 4];
+        reader.read_exact(&mut outs_len)?;
+        let outs_len = usize::try_from(u32::from_le_bytes(outs_len)).unwrap();
+
+        let mut outs = vec![];
+        for _ in 0 .. outs_len {
+          let address = borsh::from_reader(reader)?;
+          let coin = Coin::read(reader)?;
+
+          let mut amount = [0; 32];
+          reader.read_exact(&mut amount)?;
+          let amount = U256::from_le_bytes(amount);
+
+          outs.push((address, (coin, amount)));
+        }
+        Action::Batch { chain_id, nonce, outs }
+      }
+      _ => unreachable!(),
+    })
   }
 
   fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
-    self.0.write(writer)
+    match self {
+      Self::SetKey { chain_id, nonce, key } => {
+        writer.write_all(&[0])?;
+        writer.write_all(&chain_id.as_le_bytes())?;
+        writer.write_all(&nonce.to_le_bytes())?;
+        writer.write_all(&key.eth_repr())
+      }
+      Self::Batch { chain_id, nonce, outs } => {
+        writer.write_all(&[1])?;
+        writer.write_all(&chain_id.as_le_bytes())?;
+        writer.write_all(&nonce.to_le_bytes())?;
+        writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
+        for (address, (coin, amount)) in outs {
+          borsh::BorshSerialize::serialize(address, writer)?;
+          coin.write(writer)?;
+          writer.write_all(&amount.as_le_bytes())?;
+        }
+        Ok(())
+      }
+    }
   }
 
   fn id(&self) -> [u8; 32] {
     let mut res = [0; 32];
-    // TODO: Add getter for the nonce
-    match self.0 {
-      RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => {
-        res[.. 8].copy_from_slice(&nonce.as_le_bytes());
-      }
-    }
+    res[.. 8].copy_from_slice(&self.nonce().to_le_bytes());
     res
   }
 
   fn sign(self, keys: ThresholdKeys<Self::Ciphersuite>) -> Self::PreprocessMachine {
-    ClonableTransctionMachine(self.0, keys)
+    ClonableTransctionMachine(keys, self)
   }
 }
 
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub(crate) struct Eventuality(pub(crate) PublicKey, pub(crate) RouterCommand);
-
 impl primitives::Eventuality for Eventuality {
   type OutputId = OutputId;
 
   fn id(&self) -> [u8; 32] {
     let mut res = [0; 32];
-    match self.1 {
-      RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => {
-        res[.. 8].copy_from_slice(&nonce.as_le_bytes());
-      }
-    }
+    res[.. 8].copy_from_slice(&self.0.nonce().to_le_bytes());
     res
   }
 
   fn lookup(&self) -> Vec<u8> {
-    match self.1 {
-      RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => {
-        nonce.as_le_bytes().to_vec()
-      }
-    }
+    self.0.nonce().to_le_bytes().to_vec()
   }
 
   fn singular_spent_output(&self) -> Option<Self::OutputId> {
@@ -103,15 +306,9 @@ impl primitives::Eventuality for Eventuality {
   }
 
   fn read(reader: &mut impl io::Read) -> io::Result<Self> {
-    let point = Secp256k1::read_G(reader)?;
-    let command = RouterCommand::read(reader)?;
-    Ok(Eventuality(
-      PublicKey::new(point).ok_or(io::Error::other("unusable key within Eventuality"))?,
-      command,
-    ))
+    Executed::read(reader).map(Self)
   }
 
   fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
-    writer.write_all(self.0.point().to_bytes().as_slice())?;
-    self.1.write(writer)
+    self.0.write(writer)
   }
 }
diff --git a/processor/ethereum/src/rpc.rs b/processor/ethereum/src/rpc.rs
index 58b3933e9..819fbf484 100644
--- a/processor/ethereum/src/rpc.rs
+++ b/processor/ethereum/src/rpc.rs
@@ -1,13 +1,9 @@
 use core::future::Future;
 use std::sync::Arc;
 
-use ethereum_serai::{
-  alloy::{
-    rpc_types::{BlockTransactionsKind, BlockNumberOrTag},
-    simple_request_transport::SimpleRequest,
-    provider::{Provider, RootProvider},
-  },
-};
+use alloy_rpc_types_eth::{BlockTransactionsKind, BlockNumberOrTag};
+use alloy_simple_request_transport::SimpleRequest;
+use alloy_provider::{Provider, RootProvider};
 
 use serai_client::primitives::{NetworkId, Coin, Amount};
 
diff --git a/processor/ethereum/src/scheduler.rs b/processor/ethereum/src/scheduler.rs
index 6e17ef70e..ca636b5bb 100644
--- a/processor/ethereum/src/scheduler.rs
+++ b/processor/ethereum/src/scheduler.rs
@@ -1,14 +1,23 @@
-use serai_client::primitives::{NetworkId, Balance};
+use alloy_core::primitives::U256;
 
-use ethereum_serai::{alloy::primitives::U256, router::PublicKey, machine::*};
+use serai_client::primitives::{NetworkId, Coin, Balance};
 
 use primitives::Payment;
 use scanner::{KeyFor, AddressFor, EventualityFor};
 
-use crate::{
-  transaction::{SignableTransaction, Eventuality},
-  rpc::Rpc,
-};
+use ethereum_schnorr::PublicKey;
+use ethereum_router::Coin as EthereumCoin;
+
+use crate::{DAI, transaction::Action, rpc::Rpc};
+
+fn coin_to_ethereum_coin(coin: Coin) -> EthereumCoin {
+  assert_eq!(coin.network(), NetworkId::Ethereum);
+  match coin {
+    Coin::Ether => EthereumCoin::Ether,
+    Coin::Dai => EthereumCoin::Erc20(DAI),
+    _ => unreachable!(),
+  }
+}
 
 fn balance_to_ethereum_amount(balance: Balance) -> U256 {
   assert_eq!(balance.coin.network(), NetworkId::Ethereum);
@@ -24,7 +33,7 @@ pub(crate) struct SmartContract {
   pub(crate) chain_id: U256,
 }
 impl smart_contract_scheduler::SmartContract<Rpc> for SmartContract {
-  type SignableTransaction = SignableTransaction;
+  type SignableTransaction = Action;
 
   fn rotate(
     &self,
@@ -32,16 +41,14 @@ impl smart_contract_scheduler::SmartContract<Rpc> for SmartContract {
     retiring_key: KeyFor<Rpc>,
     new_key: KeyFor<Rpc>,
   ) -> (Self::SignableTransaction, EventualityFor<Rpc>) {
-    let command = RouterCommand::UpdateSeraiKey {
+    let action = Action::SetKey {
       chain_id: self.chain_id,
-      nonce: U256::try_from(nonce).unwrap(),
+      nonce,
       key: PublicKey::new(new_key).expect("rotating to an invalid key"),
     };
-    (
-      SignableTransaction(command.clone()),
-      Eventuality(PublicKey::new(retiring_key).expect("retiring an invalid key"), command),
-    )
+    (action.clone(), action.eventuality())
   }
+
   fn fulfill(
     &self,
     nonce: u64,
@@ -50,40 +57,20 @@ impl smart_contract_scheduler::SmartContract<Rpc> for SmartContract {
   ) -> Vec<(Self::SignableTransaction, EventualityFor<Rpc>)> {
     let mut outs = Vec::with_capacity(payments.len());
     for payment in payments {
-      outs.push(OutInstruction {
-        target: if let Some(data) = payment.data() {
-          // This introspects the Call serialization format, expecting the first 20 bytes to
-          // be the address
-          // This avoids wasting the 20-bytes allocated within address
-          let full_data = [<[u8; 20]>::from(*payment.address()).as_slice(), data].concat();
-          let mut reader = full_data.as_slice();
-
-          let mut calls = vec![];
-          while !reader.is_empty() {
-            let Ok(call) = Call::read(&mut reader) else { break };
-            calls.push(call);
-          }
-          // The above must have executed at least once since reader contains the address
-          assert_eq!(calls[0].to, <[u8; 20]>::from(*payment.address()));
-
-          OutInstructionTarget::Calls(calls)
-        } else {
-          OutInstructionTarget::Direct((*payment.address()).into())
-        },
-        value: { balance_to_ethereum_amount(payment.balance()) },
-      });
+      outs.push((
+        payment.address().clone(),
+        (
+          coin_to_ethereum_coin(payment.balance().coin),
+          balance_to_ethereum_amount(payment.balance()),
+        ),
+      ));
     }
 
-    let command = RouterCommand::Execute {
-      chain_id: self.chain_id,
-      nonce: U256::try_from(nonce).unwrap(),
-      outs,
-    };
+    // TODO: Per-batch gas limit
+    // TODO: Create several batches
+    let action = Action::Batch { chain_id: self.chain_id, nonce, outs };
 
-    vec![(
-      SignableTransaction(command.clone()),
-      Eventuality(PublicKey::new(key).expect("fulfilling payments with an invalid key"), command),
-    )]
+    vec![(action.clone(), action.eventuality())]
   }
 }

From ffd291b307c65261f184d01936dc432122e10ae0 Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Wed, 18 Sep 2024 00:57:10 -0400
Subject: [PATCH 150/179] Remove ethereum-serai/serai-processor-ethereum-contracts

contracts was smashed out of ethereum-serai. Both have now been smashed into
individual crates.

Creates a TODO directory with left-over test code yet to be moved.
---
 .github/workflows/tests.yml                   |    2 -
 Cargo.lock                                    |   93 +-
 Cargo.toml                                    |    2 -
 deny.toml                                     |    2 -
 .../contracts/tests/ERC20.sol                 |    0
 .../{src/lib.rs => TODO/old_processor.rs}     |    0
 .../src => TODO}/tests/crypto.rs              |    0
 .../{ethereum-serai/src => TODO}/tests/mod.rs |    0
 .../src => TODO}/tests/router.rs              |    0
 processor/ethereum/contracts/Cargo.toml       |   32 -
 processor/ethereum/contracts/LICENSE          |   15 -
 processor/ethereum/contracts/README.md        |    7 -
 processor/ethereum/contracts/build.rs         |   69 -
 .../ethereum/contracts/src/abigen/deployer.rs |  584 ----
 .../ethereum/contracts/src/abigen/erc20.rs    | 1838 ----------
 .../ethereum/contracts/src/abigen/mod.rs      |    3 -
 .../ethereum/contracts/src/abigen/router.rs   | 2958 -----------------
 processor/ethereum/contracts/src/lib.rs       |   16 -
 processor/ethereum/ethereum-serai/Cargo.toml  |   52 -
 processor/ethereum/ethereum-serai/LICENSE     |   15 -
 processor/ethereum/ethereum-serai/README.md   |   15 -
 .../ethereum/ethereum-serai/src/crypto.rs     |   32 -
 processor/ethereum/ethereum-serai/src/lib.rs  |   41 -
 .../ethereum/ethereum-serai/src/machine.rs    |  427 ---
 .../ethereum/src/primitives/transaction.rs    |   24 +-
 tests/processor/Cargo.toml                    |    1 -
 26 files changed, 35 insertions(+), 6193 deletions(-)
 rename processor/ethereum/{contracts => TODO}/contracts/tests/ERC20.sol (100%)
 rename processor/ethereum/{src/lib.rs => TODO/old_processor.rs} (100%)
 rename processor/ethereum/{ethereum-serai/src => TODO}/tests/crypto.rs (100%)
 rename processor/ethereum/{ethereum-serai/src => TODO}/tests/mod.rs (100%)
 rename processor/ethereum/{ethereum-serai/src => TODO}/tests/router.rs (100%)
 delete mode 100644 processor/ethereum/contracts/Cargo.toml
 delete mode 100644 processor/ethereum/contracts/LICENSE
 delete mode 100644 processor/ethereum/contracts/README.md
 delete mode 100644
processor/ethereum/contracts/build.rs delete mode 100644 processor/ethereum/contracts/src/abigen/deployer.rs delete mode 100644 processor/ethereum/contracts/src/abigen/erc20.rs delete mode 100644 processor/ethereum/contracts/src/abigen/mod.rs delete mode 100644 processor/ethereum/contracts/src/abigen/router.rs delete mode 100644 processor/ethereum/contracts/src/lib.rs delete mode 100644 processor/ethereum/ethereum-serai/Cargo.toml delete mode 100644 processor/ethereum/ethereum-serai/LICENSE delete mode 100644 processor/ethereum/ethereum-serai/README.md delete mode 100644 processor/ethereum/ethereum-serai/src/crypto.rs delete mode 100644 processor/ethereum/ethereum-serai/src/lib.rs delete mode 100644 processor/ethereum/ethereum-serai/src/machine.rs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e374d4f13..d207e9cdf 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -52,12 +52,10 @@ jobs: -p serai-processor-signers \ -p serai-processor-bin \ -p serai-bitcoin-processor \ - -p serai-processor-ethereum-contracts \ -p serai-processor-ethereum-primitives \ -p serai-processor-ethereum-deployer \ -p serai-processor-ethereum-router \ -p serai-processor-ethereum-erc20 \ - -p ethereum-serai \ -p serai-ethereum-processor \ -p serai-monero-processor \ -p tendermint-machine \ diff --git a/Cargo.lock b/Cargo.lock index f928e57e4..a7f3792aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -184,17 +184,6 @@ dependencies = [ "serde", ] -[[package]] -name = "alloy-json-abi" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "299d2a937b6c60968df3dad2a988b0f0e03277b344639a4f7a31bd68e6285e59" -dependencies = [ - "alloy-primitives", - "alloy-sol-type-parser", - "serde", -] - [[package]] name = "alloy-json-rpc" version = "0.3.1" @@ -426,7 +415,6 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71c4d842beb7a6686d04125603bc57614d5ed78bf95e4753274db3db4ba95214" dependencies = [ - "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck 0.5.0", @@ -445,33 +433,21 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1306e8d3c9e6e6ecf7a39ffaf7291e73a5f655a2defd366ee92c2efebcdf7fee" dependencies = [ - "alloy-json-abi", "const-hex", "dunce", "heck 0.5.0", "proc-macro2", "quote", - "serde_json", "syn 2.0.77", "syn-solidity", ] -[[package]] -name = "alloy-sol-type-parser" -version = "0.8.0" -source = "git+https://github.com/alloy-rs/core?rev=446b9d2fbce12b88456152170709a3eaac929af0#446b9d2fbce12b88456152170709a3eaac929af0" -dependencies = [ - "serde", - "winnow 0.6.18", -] - [[package]] name = "alloy-sol-types" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "577e262966e92112edbd15b1b2c0947cc434d6e8311df96d3329793fe8047da9" dependencies = [ - "alloy-json-abi", "alloy-primitives", "alloy-sol-macro", "const-hex", @@ -2503,30 +2479,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "ethereum-serai" -version = "0.1.0" -dependencies = [ - "alloy-consensus", - "alloy-core", - "alloy-network", - "alloy-node-bindings", - "alloy-provider", - "alloy-rpc-client", - "alloy-rpc-types-eth", - "alloy-simple-request-transport", - "alloy-sol-types", - "ethereum-schnorr-contract", - "flexible-transcript", - "group", - "k256", - "modular-frost", - "rand_core", - "serai-processor-ethereum-contracts", - "thiserror", - "tokio", -] - [[package]] name = "event-listener" version = "2.5.3" @@ -6127,16 
+6079,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "prettyplease" -version = "0.2.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" -dependencies = [ - "proc-macro2", - "syn 2.0.77", -] - [[package]] name = "primeorder" version = "0.13.6" @@ -6302,7 +6244,7 @@ dependencies = [ "log", "multimap", "petgraph", - "prettyplease 0.1.25", + "prettyplease", "prost", "prost-types", "regex", @@ -8385,11 +8327,18 @@ version = "0.1.0" name = "serai-ethereum-processor" version = "0.1.0" dependencies = [ + "alloy-consensus", + "alloy-core", + "alloy-provider", + "alloy-rlp", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", "borsh", "ciphersuite", "const-hex", "dkg", - "ethereum-serai", + "ethereum-schnorr-contract", "hex", "k256", "log", @@ -8400,6 +8349,9 @@ dependencies = [ "serai-db", "serai-env", "serai-processor-bin", + "serai-processor-ethereum-erc20", + "serai-processor-ethereum-primitives", + "serai-processor-ethereum-router", "serai-processor-key-gen", "serai-processor-primitives", "serai-processor-scanner", @@ -8707,20 +8659,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "serai-processor-ethereum-contracts" -version = "0.1.0" -dependencies = [ - "alloy-sol-macro-expander", - "alloy-sol-macro-input", - "alloy-sol-types", - "build-solidity-contracts", - "prettyplease 0.2.22", - "serde_json", - "syn 2.0.77", - "syn-solidity", -] - [[package]] name = "serai-processor-ethereum-deployer" version = "0.1.0" @@ -8770,7 +8708,6 @@ dependencies = [ "alloy-provider", "alloy-rpc-types-eth", "alloy-simple-request-transport", - "alloy-sol-macro", "alloy-sol-macro-expander", "alloy-sol-macro-input", "alloy-sol-types", @@ -8924,7 +8861,6 @@ dependencies = [ "curve25519-dalek", "dkg", "dockertest", - "ethereum-serai", "hex", "k256", "monero-simple-request-rpc", @@ -11954,3 +11890,8 @@ dependencies = [ "cc", "pkg-config", ] + +[[patch.unused]] +name = "alloy-sol-type-parser" +version = "0.8.0" +source = "git+https://github.com/alloy-rs/core?rev=446b9d2fbce12b88456152170709a3eaac929af0#446b9d2fbce12b88456152170709a3eaac929af0" diff --git a/Cargo.toml b/Cargo.toml index 3c203cedb..99a10be04 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,12 +87,10 @@ members = [ "processor/bin", "processor/bitcoin", - "processor/ethereum/contracts", "processor/ethereum/primitives", "processor/ethereum/deployer", "processor/ethereum/router", "processor/ethereum/erc20", - "processor/ethereum/ethereum-serai", "processor/ethereum", "processor/monero", diff --git a/deny.toml b/deny.toml index 9ee16043a..d09fc8ebb 100644 --- a/deny.toml +++ b/deny.toml @@ -59,12 +59,10 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-processor-signers" }, { allow = ["AGPL-3.0"], name = "serai-bitcoin-processor" }, - { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-contracts" }, { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-primitives" }, { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-deployer" }, { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-router" }, { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-erc20" }, - { allow = ["AGPL-3.0"], name = "ethereum-serai" }, { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" }, { allow = ["AGPL-3.0"], name = "serai-monero-processor" }, diff --git a/processor/ethereum/contracts/contracts/tests/ERC20.sol b/processor/ethereum/TODO/contracts/tests/ERC20.sol similarity index 100% rename from 
processor/ethereum/contracts/contracts/tests/ERC20.sol rename to processor/ethereum/TODO/contracts/tests/ERC20.sol diff --git a/processor/ethereum/src/lib.rs b/processor/ethereum/TODO/old_processor.rs similarity index 100% rename from processor/ethereum/src/lib.rs rename to processor/ethereum/TODO/old_processor.rs diff --git a/processor/ethereum/ethereum-serai/src/tests/crypto.rs b/processor/ethereum/TODO/tests/crypto.rs similarity index 100% rename from processor/ethereum/ethereum-serai/src/tests/crypto.rs rename to processor/ethereum/TODO/tests/crypto.rs diff --git a/processor/ethereum/ethereum-serai/src/tests/mod.rs b/processor/ethereum/TODO/tests/mod.rs similarity index 100% rename from processor/ethereum/ethereum-serai/src/tests/mod.rs rename to processor/ethereum/TODO/tests/mod.rs diff --git a/processor/ethereum/ethereum-serai/src/tests/router.rs b/processor/ethereum/TODO/tests/router.rs similarity index 100% rename from processor/ethereum/ethereum-serai/src/tests/router.rs rename to processor/ethereum/TODO/tests/router.rs diff --git a/processor/ethereum/contracts/Cargo.toml b/processor/ethereum/contracts/Cargo.toml deleted file mode 100644 index 5ed540b66..000000000 --- a/processor/ethereum/contracts/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "serai-processor-ethereum-contracts" -version = "0.1.0" -description = "Ethereum contracts for the Serai processor" -license = "AGPL-3.0-only" -repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/contracts" -authors = ["Luke Parker ", "Elizabeth Binks "] -edition = "2021" -publish = false -rust-version = "1.79" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[lints] -workspace = true - -[dependencies] -alloy-sol-types = { version = "0.8", default-features = false, features = ["json"] } - -[build-dependencies] -build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts" } - -syn = { version = "2", default-features = false, features = ["proc-macro"] } - -serde_json = { version = "1", default-features = false, features = ["std"] } - -syn-solidity = { version = "0.8", default-features = false } -alloy-sol-macro-input = { version = "0.8", default-features = false } -alloy-sol-macro-expander = { version = "0.8", default-features = false } -prettyplease = { version = "0.2", default-features = false } diff --git a/processor/ethereum/contracts/LICENSE b/processor/ethereum/contracts/LICENSE deleted file mode 100644 index 41d5a2616..000000000 --- a/processor/ethereum/contracts/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -AGPL-3.0-only license - -Copyright (c) 2022-2024 Luke Parker - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License Version 3 as -published by the Free Software Foundation. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . diff --git a/processor/ethereum/contracts/README.md b/processor/ethereum/contracts/README.md deleted file mode 100644 index fcd8f3c75..000000000 --- a/processor/ethereum/contracts/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Serai Processor Ethereum Contracts - -The Ethereum contracts used for (and for testing) the Serai processor. 
This is -its own crate for organizational and build-time reasons. It is not intended to -be publicly used. - -This crate will fail to build if `solc` is not installed and available. diff --git a/processor/ethereum/contracts/build.rs b/processor/ethereum/contracts/build.rs deleted file mode 100644 index 23d1e9072..000000000 --- a/processor/ethereum/contracts/build.rs +++ /dev/null @@ -1,69 +0,0 @@ -use std::{env, fs}; - -use alloy_sol_macro_input::{SolInputKind, SolInput}; - -fn write(sol: syn_solidity::File, file: &str) { - let sol = alloy_sol_macro_expander::expand::expand(sol).unwrap(); - fs::write( - file, - // TODO: Replace `prettyplease::unparse` with `to_string` - prettyplease::unparse(&syn::File { - attrs: vec![], - items: vec![syn::parse2(sol).unwrap()], - shebang: None, - }) - .as_bytes(), - ) - .unwrap(); -} - -fn sol(sol: &str, file: &str) { - let alloy_sol_macro_input::SolInputKind::Sol(sol) = - syn::parse_str(&std::fs::read_to_string(sol).unwrap()).unwrap() - else { - panic!("parsed .sol file wasn't SolInputKind::Sol"); - }; - write(sol, file); -} - -fn abi(ident: &str, abi: &str, file: &str) { - let SolInputKind::Sol(sol) = (SolInput { - attrs: vec![], - path: None, - kind: SolInputKind::Json( - syn::parse_str(ident).unwrap(), - serde_json::from_str(&fs::read_to_string(abi).unwrap()).unwrap(), - ), - }) - .normalize_json() - .unwrap() - .kind - else { - panic!("normalized JSON wasn't SolInputKind::Sol"); - }; - write(sol, file); -} - -fn main() { - let artifacts_path = - env::var("OUT_DIR").unwrap().to_string() + "/serai-processor-ethereum-contracts"; - build_solidity_contracts::build( - &["../../../networks/ethereum/schnorr/contracts"], - "contracts", - &artifacts_path, - ) - .unwrap(); - - // TODO: Use OUT_DIR for the generated code - if !fs::exists("src/abigen").unwrap() { - fs::create_dir("src/abigen").unwrap(); - } - - // These can be handled with the sol! macro - sol("contracts/IERC20.sol", "src/abigen/erc20.rs"); - sol("contracts/Deployer.sol", "src/abigen/deployer.rs"); - // This cannot be handled with the sol! macro. The Solidity requires an import, the ABI is built - // to OUT_DIR and the macro doesn't support non-static paths: - // https://github.com/alloy-rs/core/issues/738 - abi("Router", &(artifacts_path.clone() + "/Router.abi"), "src/abigen/router.rs"); -} diff --git a/processor/ethereum/contracts/src/abigen/deployer.rs b/processor/ethereum/contracts/src/abigen/deployer.rs deleted file mode 100644 index f4bcb3a66..000000000 --- a/processor/ethereum/contracts/src/abigen/deployer.rs +++ /dev/null @@ -1,584 +0,0 @@ -///Module containing a contract's types and functions. -/** - -```solidity -contract Deployer { - event Deployment(bytes32 indexed init_code_hash, address created); - error DeploymentFailed(); - function deploy(bytes memory init_code) external { } -} -```*/ -#[allow(non_camel_case_types, non_snake_case, clippy::style)] -pub mod Deployer { - use super::*; - use ::alloy_sol_types as alloy_sol_types; - /**Event with signature `Deployment(bytes32,address)` and selector `0x60b877a3bae7bf0f0bd5e1c40ebf44ea158201397f6b72d7c05360157b1ec0fc`. 
-```solidity -event Deployment(bytes32 indexed init_code_hash, address created); -```*/ - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - #[derive(Clone)] - pub struct Deployment { - #[allow(missing_docs)] - pub init_code_hash: ::alloy_sol_types::private::FixedBytes<32>, - #[allow(missing_docs)] - pub created: ::alloy_sol_types::private::Address, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[automatically_derived] - impl alloy_sol_types::SolEvent for Deployment { - type DataTuple<'a> = (::alloy_sol_types::sol_data::Address,); - type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - type TopicList = ( - alloy_sol_types::sol_data::FixedBytes<32>, - ::alloy_sol_types::sol_data::FixedBytes<32>, - ); - const SIGNATURE: &'static str = "Deployment(bytes32,address)"; - const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ - 96u8, - 184u8, - 119u8, - 163u8, - 186u8, - 231u8, - 191u8, - 15u8, - 11u8, - 213u8, - 225u8, - 196u8, - 14u8, - 191u8, - 68u8, - 234u8, - 21u8, - 130u8, - 1u8, - 57u8, - 127u8, - 107u8, - 114u8, - 215u8, - 192u8, - 83u8, - 96u8, - 21u8, - 123u8, - 30u8, - 192u8, - 252u8, - ]); - const ANONYMOUS: bool = false; - #[allow(unused_variables)] - #[inline] - fn new( - topics: ::RustType, - data: as alloy_sol_types::SolType>::RustType, - ) -> Self { - Self { - init_code_hash: topics.1, - created: data.0, - } - } - #[inline] - fn tokenize_body(&self) -> Self::DataToken<'_> { - ( - <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( - &self.created, - ), - ) - } - #[inline] - fn topics(&self) -> ::RustType { - (Self::SIGNATURE_HASH.into(), self.init_code_hash.clone()) - } - #[inline] - fn encode_topics_raw( - &self, - out: &mut [alloy_sol_types::abi::token::WordToken], - ) -> alloy_sol_types::Result<()> { - if out.len() < ::COUNT { - return Err(alloy_sol_types::Error::Overrun); - } - out[0usize] = alloy_sol_types::abi::token::WordToken( - Self::SIGNATURE_HASH, - ); - out[1usize] = <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::EventTopic>::encode_topic(&self.init_code_hash); - Ok(()) - } - } - #[automatically_derived] - impl alloy_sol_types::private::IntoLogData for Deployment { - fn to_log_data(&self) -> alloy_sol_types::private::LogData { - From::from(self) - } - fn into_log_data(self) -> alloy_sol_types::private::LogData { - From::from(&self) - } - } - #[automatically_derived] - impl From<&Deployment> for alloy_sol_types::private::LogData { - #[inline] - fn from(this: &Deployment) -> alloy_sol_types::private::LogData { - alloy_sol_types::SolEvent::encode_log_data(this) - } - } - }; - /**Custom error with signature `DeploymentFailed()` and selector `0x30116425`. 
-```solidity -error DeploymentFailed(); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct DeploymentFailed {} - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: DeploymentFailed) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for DeploymentFailed { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - #[automatically_derived] - impl alloy_sol_types::SolError for DeploymentFailed { - type Parameters<'a> = UnderlyingSolTuple<'a>; - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "DeploymentFailed()"; - const SELECTOR: [u8; 4] = [48u8, 17u8, 100u8, 37u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - () - } - } - }; - /**Function with signature `deploy(bytes)` and selector `0x00774360`. -```solidity -function deploy(bytes memory init_code) external { } -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct deployCall { - pub init_code: ::alloy_sol_types::private::Bytes, - } - ///Container type for the return parameters of the [`deploy(bytes)`](deployCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct deployReturn {} - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Bytes,); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: deployCall) -> Self { - (value.init_code,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for deployCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { init_code: tuple.0 } - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: deployReturn) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for deployReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for deployCall { - type Parameters<'a> = (::alloy_sol_types::sol_data::Bytes,); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = deployReturn; - type ReturnTuple<'a> = (); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "deploy(bytes)"; - const SELECTOR: [u8; 4] = [0u8, 119u8, 67u8, 96u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - ( - <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( - &self.init_code, - ), - ) - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - ///Container for all the [`Deployer`](self) function calls. - pub enum DeployerCalls { - deploy(deployCall), - } - #[automatically_derived] - impl DeployerCalls { - /// All the selectors of this enum. - /// - /// Note that the selectors might not be in the same order as the variants. - /// No guarantees are made about the order of the selectors. - /// - /// Prefer using `SolInterface` methods instead. 
- pub const SELECTORS: &'static [[u8; 4usize]] = &[[0u8, 119u8, 67u8, 96u8]]; - } - #[automatically_derived] - impl alloy_sol_types::SolInterface for DeployerCalls { - const NAME: &'static str = "DeployerCalls"; - const MIN_DATA_LENGTH: usize = 64usize; - const COUNT: usize = 1usize; - #[inline] - fn selector(&self) -> [u8; 4] { - match self { - Self::deploy(_) => ::SELECTOR, - } - } - #[inline] - fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { - Self::SELECTORS.get(i).copied() - } - #[inline] - fn valid_selector(selector: [u8; 4]) -> bool { - Self::SELECTORS.binary_search(&selector).is_ok() - } - #[inline] - #[allow(unsafe_code, non_snake_case)] - fn abi_decode_raw( - selector: [u8; 4], - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - static DECODE_SHIMS: &[fn( - &[u8], - bool, - ) -> alloy_sol_types::Result] = &[ - { - fn deploy( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(DeployerCalls::deploy) - } - deploy - }, - ]; - let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { - return Err( - alloy_sol_types::Error::unknown_selector( - ::NAME, - selector, - ), - ); - }; - (unsafe { DECODE_SHIMS.get_unchecked(idx) })(data, validate) - } - #[inline] - fn abi_encoded_size(&self) -> usize { - match self { - Self::deploy(inner) => { - ::abi_encoded_size(inner) - } - } - } - #[inline] - fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { - match self { - Self::deploy(inner) => { - ::abi_encode_raw(inner, out) - } - } - } - } - ///Container for all the [`Deployer`](self) custom errors. - pub enum DeployerErrors { - DeploymentFailed(DeploymentFailed), - } - #[automatically_derived] - impl DeployerErrors { - /// All the selectors of this enum. - /// - /// Note that the selectors might not be in the same order as the variants. - /// No guarantees are made about the order of the selectors. - /// - /// Prefer using `SolInterface` methods instead. 
- pub const SELECTORS: &'static [[u8; 4usize]] = &[[48u8, 17u8, 100u8, 37u8]]; - } - #[automatically_derived] - impl alloy_sol_types::SolInterface for DeployerErrors { - const NAME: &'static str = "DeployerErrors"; - const MIN_DATA_LENGTH: usize = 0usize; - const COUNT: usize = 1usize; - #[inline] - fn selector(&self) -> [u8; 4] { - match self { - Self::DeploymentFailed(_) => { - ::SELECTOR - } - } - } - #[inline] - fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { - Self::SELECTORS.get(i).copied() - } - #[inline] - fn valid_selector(selector: [u8; 4]) -> bool { - Self::SELECTORS.binary_search(&selector).is_ok() - } - #[inline] - #[allow(unsafe_code, non_snake_case)] - fn abi_decode_raw( - selector: [u8; 4], - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - static DECODE_SHIMS: &[fn( - &[u8], - bool, - ) -> alloy_sol_types::Result] = &[ - { - fn DeploymentFailed( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(DeployerErrors::DeploymentFailed) - } - DeploymentFailed - }, - ]; - let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { - return Err( - alloy_sol_types::Error::unknown_selector( - ::NAME, - selector, - ), - ); - }; - (unsafe { DECODE_SHIMS.get_unchecked(idx) })(data, validate) - } - #[inline] - fn abi_encoded_size(&self) -> usize { - match self { - Self::DeploymentFailed(inner) => { - ::abi_encoded_size( - inner, - ) - } - } - } - #[inline] - fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { - match self { - Self::DeploymentFailed(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - } - } - } - ///Container for all the [`Deployer`](self) events. - pub enum DeployerEvents { - Deployment(Deployment), - } - #[automatically_derived] - impl DeployerEvents { - /// All the selectors of this enum. - /// - /// Note that the selectors might not be in the same order as the variants. - /// No guarantees are made about the order of the selectors. - /// - /// Prefer using `SolInterface` methods instead. 
- pub const SELECTORS: &'static [[u8; 32usize]] = &[ - [ - 96u8, - 184u8, - 119u8, - 163u8, - 186u8, - 231u8, - 191u8, - 15u8, - 11u8, - 213u8, - 225u8, - 196u8, - 14u8, - 191u8, - 68u8, - 234u8, - 21u8, - 130u8, - 1u8, - 57u8, - 127u8, - 107u8, - 114u8, - 215u8, - 192u8, - 83u8, - 96u8, - 21u8, - 123u8, - 30u8, - 192u8, - 252u8, - ], - ]; - } - #[automatically_derived] - impl alloy_sol_types::SolEventInterface for DeployerEvents { - const NAME: &'static str = "DeployerEvents"; - const COUNT: usize = 1usize; - fn decode_raw_log( - topics: &[alloy_sol_types::Word], - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - match topics.first().copied() { - Some(::SIGNATURE_HASH) => { - ::decode_raw_log( - topics, - data, - validate, - ) - .map(Self::Deployment) - } - _ => { - alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { - name: ::NAME, - log: alloy_sol_types::private::Box::new( - alloy_sol_types::private::LogData::new_unchecked( - topics.to_vec(), - data.to_vec().into(), - ), - ), - }) - } - } - } - } - #[automatically_derived] - impl alloy_sol_types::private::IntoLogData for DeployerEvents { - fn to_log_data(&self) -> alloy_sol_types::private::LogData { - match self { - Self::Deployment(inner) => { - alloy_sol_types::private::IntoLogData::to_log_data(inner) - } - } - } - fn into_log_data(self) -> alloy_sol_types::private::LogData { - match self { - Self::Deployment(inner) => { - alloy_sol_types::private::IntoLogData::into_log_data(inner) - } - } - } - } -} diff --git a/processor/ethereum/contracts/src/abigen/erc20.rs b/processor/ethereum/contracts/src/abigen/erc20.rs deleted file mode 100644 index d9c0dd6e6..000000000 --- a/processor/ethereum/contracts/src/abigen/erc20.rs +++ /dev/null @@ -1,1838 +0,0 @@ -///Module containing a contract's types and functions. -/** - -```solidity -interface IERC20 { - event Transfer(address indexed from, address indexed to, uint256 value); - event Approval(address indexed owner, address indexed spender, uint256 value); - function name() external view returns (string memory); - function symbol() external view returns (string memory); - function decimals() external view returns (uint8); - function totalSupply() external view returns (uint256); - function balanceOf(address owner) external view returns (uint256); - function transfer(address to, uint256 value) external returns (bool); - function transferFrom(address from, address to, uint256 value) external returns (bool); - function approve(address spender, uint256 value) external returns (bool); - function allowance(address owner, address spender) external view returns (uint256); -} -```*/ -#[allow(non_camel_case_types, non_snake_case, clippy::style)] -pub mod IERC20 { - use super::*; - use ::alloy_sol_types as alloy_sol_types; - /**Event with signature `Transfer(address,address,uint256)` and selector `0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef`. 
-```solidity -event Transfer(address indexed from, address indexed to, uint256 value); -```*/ - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - #[derive(Clone)] - pub struct Transfer { - #[allow(missing_docs)] - pub from: ::alloy_sol_types::private::Address, - #[allow(missing_docs)] - pub to: ::alloy_sol_types::private::Address, - #[allow(missing_docs)] - pub value: ::alloy_sol_types::private::primitives::aliases::U256, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[automatically_derived] - impl alloy_sol_types::SolEvent for Transfer { - type DataTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - type TopicList = ( - alloy_sol_types::sol_data::FixedBytes<32>, - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Address, - ); - const SIGNATURE: &'static str = "Transfer(address,address,uint256)"; - const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ - 221u8, - 242u8, - 82u8, - 173u8, - 27u8, - 226u8, - 200u8, - 155u8, - 105u8, - 194u8, - 176u8, - 104u8, - 252u8, - 55u8, - 141u8, - 170u8, - 149u8, - 43u8, - 167u8, - 241u8, - 99u8, - 196u8, - 161u8, - 22u8, - 40u8, - 245u8, - 90u8, - 77u8, - 245u8, - 35u8, - 179u8, - 239u8, - ]); - const ANONYMOUS: bool = false; - #[allow(unused_variables)] - #[inline] - fn new( - topics: ::RustType, - data: as alloy_sol_types::SolType>::RustType, - ) -> Self { - Self { - from: topics.1, - to: topics.2, - value: data.0, - } - } - #[inline] - fn tokenize_body(&self) -> Self::DataToken<'_> { - ( - <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::SolType>::tokenize(&self.value), - ) - } - #[inline] - fn topics(&self) -> ::RustType { - (Self::SIGNATURE_HASH.into(), self.from.clone(), self.to.clone()) - } - #[inline] - fn encode_topics_raw( - &self, - out: &mut [alloy_sol_types::abi::token::WordToken], - ) -> alloy_sol_types::Result<()> { - if out.len() < ::COUNT { - return Err(alloy_sol_types::Error::Overrun); - } - out[0usize] = alloy_sol_types::abi::token::WordToken( - Self::SIGNATURE_HASH, - ); - out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( - &self.from, - ); - out[2usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( - &self.to, - ); - Ok(()) - } - } - #[automatically_derived] - impl alloy_sol_types::private::IntoLogData for Transfer { - fn to_log_data(&self) -> alloy_sol_types::private::LogData { - From::from(self) - } - fn into_log_data(self) -> alloy_sol_types::private::LogData { - From::from(&self) - } - } - #[automatically_derived] - impl From<&Transfer> for alloy_sol_types::private::LogData { - #[inline] - fn from(this: &Transfer) -> alloy_sol_types::private::LogData { - alloy_sol_types::SolEvent::encode_log_data(this) - } - } - }; - /**Event with signature `Approval(address,address,uint256)` and selector `0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925`. 
-```solidity -event Approval(address indexed owner, address indexed spender, uint256 value); -```*/ - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - #[derive(Clone)] - pub struct Approval { - #[allow(missing_docs)] - pub owner: ::alloy_sol_types::private::Address, - #[allow(missing_docs)] - pub spender: ::alloy_sol_types::private::Address, - #[allow(missing_docs)] - pub value: ::alloy_sol_types::private::primitives::aliases::U256, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[automatically_derived] - impl alloy_sol_types::SolEvent for Approval { - type DataTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - type TopicList = ( - alloy_sol_types::sol_data::FixedBytes<32>, - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Address, - ); - const SIGNATURE: &'static str = "Approval(address,address,uint256)"; - const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ - 140u8, - 91u8, - 225u8, - 229u8, - 235u8, - 236u8, - 125u8, - 91u8, - 209u8, - 79u8, - 113u8, - 66u8, - 125u8, - 30u8, - 132u8, - 243u8, - 221u8, - 3u8, - 20u8, - 192u8, - 247u8, - 178u8, - 41u8, - 30u8, - 91u8, - 32u8, - 10u8, - 200u8, - 199u8, - 195u8, - 185u8, - 37u8, - ]); - const ANONYMOUS: bool = false; - #[allow(unused_variables)] - #[inline] - fn new( - topics: ::RustType, - data: as alloy_sol_types::SolType>::RustType, - ) -> Self { - Self { - owner: topics.1, - spender: topics.2, - value: data.0, - } - } - #[inline] - fn tokenize_body(&self) -> Self::DataToken<'_> { - ( - <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::SolType>::tokenize(&self.value), - ) - } - #[inline] - fn topics(&self) -> ::RustType { - (Self::SIGNATURE_HASH.into(), self.owner.clone(), self.spender.clone()) - } - #[inline] - fn encode_topics_raw( - &self, - out: &mut [alloy_sol_types::abi::token::WordToken], - ) -> alloy_sol_types::Result<()> { - if out.len() < ::COUNT { - return Err(alloy_sol_types::Error::Overrun); - } - out[0usize] = alloy_sol_types::abi::token::WordToken( - Self::SIGNATURE_HASH, - ); - out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( - &self.owner, - ); - out[2usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( - &self.spender, - ); - Ok(()) - } - } - #[automatically_derived] - impl alloy_sol_types::private::IntoLogData for Approval { - fn to_log_data(&self) -> alloy_sol_types::private::LogData { - From::from(self) - } - fn into_log_data(self) -> alloy_sol_types::private::LogData { - From::from(&self) - } - } - #[automatically_derived] - impl From<&Approval> for alloy_sol_types::private::LogData { - #[inline] - fn from(this: &Approval) -> alloy_sol_types::private::LogData { - alloy_sol_types::SolEvent::encode_log_data(this) - } - } - }; - /**Function with signature `name()` and selector `0x06fdde03`. -```solidity -function name() external view returns (string memory); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct nameCall {} - ///Container type for the return parameters of the [`name()`](nameCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct nameReturn { - pub _0: ::alloy_sol_types::private::String, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: nameCall) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for nameCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String,); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: nameReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for nameReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for nameCall { - type Parameters<'a> = (); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = nameReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::String,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "name()"; - const SELECTOR: [u8; 4] = [6u8, 253u8, 222u8, 3u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - () - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `symbol()` and selector `0x95d89b41`. -```solidity -function symbol() external view returns (string memory); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct symbolCall {} - ///Container type for the return parameters of the [`symbol()`](symbolCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct symbolReturn { - pub _0: ::alloy_sol_types::private::String, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: symbolCall) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for symbolCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String,); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: symbolReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for symbolReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for symbolCall { - type Parameters<'a> = (); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = symbolReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::String,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "symbol()"; - const SELECTOR: [u8; 4] = [149u8, 216u8, 155u8, 65u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - () - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `decimals()` and selector `0x313ce567`. -```solidity -function decimals() external view returns (uint8); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct decimalsCall {} - ///Container type for the return parameters of the [`decimals()`](decimalsCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct decimalsReturn { - pub _0: u8, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: decimalsCall) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for decimalsCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<8>,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (u8,); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: decimalsReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for decimalsReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for decimalsCall { - type Parameters<'a> = (); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = decimalsReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<8>,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "decimals()"; - const SELECTOR: [u8; 4] = [49u8, 60u8, 229u8, 103u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - () - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `totalSupply()` and selector `0x18160ddd`. -```solidity -function totalSupply() external view returns (uint256); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct totalSupplyCall {} - ///Container type for the return parameters of the [`totalSupply()`](totalSupplyCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct totalSupplyReturn { - pub _0: ::alloy_sol_types::private::primitives::aliases::U256, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: totalSupplyCall) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for totalSupplyCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::primitives::aliases::U256, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: totalSupplyReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for totalSupplyReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for totalSupplyCall { - type Parameters<'a> = (); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = totalSupplyReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "totalSupply()"; - const SELECTOR: [u8; 4] = [24u8, 22u8, 13u8, 221u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - () - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `balanceOf(address)` and selector `0x70a08231`. -```solidity -function balanceOf(address owner) external view returns (uint256); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct balanceOfCall { - pub owner: ::alloy_sol_types::private::Address, - } - ///Container type for the return parameters of the [`balanceOf(address)`](balanceOfCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct balanceOfReturn { - pub _0: ::alloy_sol_types::private::primitives::aliases::U256, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: balanceOfCall) -> Self { - (value.owner,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for balanceOfCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { owner: tuple.0 } - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::primitives::aliases::U256, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: balanceOfReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for balanceOfReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for balanceOfCall { - type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = balanceOfReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "balanceOf(address)"; - const SELECTOR: [u8; 4] = [112u8, 160u8, 130u8, 49u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - ( - <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( - &self.owner, - ), - ) - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `transfer(address,uint256)` and selector `0xa9059cbb`. -```solidity -function transfer(address to, uint256 value) external returns (bool); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct transferCall { - pub to: ::alloy_sol_types::private::Address, - pub value: ::alloy_sol_types::private::primitives::aliases::U256, - } - ///Container type for the return parameters of the [`transfer(address,uint256)`](transferCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct transferReturn { - pub _0: bool, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = ( - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Uint<256>, - ); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::Address, - ::alloy_sol_types::private::primitives::aliases::U256, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: transferCall) -> Self { - (value.to, value.value) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for transferCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { - to: tuple.0, - value: tuple.1, - } - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bool,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (bool,); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: transferReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for transferReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for transferCall { - type Parameters<'a> = ( - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Uint<256>, - ); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = transferReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Bool,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "transfer(address,uint256)"; - const SELECTOR: [u8; 4] = [169u8, 5u8, 156u8, 187u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - ( - <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( - &self.to, - ), - <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::SolType>::tokenize(&self.value), - ) - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `transferFrom(address,address,uint256)` and selector `0x23b872dd`. 
-```solidity -function transferFrom(address from, address to, uint256 value) external returns (bool); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct transferFromCall { - pub from: ::alloy_sol_types::private::Address, - pub to: ::alloy_sol_types::private::Address, - pub value: ::alloy_sol_types::private::primitives::aliases::U256, - } - ///Container type for the return parameters of the [`transferFrom(address,address,uint256)`](transferFromCall) function. - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct transferFromReturn { - pub _0: bool, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = ( - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Uint<256>, - ); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::Address, - ::alloy_sol_types::private::Address, - ::alloy_sol_types::private::primitives::aliases::U256, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: transferFromCall) -> Self { - (value.from, value.to, value.value) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for transferFromCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { - from: tuple.0, - to: tuple.1, - value: tuple.2, - } - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bool,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (bool,); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: transferFromReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for transferFromReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for transferFromCall { - type Parameters<'a> = ( - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Uint<256>, - ); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = transferFromReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Bool,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "transferFrom(address,address,uint256)"; - const SELECTOR: [u8; 4] = [35u8, 184u8, 114u8, 221u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - ( - <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( - &self.from, - ), - <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( - &self.to, - ), - <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::SolType>::tokenize(&self.value), 
- ) - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `approve(address,uint256)` and selector `0x095ea7b3`. -```solidity -function approve(address spender, uint256 value) external returns (bool); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct approveCall { - pub spender: ::alloy_sol_types::private::Address, - pub value: ::alloy_sol_types::private::primitives::aliases::U256, - } - ///Container type for the return parameters of the [`approve(address,uint256)`](approveCall) function. - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct approveReturn { - pub _0: bool, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = ( - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Uint<256>, - ); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::Address, - ::alloy_sol_types::private::primitives::aliases::U256, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: approveCall) -> Self { - (value.spender, value.value) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for approveCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { - spender: tuple.0, - value: tuple.1, - } - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bool,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (bool,); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: approveReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for approveReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for approveCall { - type Parameters<'a> = ( - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Uint<256>, - ); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = approveReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Bool,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "approve(address,uint256)"; - const SELECTOR: [u8; 4] = [9u8, 94u8, 167u8, 179u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - ( - <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( - &self.spender, - ), - <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::SolType>::tokenize(&self.value), - ) - } - #[inline] - fn abi_decode_returns( - data: &[u8], - 
validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `allowance(address,address)` and selector `0xdd62ed3e`. -```solidity -function allowance(address owner, address spender) external view returns (uint256); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct allowanceCall { - pub owner: ::alloy_sol_types::private::Address, - pub spender: ::alloy_sol_types::private::Address, - } - ///Container type for the return parameters of the [`allowance(address,address)`](allowanceCall) function. - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct allowanceReturn { - pub _0: ::alloy_sol_types::private::primitives::aliases::U256, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = ( - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Address, - ); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::Address, - ::alloy_sol_types::private::Address, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: allowanceCall) -> Self { - (value.owner, value.spender) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for allowanceCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { - owner: tuple.0, - spender: tuple.1, - } - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::primitives::aliases::U256, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: allowanceReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for allowanceReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for allowanceCall { - type Parameters<'a> = ( - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Address, - ); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = allowanceReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "allowance(address,address)"; - const SELECTOR: [u8; 4] = [221u8, 98u8, 237u8, 62u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - ( - <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( - &self.owner, - ), - <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( - &self.spender, - ), - ) - } - #[inline] - fn 
abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - ///Container for all the [`IERC20`](self) function calls. - pub enum IERC20Calls { - name(nameCall), - symbol(symbolCall), - decimals(decimalsCall), - totalSupply(totalSupplyCall), - balanceOf(balanceOfCall), - transfer(transferCall), - transferFrom(transferFromCall), - approve(approveCall), - allowance(allowanceCall), - } - #[automatically_derived] - impl IERC20Calls { - /// All the selectors of this enum. - /// - /// Note that the selectors might not be in the same order as the variants. - /// No guarantees are made about the order of the selectors. - /// - /// Prefer using `SolInterface` methods instead. - pub const SELECTORS: &'static [[u8; 4usize]] = &[ - [6u8, 253u8, 222u8, 3u8], - [9u8, 94u8, 167u8, 179u8], - [24u8, 22u8, 13u8, 221u8], - [35u8, 184u8, 114u8, 221u8], - [49u8, 60u8, 229u8, 103u8], - [112u8, 160u8, 130u8, 49u8], - [149u8, 216u8, 155u8, 65u8], - [169u8, 5u8, 156u8, 187u8], - [221u8, 98u8, 237u8, 62u8], - ]; - } - #[automatically_derived] - impl alloy_sol_types::SolInterface for IERC20Calls { - const NAME: &'static str = "IERC20Calls"; - const MIN_DATA_LENGTH: usize = 0usize; - const COUNT: usize = 9usize; - #[inline] - fn selector(&self) -> [u8; 4] { - match self { - Self::name(_) => ::SELECTOR, - Self::symbol(_) => ::SELECTOR, - Self::decimals(_) => ::SELECTOR, - Self::totalSupply(_) => { - ::SELECTOR - } - Self::balanceOf(_) => { - ::SELECTOR - } - Self::transfer(_) => ::SELECTOR, - Self::transferFrom(_) => { - ::SELECTOR - } - Self::approve(_) => ::SELECTOR, - Self::allowance(_) => { - ::SELECTOR - } - } - } - #[inline] - fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { - Self::SELECTORS.get(i).copied() - } - #[inline] - fn valid_selector(selector: [u8; 4]) -> bool { - Self::SELECTORS.binary_search(&selector).is_ok() - } - #[inline] - #[allow(unsafe_code, non_snake_case)] - fn abi_decode_raw( - selector: [u8; 4], - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - static DECODE_SHIMS: &[fn( - &[u8], - bool, - ) -> alloy_sol_types::Result] = &[ - { - fn name( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(IERC20Calls::name) - } - name - }, - { - fn approve( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(IERC20Calls::approve) - } - approve - }, - { - fn totalSupply( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(IERC20Calls::totalSupply) - } - totalSupply - }, - { - fn transferFrom( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(IERC20Calls::transferFrom) - } - transferFrom - }, - { - fn decimals( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(IERC20Calls::decimals) - } - decimals - }, - { - fn balanceOf( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(IERC20Calls::balanceOf) - } - balanceOf - }, - { - fn symbol( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(IERC20Calls::symbol) - } - symbol - }, - { - fn transfer( - data: &[u8], - validate: bool, - ) -> 
alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(IERC20Calls::transfer) - } - transfer - }, - { - fn allowance( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(IERC20Calls::allowance) - } - allowance - }, - ]; - let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { - return Err( - alloy_sol_types::Error::unknown_selector( - ::NAME, - selector, - ), - ); - }; - (unsafe { DECODE_SHIMS.get_unchecked(idx) })(data, validate) - } - #[inline] - fn abi_encoded_size(&self) -> usize { - match self { - Self::name(inner) => { - ::abi_encoded_size(inner) - } - Self::symbol(inner) => { - ::abi_encoded_size(inner) - } - Self::decimals(inner) => { - ::abi_encoded_size(inner) - } - Self::totalSupply(inner) => { - ::abi_encoded_size( - inner, - ) - } - Self::balanceOf(inner) => { - ::abi_encoded_size(inner) - } - Self::transfer(inner) => { - ::abi_encoded_size(inner) - } - Self::transferFrom(inner) => { - ::abi_encoded_size( - inner, - ) - } - Self::approve(inner) => { - ::abi_encoded_size(inner) - } - Self::allowance(inner) => { - ::abi_encoded_size(inner) - } - } - } - #[inline] - fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { - match self { - Self::name(inner) => { - ::abi_encode_raw(inner, out) - } - Self::symbol(inner) => { - ::abi_encode_raw(inner, out) - } - Self::decimals(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - Self::totalSupply(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - Self::balanceOf(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - Self::transfer(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - Self::transferFrom(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - Self::approve(inner) => { - ::abi_encode_raw(inner, out) - } - Self::allowance(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - } - } - } - ///Container for all the [`IERC20`](self) events. - pub enum IERC20Events { - Transfer(Transfer), - Approval(Approval), - } - #[automatically_derived] - impl IERC20Events { - /// All the selectors of this enum. - /// - /// Note that the selectors might not be in the same order as the variants. - /// No guarantees are made about the order of the selectors. - /// - /// Prefer using `SolInterface` methods instead. 
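// Unlike function calls, events are discriminated by the full 32-byte
// keccak256 of their signature (log topic zero), so the table below holds
// `[u8; 32]` entries. They are sorted bytewise to support binary search;
// it is coincidence that Approval's hash (0x8c5be1e5...) orders before
// Transfer's (0xddf252ad...). A minimal check, assuming `alloy_primitives`:
//
//     use alloy_primitives::keccak256;
//
//     let approval = keccak256("Approval(address,address,uint256)");
//     let transfer = keccak256("Transfer(address,address,uint256)");
//     assert!(approval < transfer);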
- pub const SELECTORS: &'static [[u8; 32usize]] = &[ - [ - 140u8, - 91u8, - 225u8, - 229u8, - 235u8, - 236u8, - 125u8, - 91u8, - 209u8, - 79u8, - 113u8, - 66u8, - 125u8, - 30u8, - 132u8, - 243u8, - 221u8, - 3u8, - 20u8, - 192u8, - 247u8, - 178u8, - 41u8, - 30u8, - 91u8, - 32u8, - 10u8, - 200u8, - 199u8, - 195u8, - 185u8, - 37u8, - ], - [ - 221u8, - 242u8, - 82u8, - 173u8, - 27u8, - 226u8, - 200u8, - 155u8, - 105u8, - 194u8, - 176u8, - 104u8, - 252u8, - 55u8, - 141u8, - 170u8, - 149u8, - 43u8, - 167u8, - 241u8, - 99u8, - 196u8, - 161u8, - 22u8, - 40u8, - 245u8, - 90u8, - 77u8, - 245u8, - 35u8, - 179u8, - 239u8, - ], - ]; - } - #[automatically_derived] - impl alloy_sol_types::SolEventInterface for IERC20Events { - const NAME: &'static str = "IERC20Events"; - const COUNT: usize = 2usize; - fn decode_raw_log( - topics: &[alloy_sol_types::Word], - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - match topics.first().copied() { - Some(::SIGNATURE_HASH) => { - ::decode_raw_log( - topics, - data, - validate, - ) - .map(Self::Transfer) - } - Some(::SIGNATURE_HASH) => { - ::decode_raw_log( - topics, - data, - validate, - ) - .map(Self::Approval) - } - _ => { - alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { - name: ::NAME, - log: alloy_sol_types::private::Box::new( - alloy_sol_types::private::LogData::new_unchecked( - topics.to_vec(), - data.to_vec().into(), - ), - ), - }) - } - } - } - } - #[automatically_derived] - impl alloy_sol_types::private::IntoLogData for IERC20Events { - fn to_log_data(&self) -> alloy_sol_types::private::LogData { - match self { - Self::Transfer(inner) => { - alloy_sol_types::private::IntoLogData::to_log_data(inner) - } - Self::Approval(inner) => { - alloy_sol_types::private::IntoLogData::to_log_data(inner) - } - } - } - fn into_log_data(self) -> alloy_sol_types::private::LogData { - match self { - Self::Transfer(inner) => { - alloy_sol_types::private::IntoLogData::into_log_data(inner) - } - Self::Approval(inner) => { - alloy_sol_types::private::IntoLogData::into_log_data(inner) - } - } - } - } -} diff --git a/processor/ethereum/contracts/src/abigen/mod.rs b/processor/ethereum/contracts/src/abigen/mod.rs deleted file mode 100644 index 541c2980d..000000000 --- a/processor/ethereum/contracts/src/abigen/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod erc20; -pub mod deployer; -pub mod router; diff --git a/processor/ethereum/contracts/src/abigen/router.rs b/processor/ethereum/contracts/src/abigen/router.rs deleted file mode 100644 index cea1858f1..000000000 --- a/processor/ethereum/contracts/src/abigen/router.rs +++ /dev/null @@ -1,2958 +0,0 @@ -/** - -Generated by the following Solidity interface... 
-```solidity -interface Router { - type DestinationType is uint8; - struct OutInstruction { - DestinationType destinationType; - bytes destination; - address coin; - uint256 value; - } - struct Signature { - bytes32 c; - bytes32 s; - } - - error FailedTransfer(); - error InvalidAmount(); - error InvalidSignature(); - - event Executed(uint256 indexed nonce, bytes32 indexed batch); - event InInstruction(address indexed from, address indexed coin, uint256 amount, bytes instruction); - event SeraiKeyUpdated(uint256 indexed nonce, bytes32 indexed key); - - constructor(bytes32 initialSeraiKey); - - function arbitaryCallOut(bytes memory code) external; - function execute(OutInstruction[] memory transactions, Signature memory signature) external; - function inInstruction(address coin, uint256 amount, bytes memory instruction) external payable; - function nonce() external view returns (uint256); - function seraiKey() external view returns (bytes32); - function smartContractNonce() external view returns (uint256); - function updateSeraiKey(bytes32 newSeraiKey, Signature memory signature) external; -} -``` - -...which was generated by the following JSON ABI: -```json -[ - { - "type": "constructor", - "inputs": [ - { - "name": "initialSeraiKey", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "arbitaryCallOut", - "inputs": [ - { - "name": "code", - "type": "bytes", - "internalType": "bytes" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "execute", - "inputs": [ - { - "name": "transactions", - "type": "tuple[]", - "internalType": "struct Router.OutInstruction[]", - "components": [ - { - "name": "destinationType", - "type": "uint8", - "internalType": "enum Router.DestinationType" - }, - { - "name": "destination", - "type": "bytes", - "internalType": "bytes" - }, - { - "name": "coin", - "type": "address", - "internalType": "address" - }, - { - "name": "value", - "type": "uint256", - "internalType": "uint256" - } - ] - }, - { - "name": "signature", - "type": "tuple", - "internalType": "struct Router.Signature", - "components": [ - { - "name": "c", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "s", - "type": "bytes32", - "internalType": "bytes32" - } - ] - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "inInstruction", - "inputs": [ - { - "name": "coin", - "type": "address", - "internalType": "address" - }, - { - "name": "amount", - "type": "uint256", - "internalType": "uint256" - }, - { - "name": "instruction", - "type": "bytes", - "internalType": "bytes" - } - ], - "outputs": [], - "stateMutability": "payable" - }, - { - "type": "function", - "name": "nonce", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "uint256", - "internalType": "uint256" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "seraiKey", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "smartContractNonce", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "uint256", - "internalType": "uint256" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "updateSeraiKey", - "inputs": [ - { - "name": "newSeraiKey", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "signature", - "type": "tuple", - "internalType": "struct 
Router.Signature", - "components": [ - { - "name": "c", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "s", - "type": "bytes32", - "internalType": "bytes32" - } - ] - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "event", - "name": "Executed", - "inputs": [ - { - "name": "nonce", - "type": "uint256", - "indexed": true, - "internalType": "uint256" - }, - { - "name": "batch", - "type": "bytes32", - "indexed": true, - "internalType": "bytes32" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "InInstruction", - "inputs": [ - { - "name": "from", - "type": "address", - "indexed": true, - "internalType": "address" - }, - { - "name": "coin", - "type": "address", - "indexed": true, - "internalType": "address" - }, - { - "name": "amount", - "type": "uint256", - "indexed": false, - "internalType": "uint256" - }, - { - "name": "instruction", - "type": "bytes", - "indexed": false, - "internalType": "bytes" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "SeraiKeyUpdated", - "inputs": [ - { - "name": "nonce", - "type": "uint256", - "indexed": true, - "internalType": "uint256" - }, - { - "name": "key", - "type": "bytes32", - "indexed": true, - "internalType": "bytes32" - } - ], - "anonymous": false - }, - { - "type": "error", - "name": "FailedTransfer", - "inputs": [] - }, - { - "type": "error", - "name": "InvalidAmount", - "inputs": [] - }, - { - "type": "error", - "name": "InvalidSignature", - "inputs": [] - } -] -```*/ -#[allow(non_camel_case_types, non_snake_case, clippy::style)] -pub mod Router { - use super::*; - use ::alloy_sol_types as alloy_sol_types; - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct DestinationType(u8); - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[automatically_derived] - impl alloy_sol_types::private::SolTypeValue for u8 { - #[inline] - fn stv_to_tokens( - &self, - ) -> <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::SolType>::Token<'_> { - alloy_sol_types::private::SolTypeValue::< - ::alloy_sol_types::sol_data::Uint<8>, - >::stv_to_tokens(self) - } - #[inline] - fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { - <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::SolType>::tokenize(self) - .0 - } - #[inline] - fn stv_abi_encode_packed_to( - &self, - out: &mut alloy_sol_types::private::Vec, - ) { - <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::SolType>::abi_encode_packed_to(self, out) - } - #[inline] - fn stv_abi_packed_encoded_size(&self) -> usize { - <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::SolType>::abi_encoded_size(self) - } - } - #[automatically_derived] - impl DestinationType { - /// The Solidity type name. - pub const NAME: &'static str = stringify!(@ name); - /// Convert from the underlying value type. - #[inline] - pub const fn from(value: u8) -> Self { - Self(value) - } - /// Return the underlying value. - #[inline] - pub const fn into(self) -> u8 { - self.0 - } - /// Return the single encoding of this value, delegating to the - /// underlying type. - #[inline] - pub fn abi_encode(&self) -> alloy_sol_types::private::Vec { - ::abi_encode(&self.0) - } - /// Return the packed encoding of this value, delegating to the - /// underlying type. 
- #[inline] - pub fn abi_encode_packed(&self) -> alloy_sol_types::private::Vec { - ::abi_encode_packed(&self.0) - } - } - #[automatically_derived] - impl alloy_sol_types::SolType for DestinationType { - type RustType = u8; - type Token<'a> = <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::SolType>::Token<'a>; - const SOL_NAME: &'static str = Self::NAME; - const ENCODED_SIZE: Option = <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::SolType>::ENCODED_SIZE; - const PACKED_ENCODED_SIZE: Option = <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; - #[inline] - fn valid_token(token: &Self::Token<'_>) -> bool { - Self::type_check(token).is_ok() - } - #[inline] - fn type_check(token: &Self::Token<'_>) -> alloy_sol_types::Result<()> { - <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::SolType>::type_check(token) - } - #[inline] - fn detokenize(token: Self::Token<'_>) -> Self::RustType { - <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::SolType>::detokenize(token) - } - } - #[automatically_derived] - impl alloy_sol_types::EventTopic for DestinationType { - #[inline] - fn topic_preimage_length(rust: &Self::RustType) -> usize { - <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::EventTopic>::topic_preimage_length(rust) - } - #[inline] - fn encode_topic_preimage( - rust: &Self::RustType, - out: &mut alloy_sol_types::private::Vec, - ) { - <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::EventTopic>::encode_topic_preimage(rust, out) - } - #[inline] - fn encode_topic( - rust: &Self::RustType, - ) -> alloy_sol_types::abi::token::WordToken { - <::alloy_sol_types::sol_data::Uint< - 8, - > as alloy_sol_types::EventTopic>::encode_topic(rust) - } - } - }; - /**```solidity -struct OutInstruction { DestinationType destinationType; bytes destination; address coin; uint256 value; } -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct OutInstruction { - pub destinationType: ::RustType, - pub destination: ::alloy_sol_types::private::Bytes, - pub coin: ::alloy_sol_types::private::Address, - pub value: ::alloy_sol_types::private::primitives::aliases::U256, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[doc(hidden)] - type UnderlyingSolTuple<'a> = ( - DestinationType, - ::alloy_sol_types::sol_data::Bytes, - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Uint<256>, - ); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::RustType, - ::alloy_sol_types::private::Bytes, - ::alloy_sol_types::private::Address, - ::alloy_sol_types::private::primitives::aliases::U256, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: OutInstruction) -> Self { - (value.destinationType, value.destination, value.coin, value.value) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for OutInstruction { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { - destinationType: tuple.0, - destination: tuple.1, - coin: tuple.2, - value: tuple.3, - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolValue for OutInstruction { 
- type SolType = Self; - } - #[automatically_derived] - impl alloy_sol_types::private::SolTypeValue for OutInstruction { - #[inline] - fn stv_to_tokens(&self) -> ::Token<'_> { - ( - ::tokenize( - &self.destinationType, - ), - <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( - &self.destination, - ), - <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( - &self.coin, - ), - <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::SolType>::tokenize(&self.value), - ) - } - #[inline] - fn stv_abi_encoded_size(&self) -> usize { - if let Some(size) = ::ENCODED_SIZE { - return size; - } - let tuple = as ::core::convert::From>::from(self.clone()); - as alloy_sol_types::SolType>::abi_encoded_size(&tuple) - } - #[inline] - fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { - ::eip712_hash_struct(self) - } - #[inline] - fn stv_abi_encode_packed_to( - &self, - out: &mut alloy_sol_types::private::Vec, - ) { - let tuple = as ::core::convert::From>::from(self.clone()); - as alloy_sol_types::SolType>::abi_encode_packed_to(&tuple, out) - } - #[inline] - fn stv_abi_packed_encoded_size(&self) -> usize { - if let Some(size) = ::PACKED_ENCODED_SIZE { - return size; - } - let tuple = as ::core::convert::From>::from(self.clone()); - as alloy_sol_types::SolType>::abi_packed_encoded_size(&tuple) - } - } - #[automatically_derived] - impl alloy_sol_types::SolType for OutInstruction { - type RustType = Self; - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SOL_NAME: &'static str = ::NAME; - const ENCODED_SIZE: Option = as alloy_sol_types::SolType>::ENCODED_SIZE; - const PACKED_ENCODED_SIZE: Option = as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; - #[inline] - fn valid_token(token: &Self::Token<'_>) -> bool { - as alloy_sol_types::SolType>::valid_token(token) - } - #[inline] - fn detokenize(token: Self::Token<'_>) -> Self::RustType { - let tuple = as alloy_sol_types::SolType>::detokenize(token); - >>::from(tuple) - } - } - #[automatically_derived] - impl alloy_sol_types::SolStruct for OutInstruction { - const NAME: &'static str = "OutInstruction"; - #[inline] - fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { - alloy_sol_types::private::Cow::Borrowed( - "OutInstruction(uint8 destinationType,bytes destination,address coin,uint256 value)", - ) - } - #[inline] - fn eip712_components() -> alloy_sol_types::private::Vec< - alloy_sol_types::private::Cow<'static, str>, - > { - alloy_sol_types::private::Vec::new() - } - #[inline] - fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { - ::eip712_root_type() - } - #[inline] - fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { - [ - ::eip712_data_word( - &self.destinationType, - ) - .0, - <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::eip712_data_word( - &self.destination, - ) - .0, - <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::eip712_data_word( - &self.coin, - ) - .0, - <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::SolType>::eip712_data_word(&self.value) - .0, - ] - .concat() - } - } - #[automatically_derived] - impl alloy_sol_types::EventTopic for OutInstruction { - #[inline] - fn topic_preimage_length(rust: &Self::RustType) -> usize { - 0usize - + ::topic_preimage_length( - &rust.destinationType, - ) - + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::EventTopic>::topic_preimage_length( - &rust.destination, - ) - + <::alloy_sol_types::sol_data::Address as 
alloy_sol_types::EventTopic>::topic_preimage_length( - &rust.coin, - ) - + <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.value) - } - #[inline] - fn encode_topic_preimage( - rust: &Self::RustType, - out: &mut alloy_sol_types::private::Vec, - ) { - out.reserve( - ::topic_preimage_length(rust), - ); - ::encode_topic_preimage( - &rust.destinationType, - out, - ); - <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::EventTopic>::encode_topic_preimage( - &rust.destination, - out, - ); - <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic_preimage( - &rust.coin, - out, - ); - <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::EventTopic>::encode_topic_preimage( - &rust.value, - out, - ); - } - #[inline] - fn encode_topic( - rust: &Self::RustType, - ) -> alloy_sol_types::abi::token::WordToken { - let mut out = alloy_sol_types::private::Vec::new(); - ::encode_topic_preimage( - rust, - &mut out, - ); - alloy_sol_types::abi::token::WordToken( - alloy_sol_types::private::keccak256(out), - ) - } - } - }; - /**```solidity -struct Signature { bytes32 c; bytes32 s; } -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct Signature { - pub c: ::alloy_sol_types::private::FixedBytes<32>, - pub s: ::alloy_sol_types::private::FixedBytes<32>, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[doc(hidden)] - type UnderlyingSolTuple<'a> = ( - ::alloy_sol_types::sol_data::FixedBytes<32>, - ::alloy_sol_types::sol_data::FixedBytes<32>, - ); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::FixedBytes<32>, - ::alloy_sol_types::private::FixedBytes<32>, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: Signature) -> Self { - (value.c, value.s) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for Signature { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { c: tuple.0, s: tuple.1 } - } - } - #[automatically_derived] - impl alloy_sol_types::SolValue for Signature { - type SolType = Self; - } - #[automatically_derived] - impl alloy_sol_types::private::SolTypeValue for Signature { - #[inline] - fn stv_to_tokens(&self) -> ::Token<'_> { - ( - <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::SolType>::tokenize(&self.c), - <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::SolType>::tokenize(&self.s), - ) - } - #[inline] - fn stv_abi_encoded_size(&self) -> usize { - if let Some(size) = ::ENCODED_SIZE { - return size; - } - let tuple = as ::core::convert::From>::from(self.clone()); - as alloy_sol_types::SolType>::abi_encoded_size(&tuple) - } - #[inline] - fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { - ::eip712_hash_struct(self) - } - #[inline] - fn stv_abi_encode_packed_to( - &self, - out: &mut alloy_sol_types::private::Vec, - ) { - let tuple = as ::core::convert::From>::from(self.clone()); - as alloy_sol_types::SolType>::abi_encode_packed_to(&tuple, out) - } - #[inline] - fn stv_abi_packed_encoded_size(&self) -> usize { - if let Some(size) = ::PACKED_ENCODED_SIZE { - return 
size; - } - let tuple = as ::core::convert::From>::from(self.clone()); - as alloy_sol_types::SolType>::abi_packed_encoded_size(&tuple) - } - } - #[automatically_derived] - impl alloy_sol_types::SolType for Signature { - type RustType = Self; - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SOL_NAME: &'static str = ::NAME; - const ENCODED_SIZE: Option = as alloy_sol_types::SolType>::ENCODED_SIZE; - const PACKED_ENCODED_SIZE: Option = as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; - #[inline] - fn valid_token(token: &Self::Token<'_>) -> bool { - as alloy_sol_types::SolType>::valid_token(token) - } - #[inline] - fn detokenize(token: Self::Token<'_>) -> Self::RustType { - let tuple = as alloy_sol_types::SolType>::detokenize(token); - >>::from(tuple) - } - } - #[automatically_derived] - impl alloy_sol_types::SolStruct for Signature { - const NAME: &'static str = "Signature"; - #[inline] - fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { - alloy_sol_types::private::Cow::Borrowed("Signature(bytes32 c,bytes32 s)") - } - #[inline] - fn eip712_components() -> alloy_sol_types::private::Vec< - alloy_sol_types::private::Cow<'static, str>, - > { - alloy_sol_types::private::Vec::new() - } - #[inline] - fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { - ::eip712_root_type() - } - #[inline] - fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { - [ - <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::SolType>::eip712_data_word(&self.c) - .0, - <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::SolType>::eip712_data_word(&self.s) - .0, - ] - .concat() - } - } - #[automatically_derived] - impl alloy_sol_types::EventTopic for Signature { - #[inline] - fn topic_preimage_length(rust: &Self::RustType) -> usize { - 0usize - + <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.c) - + <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.s) - } - #[inline] - fn encode_topic_preimage( - rust: &Self::RustType, - out: &mut alloy_sol_types::private::Vec, - ) { - out.reserve( - ::topic_preimage_length(rust), - ); - <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::EventTopic>::encode_topic_preimage(&rust.c, out); - <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::EventTopic>::encode_topic_preimage(&rust.s, out); - } - #[inline] - fn encode_topic( - rust: &Self::RustType, - ) -> alloy_sol_types::abi::token::WordToken { - let mut out = alloy_sol_types::private::Vec::new(); - ::encode_topic_preimage( - rust, - &mut out, - ); - alloy_sol_types::abi::token::WordToken( - alloy_sol_types::private::keccak256(out), - ) - } - } - }; - /**Custom error with signature `FailedTransfer()` and selector `0xbfa871c5`. 
-```solidity -error FailedTransfer(); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct FailedTransfer {} - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: FailedTransfer) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for FailedTransfer { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - #[automatically_derived] - impl alloy_sol_types::SolError for FailedTransfer { - type Parameters<'a> = UnderlyingSolTuple<'a>; - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "FailedTransfer()"; - const SELECTOR: [u8; 4] = [191u8, 168u8, 113u8, 197u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - () - } - } - }; - /**Custom error with signature `InvalidAmount()` and selector `0x2c5211c6`. -```solidity -error InvalidAmount(); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct InvalidAmount {} - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: InvalidAmount) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for InvalidAmount { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - #[automatically_derived] - impl alloy_sol_types::SolError for InvalidAmount { - type Parameters<'a> = UnderlyingSolTuple<'a>; - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "InvalidAmount()"; - const SELECTOR: [u8; 4] = [44u8, 82u8, 17u8, 198u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - () - } - } - }; - /**Custom error with signature `InvalidSignature()` and selector `0x8baa579f`. 
-```solidity -error InvalidSignature(); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct InvalidSignature {} - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: InvalidSignature) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for InvalidSignature { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - #[automatically_derived] - impl alloy_sol_types::SolError for InvalidSignature { - type Parameters<'a> = UnderlyingSolTuple<'a>; - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "InvalidSignature()"; - const SELECTOR: [u8; 4] = [139u8, 170u8, 87u8, 159u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - () - } - } - }; - /**Event with signature `Executed(uint256,bytes32)` and selector `0xc218c77e54cac1162571e52b65bb27aa0cdfcc70b7c7296ad83933914b132091`. -```solidity -event Executed(uint256 indexed nonce, bytes32 indexed batch); -```*/ - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - #[derive(Clone)] - pub struct Executed { - #[allow(missing_docs)] - pub nonce: ::alloy_sol_types::private::primitives::aliases::U256, - #[allow(missing_docs)] - pub batch: ::alloy_sol_types::private::FixedBytes<32>, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[automatically_derived] - impl alloy_sol_types::SolEvent for Executed { - type DataTuple<'a> = (); - type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - type TopicList = ( - alloy_sol_types::sol_data::FixedBytes<32>, - ::alloy_sol_types::sol_data::Uint<256>, - ::alloy_sol_types::sol_data::FixedBytes<32>, - ); - const SIGNATURE: &'static str = "Executed(uint256,bytes32)"; - const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ - 194u8, - 24u8, - 199u8, - 126u8, - 84u8, - 202u8, - 193u8, - 22u8, - 37u8, - 113u8, - 229u8, - 43u8, - 101u8, - 187u8, - 39u8, - 170u8, - 12u8, - 223u8, - 204u8, - 112u8, - 183u8, - 199u8, - 41u8, - 106u8, - 216u8, - 57u8, - 51u8, - 145u8, - 75u8, - 19u8, - 32u8, - 145u8, - ]); - const ANONYMOUS: bool = false; - #[allow(unused_variables)] - #[inline] - fn new( - topics: ::RustType, - data: as alloy_sol_types::SolType>::RustType, - ) -> Self { - Self { - nonce: topics.1, - batch: topics.2, - } - } - #[inline] - fn tokenize_body(&self) -> Self::DataToken<'_> { - () - } - #[inline] - fn topics(&self) -> ::RustType { - (Self::SIGNATURE_HASH.into(), self.nonce.clone(), self.batch.clone()) - } - #[inline] - fn encode_topics_raw( - &self, - out: &mut [alloy_sol_types::abi::token::WordToken], - ) -> alloy_sol_types::Result<()> { - if out.len() < ::COUNT { - return Err(alloy_sol_types::Error::Overrun); - } - out[0usize] = alloy_sol_types::abi::token::WordToken( - Self::SIGNATURE_HASH, - ); - out[1usize] = 
<::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::EventTopic>::encode_topic(&self.nonce); - out[2usize] = <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::EventTopic>::encode_topic(&self.batch); - Ok(()) - } - } - #[automatically_derived] - impl alloy_sol_types::private::IntoLogData for Executed { - fn to_log_data(&self) -> alloy_sol_types::private::LogData { - From::from(self) - } - fn into_log_data(self) -> alloy_sol_types::private::LogData { - From::from(&self) - } - } - #[automatically_derived] - impl From<&Executed> for alloy_sol_types::private::LogData { - #[inline] - fn from(this: &Executed) -> alloy_sol_types::private::LogData { - alloy_sol_types::SolEvent::encode_log_data(this) - } - } - }; - /**Event with signature `InInstruction(address,address,uint256,bytes)` and selector `0x346fd5cd6d19d26d3afd222f43033ecd0d5614ca64bec0aed101482cd87e922f`. -```solidity -event InInstruction(address indexed from, address indexed coin, uint256 amount, bytes instruction); -```*/ - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - #[derive(Clone)] - pub struct InInstruction { - #[allow(missing_docs)] - pub from: ::alloy_sol_types::private::Address, - #[allow(missing_docs)] - pub coin: ::alloy_sol_types::private::Address, - #[allow(missing_docs)] - pub amount: ::alloy_sol_types::private::primitives::aliases::U256, - #[allow(missing_docs)] - pub instruction: ::alloy_sol_types::private::Bytes, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[automatically_derived] - impl alloy_sol_types::SolEvent for InInstruction { - type DataTuple<'a> = ( - ::alloy_sol_types::sol_data::Uint<256>, - ::alloy_sol_types::sol_data::Bytes, - ); - type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - type TopicList = ( - alloy_sol_types::sol_data::FixedBytes<32>, - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Address, - ); - const SIGNATURE: &'static str = "InInstruction(address,address,uint256,bytes)"; - const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ - 52u8, - 111u8, - 213u8, - 205u8, - 109u8, - 25u8, - 210u8, - 109u8, - 58u8, - 253u8, - 34u8, - 47u8, - 67u8, - 3u8, - 62u8, - 205u8, - 13u8, - 86u8, - 20u8, - 202u8, - 100u8, - 190u8, - 192u8, - 174u8, - 209u8, - 1u8, - 72u8, - 44u8, - 216u8, - 126u8, - 146u8, - 47u8, - ]); - const ANONYMOUS: bool = false; - #[allow(unused_variables)] - #[inline] - fn new( - topics: ::RustType, - data: as alloy_sol_types::SolType>::RustType, - ) -> Self { - Self { - from: topics.1, - coin: topics.2, - amount: data.0, - instruction: data.1, - } - } - #[inline] - fn tokenize_body(&self) -> Self::DataToken<'_> { - ( - <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::SolType>::tokenize(&self.amount), - <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( - &self.instruction, - ), - ) - } - #[inline] - fn topics(&self) -> ::RustType { - (Self::SIGNATURE_HASH.into(), self.from.clone(), self.coin.clone()) - } - #[inline] - fn encode_topics_raw( - &self, - out: &mut [alloy_sol_types::abi::token::WordToken], - ) -> alloy_sol_types::Result<()> { - if out.len() < ::COUNT { - return Err(alloy_sol_types::Error::Overrun); - } - out[0usize] = alloy_sol_types::abi::token::WordToken( - Self::SIGNATURE_HASH, - ); - out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( - &self.from, - ); - out[2usize] = 
<::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( - &self.coin, - ); - Ok(()) - } - } - #[automatically_derived] - impl alloy_sol_types::private::IntoLogData for InInstruction { - fn to_log_data(&self) -> alloy_sol_types::private::LogData { - From::from(self) - } - fn into_log_data(self) -> alloy_sol_types::private::LogData { - From::from(&self) - } - } - #[automatically_derived] - impl From<&InInstruction> for alloy_sol_types::private::LogData { - #[inline] - fn from(this: &InInstruction) -> alloy_sol_types::private::LogData { - alloy_sol_types::SolEvent::encode_log_data(this) - } - } - }; - /**Event with signature `SeraiKeyUpdated(uint256,bytes32)` and selector `0x1b9ff0164e811045a617ae783e807501a8e27762a7cb8f2fbd027851752570b5`. -```solidity -event SeraiKeyUpdated(uint256 indexed nonce, bytes32 indexed key); -```*/ - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - #[derive(Clone)] - pub struct SeraiKeyUpdated { - #[allow(missing_docs)] - pub nonce: ::alloy_sol_types::private::primitives::aliases::U256, - #[allow(missing_docs)] - pub key: ::alloy_sol_types::private::FixedBytes<32>, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - #[automatically_derived] - impl alloy_sol_types::SolEvent for SeraiKeyUpdated { - type DataTuple<'a> = (); - type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - type TopicList = ( - alloy_sol_types::sol_data::FixedBytes<32>, - ::alloy_sol_types::sol_data::Uint<256>, - ::alloy_sol_types::sol_data::FixedBytes<32>, - ); - const SIGNATURE: &'static str = "SeraiKeyUpdated(uint256,bytes32)"; - const SIGNATURE_HASH: alloy_sol_types::private::B256 = alloy_sol_types::private::B256::new([ - 27u8, - 159u8, - 240u8, - 22u8, - 78u8, - 129u8, - 16u8, - 69u8, - 166u8, - 23u8, - 174u8, - 120u8, - 62u8, - 128u8, - 117u8, - 1u8, - 168u8, - 226u8, - 119u8, - 98u8, - 167u8, - 203u8, - 143u8, - 47u8, - 189u8, - 2u8, - 120u8, - 81u8, - 117u8, - 37u8, - 112u8, - 181u8, - ]); - const ANONYMOUS: bool = false; - #[allow(unused_variables)] - #[inline] - fn new( - topics: ::RustType, - data: as alloy_sol_types::SolType>::RustType, - ) -> Self { - Self { - nonce: topics.1, - key: topics.2, - } - } - #[inline] - fn tokenize_body(&self) -> Self::DataToken<'_> { - () - } - #[inline] - fn topics(&self) -> ::RustType { - (Self::SIGNATURE_HASH.into(), self.nonce.clone(), self.key.clone()) - } - #[inline] - fn encode_topics_raw( - &self, - out: &mut [alloy_sol_types::abi::token::WordToken], - ) -> alloy_sol_types::Result<()> { - if out.len() < ::COUNT { - return Err(alloy_sol_types::Error::Overrun); - } - out[0usize] = alloy_sol_types::abi::token::WordToken( - Self::SIGNATURE_HASH, - ); - out[1usize] = <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::EventTopic>::encode_topic(&self.nonce); - out[2usize] = <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::EventTopic>::encode_topic(&self.key); - Ok(()) - } - } - #[automatically_derived] - impl alloy_sol_types::private::IntoLogData for SeraiKeyUpdated { - fn to_log_data(&self) -> alloy_sol_types::private::LogData { - From::from(self) - } - fn into_log_data(self) -> alloy_sol_types::private::LogData { - From::from(&self) - } - } - #[automatically_derived] - impl From<&SeraiKeyUpdated> for alloy_sol_types::private::LogData { - #[inline] - fn from(this: &SeraiKeyUpdated) -> alloy_sol_types::private::LogData { - alloy_sol_types::SolEvent::encode_log_data(this) - } - } - 
}; - /**Constructor with signature `constructor(bytes32)`. -```solidity -constructor(bytes32 initialSeraiKey); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct constructorCall { - pub initialSeraiKey: ::alloy_sol_types::private::FixedBytes<32>, - } - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::FixedBytes<32>,); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq<UnderlyingRustTuple>, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - <UnderlyingSolTuple as alloy_sol_types::SolType>::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From<constructorCall> for UnderlyingRustTuple<'_> { - fn from(value: constructorCall) -> Self { - (value.initialSeraiKey,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From<UnderlyingRustTuple<'_>> for constructorCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { initialSeraiKey: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolConstructor for constructorCall { - type Parameters<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,); - type Token<'a> = <Self::Parameters<'a> as alloy_sol_types::SolType>::Token<'a>; - #[inline] - fn new<'a>( - tuple: <Self::Parameters<'a> as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - ( - <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::SolType>::tokenize(&self.initialSeraiKey), - ) - } - } - }; - /**Function with signature `arbitaryCallOut(bytes)` and selector `0x3cbd2bf6`. -```solidity -function arbitaryCallOut(bytes memory code) external; -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct arbitaryCallOutCall { - pub code: ::alloy_sol_types::private::Bytes, - } - ///Container type for the return parameters of the [`arbitaryCallOut(bytes)`](arbitaryCallOutCall) function.
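// Note `arbitaryCallOut` (sic) matches the Solidity source spelling: the
// selector is keccak256 of the exact signature string, so the binding must
// reproduce the contract's identifier verbatim, typo included. A minimal
// encode sketch against the `SolCall` impl below; the code bytes are a
// placeholder:
//
//     use alloy_sol_types::SolCall;
//
//     let call = arbitaryCallOutCall { code: vec![0x60u8, 0x00].into() };
//     assert_eq!(&call.abi_encode()[..4], &arbitaryCallOutCall::SELECTOR);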
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct arbitaryCallOutReturn {} - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Bytes,); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: arbitaryCallOutCall) -> Self { - (value.code,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for arbitaryCallOutCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { code: tuple.0 } - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From - for UnderlyingRustTuple<'_> { - fn from(value: arbitaryCallOutReturn) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> - for arbitaryCallOutReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for arbitaryCallOutCall { - type Parameters<'a> = (::alloy_sol_types::sol_data::Bytes,); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = arbitaryCallOutReturn; - type ReturnTuple<'a> = (); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "arbitaryCallOut(bytes)"; - const SELECTOR: [u8; 4] = [60u8, 189u8, 43u8, 246u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - ( - <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( - &self.code, - ), - ) - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `execute((uint8,bytes,address,uint256)[],(bytes32,bytes32))` and selector `0xd5f22182`. -```solidity -function execute(OutInstruction[] memory transactions, Signature memory signature) external; -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct executeCall { - pub transactions: ::alloy_sol_types::private::Vec< - ::RustType, - >, - pub signature: ::RustType, - } - ///Container type for the return parameters of the [`execute((uint8,bytes,address,uint256)[],(bytes32,bytes32))`](executeCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct executeReturn {} - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = ( - ::alloy_sol_types::sol_data::Array, - Signature, - ); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::Vec< - ::RustType, - >, - ::RustType, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: executeCall) -> Self { - (value.transactions, value.signature) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for executeCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { - transactions: tuple.0, - signature: tuple.1, - } - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: executeReturn) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for executeReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for executeCall { - type Parameters<'a> = ( - ::alloy_sol_types::sol_data::Array, - Signature, - ); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = executeReturn; - type ReturnTuple<'a> = (); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "execute((uint8,bytes,address,uint256)[],(bytes32,bytes32))"; - const SELECTOR: [u8; 4] = [213u8, 242u8, 33u8, 130u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - ( - <::alloy_sol_types::sol_data::Array< - OutInstruction, - > as alloy_sol_types::SolType>::tokenize(&self.transactions), - ::tokenize(&self.signature), - ) - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `inInstruction(address,uint256,bytes)` and selector `0x0759a1a4`. -```solidity -function inInstruction(address coin, uint256 amount, bytes memory instruction) external payable; -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct inInstructionCall { - pub coin: ::alloy_sol_types::private::Address, - pub amount: ::alloy_sol_types::private::primitives::aliases::U256, - pub instruction: ::alloy_sol_types::private::Bytes, - } - ///Container type for the return parameters of the [`inInstruction(address,uint256,bytes)`](inInstructionCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct inInstructionReturn {} - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = ( - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Uint<256>, - ::alloy_sol_types::sol_data::Bytes, - ); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::Address, - ::alloy_sol_types::private::primitives::aliases::U256, - ::alloy_sol_types::private::Bytes, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: inInstructionCall) -> Self { - (value.coin, value.amount, value.instruction) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for inInstructionCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { - coin: tuple.0, - amount: tuple.1, - instruction: tuple.2, - } - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: inInstructionReturn) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for inInstructionReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for inInstructionCall { - type Parameters<'a> = ( - ::alloy_sol_types::sol_data::Address, - ::alloy_sol_types::sol_data::Uint<256>, - ::alloy_sol_types::sol_data::Bytes, - ); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = inInstructionReturn; - type ReturnTuple<'a> = (); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "inInstruction(address,uint256,bytes)"; - const SELECTOR: [u8; 4] = [7u8, 89u8, 161u8, 164u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - ( - <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( - &self.coin, - ), - <::alloy_sol_types::sol_data::Uint< - 256, - > as alloy_sol_types::SolType>::tokenize(&self.amount), - <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( - &self.instruction, - ), - ) - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `nonce()` and selector `0xaffed0e0`. -```solidity -function nonce() external view returns (uint256); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct nonceCall {} - ///Container type for the return parameters of the [`nonce()`](nonceCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct nonceReturn { - pub _0: ::alloy_sol_types::private::primitives::aliases::U256, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: nonceCall) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for nonceCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::primitives::aliases::U256, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: nonceReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for nonceReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for nonceCall { - type Parameters<'a> = (); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = nonceReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "nonce()"; - const SELECTOR: [u8; 4] = [175u8, 254u8, 208u8, 224u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - () - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `seraiKey()` and selector `0x9d6eea0a`. -```solidity -function seraiKey() external view returns (bytes32); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct seraiKeyCall {} - ///Container type for the return parameters of the [`seraiKey()`](seraiKeyCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct seraiKeyReturn { - pub _0: ::alloy_sol_types::private::FixedBytes<32>, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: seraiKeyCall) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for seraiKeyCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::FixedBytes<32>,); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: seraiKeyReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for seraiKeyReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for seraiKeyCall { - type Parameters<'a> = (); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = seraiKeyReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "seraiKey()"; - const SELECTOR: [u8; 4] = [157u8, 110u8, 234u8, 10u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - () - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `smartContractNonce()` and selector `0xc3727534`. -```solidity -function smartContractNonce() external view returns (uint256); -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct smartContractNonceCall {} - ///Container type for the return parameters of the [`smartContractNonce()`](smartContractNonceCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct smartContractNonceReturn { - pub _0: ::alloy_sol_types::private::primitives::aliases::U256, - } - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From - for UnderlyingRustTuple<'_> { - fn from(value: smartContractNonceCall) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> - for smartContractNonceCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::primitives::aliases::U256, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From - for UnderlyingRustTuple<'_> { - fn from(value: smartContractNonceReturn) -> Self { - (value._0,) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> - for smartContractNonceReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { _0: tuple.0 } - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for smartContractNonceCall { - type Parameters<'a> = (); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = smartContractNonceReturn; - type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<256>,); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "smartContractNonce()"; - const SELECTOR: [u8; 4] = [195u8, 114u8, 117u8, 52u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - () - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - /**Function with signature `updateSeraiKey(bytes32,(bytes32,bytes32))` and selector `0xb5071c6a`. -```solidity -function updateSeraiKey(bytes32 newSeraiKey, Signature memory signature) external; -```*/ - #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct updateSeraiKeyCall { - pub newSeraiKey: ::alloy_sol_types::private::FixedBytes<32>, - pub signature: ::RustType, - } - ///Container type for the return parameters of the [`updateSeraiKey(bytes32,(bytes32,bytes32))`](updateSeraiKeyCall) function. 
- #[allow(non_camel_case_types, non_snake_case)] - #[derive(Clone)] - pub struct updateSeraiKeyReturn {} - #[allow(non_camel_case_types, non_snake_case, clippy::style)] - const _: () = { - use ::alloy_sol_types as alloy_sol_types; - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = ( - ::alloy_sol_types::sol_data::FixedBytes<32>, - Signature, - ); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = ( - ::alloy_sol_types::private::FixedBytes<32>, - ::RustType, - ); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From for UnderlyingRustTuple<'_> { - fn from(value: updateSeraiKeyCall) -> Self { - (value.newSeraiKey, value.signature) - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> for updateSeraiKeyCall { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self { - newSeraiKey: tuple.0, - signature: tuple.1, - } - } - } - } - { - #[doc(hidden)] - type UnderlyingSolTuple<'a> = (); - #[doc(hidden)] - type UnderlyingRustTuple<'a> = (); - #[cfg(test)] - #[allow(dead_code, unreachable_patterns)] - fn _type_assertion( - _t: alloy_sol_types::private::AssertTypeEq, - ) { - match _t { - alloy_sol_types::private::AssertTypeEq::< - ::RustType, - >(_) => {} - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From - for UnderlyingRustTuple<'_> { - fn from(value: updateSeraiKeyReturn) -> Self { - () - } - } - #[automatically_derived] - #[doc(hidden)] - impl ::core::convert::From> - for updateSeraiKeyReturn { - fn from(tuple: UnderlyingRustTuple<'_>) -> Self { - Self {} - } - } - } - #[automatically_derived] - impl alloy_sol_types::SolCall for updateSeraiKeyCall { - type Parameters<'a> = ( - ::alloy_sol_types::sol_data::FixedBytes<32>, - Signature, - ); - type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; - type Return = updateSeraiKeyReturn; - type ReturnTuple<'a> = (); - type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; - const SIGNATURE: &'static str = "updateSeraiKey(bytes32,(bytes32,bytes32))"; - const SELECTOR: [u8; 4] = [181u8, 7u8, 28u8, 106u8]; - #[inline] - fn new<'a>( - tuple: as alloy_sol_types::SolType>::RustType, - ) -> Self { - tuple.into() - } - #[inline] - fn tokenize(&self) -> Self::Token<'_> { - ( - <::alloy_sol_types::sol_data::FixedBytes< - 32, - > as alloy_sol_types::SolType>::tokenize(&self.newSeraiKey), - ::tokenize(&self.signature), - ) - } - #[inline] - fn abi_decode_returns( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - as alloy_sol_types::SolType>::abi_decode_sequence(data, validate) - .map(Into::into) - } - } - }; - ///Container for all the [`Router`](self) function calls. - pub enum RouterCalls { - arbitaryCallOut(arbitaryCallOutCall), - execute(executeCall), - inInstruction(inInstructionCall), - nonce(nonceCall), - seraiKey(seraiKeyCall), - smartContractNonce(smartContractNonceCall), - updateSeraiKey(updateSeraiKeyCall), - } - #[automatically_derived] - impl RouterCalls { - /// All the selectors of this enum. - /// - /// Note that the selectors might not be in the same order as the variants. - /// No guarantees are made about the order of the selectors. - /// - /// Prefer using `SolInterface` methods instead. 
- pub const SELECTORS: &'static [[u8; 4usize]] = &[ - [7u8, 89u8, 161u8, 164u8], - [60u8, 189u8, 43u8, 246u8], - [157u8, 110u8, 234u8, 10u8], - [175u8, 254u8, 208u8, 224u8], - [181u8, 7u8, 28u8, 106u8], - [195u8, 114u8, 117u8, 52u8], - [213u8, 242u8, 33u8, 130u8], - ]; - } - #[automatically_derived] - impl alloy_sol_types::SolInterface for RouterCalls { - const NAME: &'static str = "RouterCalls"; - const MIN_DATA_LENGTH: usize = 0usize; - const COUNT: usize = 7usize; - #[inline] - fn selector(&self) -> [u8; 4] { - match self { - Self::arbitaryCallOut(_) => { - ::SELECTOR - } - Self::execute(_) => ::SELECTOR, - Self::inInstruction(_) => { - ::SELECTOR - } - Self::nonce(_) => ::SELECTOR, - Self::seraiKey(_) => ::SELECTOR, - Self::smartContractNonce(_) => { - ::SELECTOR - } - Self::updateSeraiKey(_) => { - ::SELECTOR - } - } - } - #[inline] - fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { - Self::SELECTORS.get(i).copied() - } - #[inline] - fn valid_selector(selector: [u8; 4]) -> bool { - Self::SELECTORS.binary_search(&selector).is_ok() - } - #[inline] - #[allow(unsafe_code, non_snake_case)] - fn abi_decode_raw( - selector: [u8; 4], - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - static DECODE_SHIMS: &[fn( - &[u8], - bool, - ) -> alloy_sol_types::Result] = &[ - { - fn inInstruction( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(RouterCalls::inInstruction) - } - inInstruction - }, - { - fn arbitaryCallOut( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(RouterCalls::arbitaryCallOut) - } - arbitaryCallOut - }, - { - fn seraiKey( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(RouterCalls::seraiKey) - } - seraiKey - }, - { - fn nonce( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(RouterCalls::nonce) - } - nonce - }, - { - fn updateSeraiKey( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(RouterCalls::updateSeraiKey) - } - updateSeraiKey - }, - { - fn smartContractNonce( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(RouterCalls::smartContractNonce) - } - smartContractNonce - }, - { - fn execute( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(RouterCalls::execute) - } - execute - }, - ]; - let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { - return Err( - alloy_sol_types::Error::unknown_selector( - ::NAME, - selector, - ), - ); - }; - (unsafe { DECODE_SHIMS.get_unchecked(idx) })(data, validate) - } - #[inline] - fn abi_encoded_size(&self) -> usize { - match self { - Self::arbitaryCallOut(inner) => { - ::abi_encoded_size( - inner, - ) - } - Self::execute(inner) => { - ::abi_encoded_size(inner) - } - Self::inInstruction(inner) => { - ::abi_encoded_size( - inner, - ) - } - Self::nonce(inner) => { - ::abi_encoded_size(inner) - } - Self::seraiKey(inner) => { - ::abi_encoded_size(inner) - } - Self::smartContractNonce(inner) => { - ::abi_encoded_size( - inner, - ) - } - Self::updateSeraiKey(inner) => { - ::abi_encoded_size( - inner, - ) - } - } - } - #[inline] - fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { - match self { - Self::arbitaryCallOut(inner) => { 
- ::abi_encode_raw( - inner, - out, - ) - } - Self::execute(inner) => { - ::abi_encode_raw(inner, out) - } - Self::inInstruction(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - Self::nonce(inner) => { - ::abi_encode_raw(inner, out) - } - Self::seraiKey(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - Self::smartContractNonce(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - Self::updateSeraiKey(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - } - } - } - ///Container for all the [`Router`](self) custom errors. - pub enum RouterErrors { - FailedTransfer(FailedTransfer), - InvalidAmount(InvalidAmount), - InvalidSignature(InvalidSignature), - } - #[automatically_derived] - impl RouterErrors { - /// All the selectors of this enum. - /// - /// Note that the selectors might not be in the same order as the variants. - /// No guarantees are made about the order of the selectors. - /// - /// Prefer using `SolInterface` methods instead. - pub const SELECTORS: &'static [[u8; 4usize]] = &[ - [44u8, 82u8, 17u8, 198u8], - [139u8, 170u8, 87u8, 159u8], - [191u8, 168u8, 113u8, 197u8], - ]; - } - #[automatically_derived] - impl alloy_sol_types::SolInterface for RouterErrors { - const NAME: &'static str = "RouterErrors"; - const MIN_DATA_LENGTH: usize = 0usize; - const COUNT: usize = 3usize; - #[inline] - fn selector(&self) -> [u8; 4] { - match self { - Self::FailedTransfer(_) => { - ::SELECTOR - } - Self::InvalidAmount(_) => { - ::SELECTOR - } - Self::InvalidSignature(_) => { - ::SELECTOR - } - } - } - #[inline] - fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { - Self::SELECTORS.get(i).copied() - } - #[inline] - fn valid_selector(selector: [u8; 4]) -> bool { - Self::SELECTORS.binary_search(&selector).is_ok() - } - #[inline] - #[allow(unsafe_code, non_snake_case)] - fn abi_decode_raw( - selector: [u8; 4], - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - static DECODE_SHIMS: &[fn( - &[u8], - bool, - ) -> alloy_sol_types::Result] = &[ - { - fn InvalidAmount( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(RouterErrors::InvalidAmount) - } - InvalidAmount - }, - { - fn InvalidSignature( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(RouterErrors::InvalidSignature) - } - InvalidSignature - }, - { - fn FailedTransfer( - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - ::abi_decode_raw( - data, - validate, - ) - .map(RouterErrors::FailedTransfer) - } - FailedTransfer - }, - ]; - let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { - return Err( - alloy_sol_types::Error::unknown_selector( - ::NAME, - selector, - ), - ); - }; - (unsafe { DECODE_SHIMS.get_unchecked(idx) })(data, validate) - } - #[inline] - fn abi_encoded_size(&self) -> usize { - match self { - Self::FailedTransfer(inner) => { - ::abi_encoded_size( - inner, - ) - } - Self::InvalidAmount(inner) => { - ::abi_encoded_size(inner) - } - Self::InvalidSignature(inner) => { - ::abi_encoded_size( - inner, - ) - } - } - } - #[inline] - fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { - match self { - Self::FailedTransfer(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - Self::InvalidAmount(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - Self::InvalidSignature(inner) => { - ::abi_encode_raw( - inner, - out, - ) - } - } - } - } - ///Container for all the [`Router`](self) events. 
- pub enum RouterEvents { - Executed(Executed), - InInstruction(InInstruction), - SeraiKeyUpdated(SeraiKeyUpdated), - } - #[automatically_derived] - impl RouterEvents { - /// All the selectors of this enum. - /// - /// Note that the selectors might not be in the same order as the variants. - /// No guarantees are made about the order of the selectors. - /// - /// Prefer using `SolInterface` methods instead. - pub const SELECTORS: &'static [[u8; 32usize]] = &[ - [ - 27u8, - 159u8, - 240u8, - 22u8, - 78u8, - 129u8, - 16u8, - 69u8, - 166u8, - 23u8, - 174u8, - 120u8, - 62u8, - 128u8, - 117u8, - 1u8, - 168u8, - 226u8, - 119u8, - 98u8, - 167u8, - 203u8, - 143u8, - 47u8, - 189u8, - 2u8, - 120u8, - 81u8, - 117u8, - 37u8, - 112u8, - 181u8, - ], - [ - 52u8, - 111u8, - 213u8, - 205u8, - 109u8, - 25u8, - 210u8, - 109u8, - 58u8, - 253u8, - 34u8, - 47u8, - 67u8, - 3u8, - 62u8, - 205u8, - 13u8, - 86u8, - 20u8, - 202u8, - 100u8, - 190u8, - 192u8, - 174u8, - 209u8, - 1u8, - 72u8, - 44u8, - 216u8, - 126u8, - 146u8, - 47u8, - ], - [ - 194u8, - 24u8, - 199u8, - 126u8, - 84u8, - 202u8, - 193u8, - 22u8, - 37u8, - 113u8, - 229u8, - 43u8, - 101u8, - 187u8, - 39u8, - 170u8, - 12u8, - 223u8, - 204u8, - 112u8, - 183u8, - 199u8, - 41u8, - 106u8, - 216u8, - 57u8, - 51u8, - 145u8, - 75u8, - 19u8, - 32u8, - 145u8, - ], - ]; - } - #[automatically_derived] - impl alloy_sol_types::SolEventInterface for RouterEvents { - const NAME: &'static str = "RouterEvents"; - const COUNT: usize = 3usize; - fn decode_raw_log( - topics: &[alloy_sol_types::Word], - data: &[u8], - validate: bool, - ) -> alloy_sol_types::Result { - match topics.first().copied() { - Some(::SIGNATURE_HASH) => { - ::decode_raw_log( - topics, - data, - validate, - ) - .map(Self::Executed) - } - Some(::SIGNATURE_HASH) => { - ::decode_raw_log( - topics, - data, - validate, - ) - .map(Self::InInstruction) - } - Some(::SIGNATURE_HASH) => { - ::decode_raw_log( - topics, - data, - validate, - ) - .map(Self::SeraiKeyUpdated) - } - _ => { - alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { - name: ::NAME, - log: alloy_sol_types::private::Box::new( - alloy_sol_types::private::LogData::new_unchecked( - topics.to_vec(), - data.to_vec().into(), - ), - ), - }) - } - } - } - } - #[automatically_derived] - impl alloy_sol_types::private::IntoLogData for RouterEvents { - fn to_log_data(&self) -> alloy_sol_types::private::LogData { - match self { - Self::Executed(inner) => { - alloy_sol_types::private::IntoLogData::to_log_data(inner) - } - Self::InInstruction(inner) => { - alloy_sol_types::private::IntoLogData::to_log_data(inner) - } - Self::SeraiKeyUpdated(inner) => { - alloy_sol_types::private::IntoLogData::to_log_data(inner) - } - } - } - fn into_log_data(self) -> alloy_sol_types::private::LogData { - match self { - Self::Executed(inner) => { - alloy_sol_types::private::IntoLogData::into_log_data(inner) - } - Self::InInstruction(inner) => { - alloy_sol_types::private::IntoLogData::into_log_data(inner) - } - Self::SeraiKeyUpdated(inner) => { - alloy_sol_types::private::IntoLogData::into_log_data(inner) - } - } - } - } -} diff --git a/processor/ethereum/contracts/src/lib.rs b/processor/ethereum/contracts/src/lib.rs deleted file mode 100644 index 9087eaed2..000000000 --- a/processor/ethereum/contracts/src/lib.rs +++ /dev/null @@ -1,16 +0,0 @@ -#[rustfmt::skip] -#[expect(warnings)] -#[expect(needless_pass_by_value)] -#[expect(clippy::all)] -#[expect(clippy::ignored_unit_patterns)] -#[expect(clippy::redundant_closure_for_method_calls)] -mod abigen; - -pub mod 
erc20 { - pub use super::abigen::erc20::IERC20::*; -} -pub mod router { - pub const BYTECODE: &[u8] = - include_bytes!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-contracts/Router.bin")); - pub use super::abigen::router::Router::*; -} diff --git a/processor/ethereum/ethereum-serai/Cargo.toml b/processor/ethereum/ethereum-serai/Cargo.toml deleted file mode 100644 index 73c5b2672..000000000 --- a/processor/ethereum/ethereum-serai/Cargo.toml +++ /dev/null @@ -1,52 +0,0 @@ -[package] -name = "ethereum-serai" -version = "0.1.0" -description = "An Ethereum library supporting Schnorr signing and on-chain verification" -license = "AGPL-3.0-only" -repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/ethereum-serai" -authors = ["Luke Parker ", "Elizabeth Binks "] -edition = "2021" -publish = false -rust-version = "1.79" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[lints] -workspace = true - -[dependencies] -thiserror = { version = "1", default-features = false } - -rand_core = { version = "0.6", default-features = false, features = ["std"] } - -transcript = { package = "flexible-transcript", path = "../../../crypto/transcript", default-features = false, features = ["recommended"] } - -group = { version = "0.13", default-features = false } -k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] } -frost = { package = "modular-frost", path = "../../../crypto/frost", default-features = false, features = ["secp256k1"] } - -alloy-core = { version = "0.8", default-features = false } -alloy-sol-types = { version = "0.8", default-features = false, features = ["json"] } -alloy-consensus = { version = "0.3", default-features = false, features = ["k256"] } -alloy-network = { version = "0.3", default-features = false } -alloy-rpc-types-eth = { version = "0.3", default-features = false } -alloy-rpc-client = { version = "0.3", default-features = false } -alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } -alloy-provider = { version = "0.3", default-features = false } - -alloy-node-bindings = { version = "0.3", default-features = false, optional = true } - -ethereum-schnorr-contract = { path = "../../../networks/ethereum/schnorr", default-features = false } -contracts = { package = "serai-processor-ethereum-contracts", path = "../contracts" } - -[dev-dependencies] -frost = { package = "modular-frost", path = "../../../crypto/frost", default-features = false, features = ["tests"] } - -tokio = { version = "1", features = ["macros"] } - -alloy-node-bindings = { version = "0.3", default-features = false } - -[features] -tests = ["alloy-node-bindings", "frost/tests"] diff --git a/processor/ethereum/ethereum-serai/LICENSE b/processor/ethereum/ethereum-serai/LICENSE deleted file mode 100644 index c425427c8..000000000 --- a/processor/ethereum/ethereum-serai/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -AGPL-3.0-only license - -Copyright (c) 2022-2023 Luke Parker - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License Version 3 as -published by the Free Software Foundation. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. 
- -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . diff --git a/processor/ethereum/ethereum-serai/README.md b/processor/ethereum/ethereum-serai/README.md deleted file mode 100644 index 0090b26bd..000000000 --- a/processor/ethereum/ethereum-serai/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Ethereum - -This package contains Ethereum-related functionality, specifically deploying and -interacting with Serai contracts. - -While `monero-serai` and `bitcoin-serai` are general purpose libraries, -`ethereum-serai` is Serai specific. If any of the utilities are generally -desired, please fork and maintain your own copy to ensure the desired -functionality is preserved, or open an issue to request we make this library -general purpose. - -### Dependencies - -- solc -- [Foundry](https://github.com/foundry-rs/foundry) diff --git a/processor/ethereum/ethereum-serai/src/crypto.rs b/processor/ethereum/ethereum-serai/src/crypto.rs deleted file mode 100644 index 3b9dc58a1..000000000 --- a/processor/ethereum/ethereum-serai/src/crypto.rs +++ /dev/null @@ -1,32 +0,0 @@ -use group::ff::PrimeField; -use k256::{ - elliptic_curve::{ - ops::Reduce, - point::{AffineCoordinates, DecompressPoint}, - sec1::ToEncodedPoint, - }, - AffinePoint, ProjectivePoint, Scalar, U256 as KU256, -}; - -use frost::{ - algorithm::{Hram, SchnorrSignature}, - curve::{Ciphersuite, Secp256k1}, -}; - -pub use ethereum_schnorr_contract::*; - -use alloy_core::primitives::{Parity, Signature as AlloySignature, Address}; -use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; - -/// The HRAm to use for the Schnorr Solidity library. -/// -/// This will panic if the public key being signed for is not representable within the Schnorr -/// Solidity library. 
-#[derive(Clone, Default)]
-pub struct EthereumHram {}
-impl Hram<Secp256k1> for EthereumHram {
-  #[allow(non_snake_case)]
-  fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
-    Signature::challenge(*R, &PublicKey::new(*A).unwrap(), m)
-  }
-}
diff --git a/processor/ethereum/ethereum-serai/src/lib.rs b/processor/ethereum/ethereum-serai/src/lib.rs
deleted file mode 100644
index 1a013ddf4..000000000
--- a/processor/ethereum/ethereum-serai/src/lib.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-use thiserror::Error;
-
-pub mod alloy {
-  pub use alloy_core::primitives;
-  pub use alloy_core as core;
-  pub use alloy_sol_types as sol_types;
-
-  pub use alloy_consensus as consensus;
-  pub use alloy_network as network;
-  pub use alloy_rpc_types_eth as rpc_types;
-  pub use alloy_simple_request_transport as simple_request_transport;
-  pub use alloy_rpc_client as rpc_client;
-  pub use alloy_provider as provider;
-}
-
-pub mod crypto;
-
-/*
-pub(crate) mod abi {
-  pub use contracts::erc20;
-  pub use contracts::deployer;
-  pub use contracts::router;
-}
-
-pub mod erc20;
-pub mod deployer;
-pub mod router;
-
-pub mod machine;
-
-#[cfg(any(test, feature = "tests"))]
-pub mod tests;
-
-#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
-pub enum Error {
-  #[error("failed to verify Schnorr signature")]
-  InvalidSignature,
-  #[error("couldn't make call/send TX")]
-  ConnectionError,
-}
-*/
diff --git a/processor/ethereum/ethereum-serai/src/machine.rs b/processor/ethereum/ethereum-serai/src/machine.rs
deleted file mode 100644
index 404922f56..000000000
--- a/processor/ethereum/ethereum-serai/src/machine.rs
+++ /dev/null
@@ -1,427 +0,0 @@
-use std::{
-  io::{self, Read},
-  collections::HashMap,
-};
-
-use rand_core::{RngCore, CryptoRng};
-
-use transcript::{Transcript, RecommendedTranscript};
-
-use group::GroupEncoding;
-use frost::{
-  curve::{Ciphersuite, Secp256k1},
-  Participant, ThresholdKeys, FrostError,
-  algorithm::Schnorr,
-  sign::*,
-};
-
-use alloy_core::primitives::U256;
-
-use crate::{
-  crypto::{PublicKey, EthereumHram, Signature},
-  router::{
-    abi::{Call as AbiCall, OutInstruction as AbiOutInstruction},
-    Router,
-  },
-};
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct Call {
-  pub to: [u8; 20],
-  pub value: U256,
-  pub data: Vec<u8>,
-}
-impl Call {
-  pub fn read<R: Read>(reader: &mut R) -> io::Result<Call> {
-    let mut to = [0; 20];
-    reader.read_exact(&mut to)?;
-
-    let value = {
-      let mut value_bytes = [0; 32];
-      reader.read_exact(&mut value_bytes)?;
-      U256::from_le_slice(&value_bytes)
-    };
-
-    let mut data_len = {
-      let mut data_len = [0; 4];
-      reader.read_exact(&mut data_len)?;
-      usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize")
-    };
-
-    // A valid DoS would be to claim 4 GB of data is present when only 4 bytes are
-    // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB)
-    let mut data = vec![];
-    while data_len > 0 {
-      let chunk_len = data_len.min(1024);
-      let mut chunk = vec![0; chunk_len];
-      reader.read_exact(&mut chunk)?;
-      data.extend(&chunk);
-      data_len -= chunk_len;
-    }
-
-    Ok(Call { to, value, data })
-  }
-
-  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-    writer.write_all(&self.to)?;
-    writer.write_all(&self.value.as_le_bytes())?;
-
-    let data_len = u32::try_from(self.data.len())
-      .map_err(|_| io::Error::other("call data length exceeded 2**32"))?;
-    writer.write_all(&data_len.to_le_bytes())?;
-    writer.write_all(&self.data)
-  }
-}
-impl From<Call> for AbiCall {
-  fn from(call: Call) -> AbiCall {
-    AbiCall { to: call.to.into(), value: call.value, data: call.data.into() }
-  }
-}
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub enum OutInstructionTarget {
-  Direct([u8; 20]),
-  Calls(Vec<Call>),
-}
-impl OutInstructionTarget {
-  fn read<R: Read>(reader: &mut R) -> io::Result<OutInstructionTarget> {
-    let mut kind = [0xff];
-    reader.read_exact(&mut kind)?;
-
-    match kind[0] {
-      0 => {
-        let mut addr = [0; 20];
-        reader.read_exact(&mut addr)?;
-        Ok(OutInstructionTarget::Direct(addr))
-      }
-      1 => {
-        let mut calls_len = [0; 4];
-        reader.read_exact(&mut calls_len)?;
-        let calls_len = u32::from_le_bytes(calls_len);
-
-        let mut calls = vec![];
-        for _ in 0 .. calls_len {
-          calls.push(Call::read(reader)?);
-        }
-        Ok(OutInstructionTarget::Calls(calls))
-      }
-      _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?,
-    }
-  }
-
-  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-    match self {
-      OutInstructionTarget::Direct(addr) => {
-        writer.write_all(&[0])?;
-        writer.write_all(addr)?;
-      }
-      OutInstructionTarget::Calls(calls) => {
-        writer.write_all(&[1])?;
-        let call_len = u32::try_from(calls.len())
-          .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?;
-        writer.write_all(&call_len.to_le_bytes())?;
-        for call in calls {
-          call.write(writer)?;
-        }
-      }
-    }
-    Ok(())
-  }
-}
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct OutInstruction {
-  pub target: OutInstructionTarget,
-  pub value: U256,
-}
-impl OutInstruction {
-  fn read<R: Read>(reader: &mut R) -> io::Result<OutInstruction> {
-    let target = OutInstructionTarget::read(reader)?;
-
-    let value = {
-      let mut value_bytes = [0; 32];
-      reader.read_exact(&mut value_bytes)?;
-      U256::from_le_slice(&value_bytes)
-    };
-
-    Ok(OutInstruction { target, value })
-  }
-  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-    self.target.write(writer)?;
-    writer.write_all(&self.value.as_le_bytes())
-  }
-}
-impl From<OutInstruction> for AbiOutInstruction {
-  fn from(instruction: OutInstruction) -> AbiOutInstruction {
-    match instruction.target {
-      OutInstructionTarget::Direct(addr) => {
-        AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value }
-      }
-      OutInstructionTarget::Calls(calls) => AbiOutInstruction {
-        to: [0; 20].into(),
-        calls: calls.into_iter().map(Into::into).collect(),
-        value: instruction.value,
-      },
-    }
-  }
-}
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub enum RouterCommand {
-  UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey },
-  Execute { chain_id: U256, nonce: U256, outs: Vec<OutInstruction> },
-}
-
-impl RouterCommand {
-  pub fn msg(&self) -> Vec<u8> {
-    match self {
-      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
-        Router::update_serai_key_message(*chain_id, *nonce, key)
-      }
-      RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message(
-        *chain_id,
-        *nonce,
-        outs.iter().map(|out| out.clone().into()).collect(),
-      ),
-    }
-  }
-
-  pub fn read<R: Read>(reader: &mut R) -> io::Result<RouterCommand> {
-    let mut kind = [0xff];
-    reader.read_exact(&mut kind)?;
-
-    match kind[0] {
-      0 => {
-        let mut chain_id = [0; 32];
-        reader.read_exact(&mut chain_id)?;
-
-        let mut nonce = [0; 32];
-        reader.read_exact(&mut nonce)?;
-
-        let key = PublicKey::new(Secp256k1::read_G(reader)?)
-          .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?;
-        Ok(RouterCommand::UpdateSeraiKey {
-          chain_id: U256::from_le_slice(&chain_id),
-          nonce: U256::from_le_slice(&nonce),
-          key,
-        })
-      }
-      1 => {
-        let mut chain_id = [0; 32];
-        reader.read_exact(&mut chain_id)?;
-        let chain_id = U256::from_le_slice(&chain_id);
-
-        let mut nonce = [0; 32];
-        reader.read_exact(&mut nonce)?;
-        let nonce = U256::from_le_slice(&nonce);
-
-        let mut outs_len = [0; 4];
-        reader.read_exact(&mut outs_len)?;
-        let outs_len = u32::from_le_bytes(outs_len);
-
-        let mut outs = vec![];
-        for _ in 0 .. outs_len {
-          outs.push(OutInstruction::read(reader)?);
-        }
-
-        Ok(RouterCommand::Execute { chain_id, nonce, outs })
-      }
-      _ => Err(io::Error::other("reading unknown type of RouterCommand"))?,
-    }
-  }
-
-  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-    match self {
-      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
-        writer.write_all(&[0])?;
-        writer.write_all(&chain_id.as_le_bytes())?;
-        writer.write_all(&nonce.as_le_bytes())?;
-        writer.write_all(&key.point().to_bytes())
-      }
-      RouterCommand::Execute { chain_id, nonce, outs } => {
-        writer.write_all(&[1])?;
-        writer.write_all(&chain_id.as_le_bytes())?;
-        writer.write_all(&nonce.as_le_bytes())?;
-        writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
-        for out in outs {
-          out.write(writer)?;
-        }
-        Ok(())
-      }
-    }
-  }
-
-  pub fn serialize(&self) -> Vec<u8> {
-    let mut res = vec![];
-    self.write(&mut res).unwrap();
-    res
-  }
-}
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct SignedRouterCommand {
-  command: RouterCommand,
-  signature: Signature,
-}
-
-impl SignedRouterCommand {
-  pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option<Self> {
-    let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?;
-    let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?;
-    let signature = Signature { c, s };
-
-    if !signature.verify(key, &command.msg()) {
-      None?
-    }
-    Some(SignedRouterCommand { command, signature })
-  }
-
-  pub fn command(&self) -> &RouterCommand {
-    &self.command
-  }
-
-  pub fn signature(&self) -> &Signature {
-    &self.signature
-  }
-
-  pub fn read<R: Read>(reader: &mut R) -> io::Result<SignedRouterCommand> {
-    let command = RouterCommand::read(reader)?;
-
-    let mut sig = [0; 64];
-    reader.read_exact(&mut sig)?;
-    let signature = Signature::from_bytes(sig)?;
-
-    Ok(SignedRouterCommand { command, signature })
-  }
-
-  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-    self.command.write(writer)?;
-    writer.write_all(&self.signature.to_bytes())
-  }
-}
-
-pub struct RouterCommandMachine {
-  key: PublicKey,
-  command: RouterCommand,
-  machine: AlgorithmMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
-}
-
-impl RouterCommandMachine {
-  pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> Option<Self> {
-    // The Schnorr algorithm should be fine without this, even when using the IETF variant
-    // If this is better and more comprehensive, we should do it, even if not necessary
-    let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1");
-    let key = keys.group_key();
-    transcript.append_message(b"key", key.to_bytes());
-    transcript.append_message(b"command", command.serialize());
-
-    Some(Self {
-      key: PublicKey::new(key)?,
-      command,
-      machine: AlgorithmMachine::new(Schnorr::new(transcript), keys),
-    })
-  }
-}
-
-impl PreprocessMachine for RouterCommandMachine {
-  type Preprocess = Preprocess<Secp256k1, ()>;
-  type Signature = SignedRouterCommand;
-  type SignMachine = RouterCommandSignMachine;
-
-  fn preprocess<R: RngCore + CryptoRng>(
-    self,
-    rng: &mut R,
-  ) -> (Self::SignMachine, Self::Preprocess) {
-    let (machine, preprocess) = self.machine.preprocess(rng);
-
-    (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess)
-  }
-}
-
-pub struct RouterCommandSignMachine {
-  key: PublicKey,
-  command: RouterCommand,
-  machine: AlgorithmSignMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
-}
-
-impl SignMachine<SignedRouterCommand> for RouterCommandSignMachine {
-  type Params = ();
-  type Keys = ThresholdKeys<Secp256k1>;
-  type Preprocess = Preprocess<Secp256k1, ()>;
-  type SignatureShare = SignatureShare<Secp256k1>;
-  type SignatureMachine = RouterCommandSignatureMachine;
-
-  fn cache(self) -> CachedPreprocess {
-    unimplemented!(
-      "RouterCommand machines don't support caching their preprocesses due to {}",
-      "being already bound to a specific command"
-    );
-  }
-
-  fn from_cache(
-    (): (),
-    _: ThresholdKeys<Secp256k1>,
-    _: CachedPreprocess,
-  ) -> (Self, Self::Preprocess) {
-    unimplemented!(
-      "RouterCommand machines don't support caching their preprocesses due to {}",
-      "being already bound to a specific command"
-    );
-  }
-
-  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
-    self.machine.read_preprocess(reader)
-  }
-
-  fn sign(
-    self,
-    commitments: HashMap<Participant, Self::Preprocess>,
-    msg: &[u8],
-  ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> {
-    if !msg.is_empty() {
-      panic!("message was passed to a RouterCommand machine when it generates its own");
-    }
-
-    let (machine, share) = self.machine.sign(commitments, &self.command.msg())?;
-
-    Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share))
-  }
-}
-
-pub struct RouterCommandSignatureMachine {
-  key: PublicKey,
-  command: RouterCommand,
-  machine:
-    AlgorithmSignatureMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
-}
-
-impl SignatureMachine<SignedRouterCommand> for RouterCommandSignatureMachine {
-  type SignatureShare = SignatureShare<Secp256k1>;
-
-  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
-    self.machine.read_share(reader)
-  }
-
-  fn complete(
-    self,
-    shares: HashMap<Participant, Self::SignatureShare>,
-  ) -> Result<SignedRouterCommand, FrostError> {
-    let signature = self.machine.complete(shares)?;
-    let signature = Signature::new(signature).expect("machine produced an invalid signature");
-    assert!(signature.verify(&self.key, &self.command.msg()));
-    Ok(SignedRouterCommand { command: self.command, signature })
-  }
-}
diff --git a/processor/ethereum/src/primitives/transaction.rs b/processor/ethereum/src/primitives/transaction.rs
index f77153ffa..eeba31803 100644
--- a/processor/ethereum/src/primitives/transaction.rs
+++ b/processor/ethereum/src/primitives/transaction.rs
@@ -40,8 +40,12 @@ impl Action {
   fn message(&self) -> Vec<u8> {
     match self {
-      Action::SetKey { chain_id, nonce, key } => Router::update_serai_key_message(*chain_id, *nonce, key),
-      Action::Batch { chain_id, nonce, outs } => Router::execute_message(*chain_id, *nonce, OutInstructions::from(outs.as_ref())),
+      Action::SetKey { chain_id, nonce, key } => {
+        Router::update_serai_key_message(*chain_id, *nonce, key)
+      }
+      Action::Batch { chain_id, nonce, outs } => {
+        Router::execute_message(*chain_id, *nonce, OutInstructions::from(outs.as_ref()))
+      }
     }
   }
 
@@ -129,9 +133,17 @@ impl PreprocessMachine for ClonableTransctionMachine {
     self,
     rng: &mut R,
   ) -> (Self::SignMachine, Self::Preprocess) {
-    let (machine, preprocess) = AlgorithmMachine::new(IetfSchnorr::<Secp256k1, EthereumHram>::ietf(), self.0.clone())
-      .preprocess(rng);
-    (ActionSignMachine(PublicKey::new(self.0.group_key()).expect("signing with non-representable key"), self.1, machine), preprocess)
+    let (machine, preprocess) =
+      AlgorithmMachine::new(IetfSchnorr::<Secp256k1, EthereumHram>::ietf(), self.0.clone())
+        .preprocess(rng);
+    (
+      ActionSignMachine(
+        PublicKey::new(self.0.group_key()).expect("signing with non-representable key"),
+        self.1,
+        machine,
+      ),
+      preprocess,
+    )
   }
 }
 
@@ -157,7 +169,7 @@ impl SignMachine<Transaction> for ActionSignMachine {
     params: Self::Params,
     keys: Self::Keys,
     cache: CachedPreprocess,
-) -> (Self, Self::Preprocess) {
+  ) -> (Self, Self::Preprocess) {
     unimplemented!()
   }
 
diff --git a/tests/processor/Cargo.toml b/tests/processor/Cargo.toml
index e37dc2a9b..c7267b55e 100644
--- a/tests/processor/Cargo.toml
+++ b/tests/processor/Cargo.toml
@@ -29,7 +29,6 @@ dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
bitcoin-serai = { path = "../../networks/bitcoin" } k256 = "0.13" -ethereum-serai = { path = "../../processor/ethereum/ethereum-serai" } monero-simple-request-rpc = { path = "../../networks/monero/rpc/simple-request" } monero-wallet = { path = "../../networks/monero/wallet" } From 4f5773cb9451424c789664e9ef8b831517ffce46 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 18 Sep 2024 01:09:07 -0400 Subject: [PATCH 151/179] Add note on the returned top-level transfers being unordered --- processor/ethereum/erc20/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/processor/ethereum/erc20/src/lib.rs b/processor/ethereum/erc20/src/lib.rs index 920915e93..400a5baac 100644 --- a/processor/ethereum/erc20/src/lib.rs +++ b/processor/ethereum/erc20/src/lib.rs @@ -149,6 +149,8 @@ impl Erc20 { } /// Fetch all top-level transfers to the specified address. + /// + /// The result of this function is unordered. pub async fn top_level_transfers( &self, block: u64, From ece0b03d59aa88f24e10790491fa30f46208a93b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 18 Sep 2024 01:09:42 -0400 Subject: [PATCH 152/179] Move the Ethereum Action machine to its own file --- Cargo.lock | 5 - Cargo.toml | 3 - processor/ethereum/src/primitives/machine.rs | 146 ++++++++++++++++ processor/ethereum/src/primitives/mod.rs | 1 + .../ethereum/src/primitives/transaction.rs | 158 +----------------- processor/ethereum/src/publisher.rs | 8 + 6 files changed, 163 insertions(+), 158 deletions(-) create mode 100644 processor/ethereum/src/primitives/machine.rs diff --git a/Cargo.lock b/Cargo.lock index a7f3792aa..7e51ec8aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11890,8 +11890,3 @@ dependencies = [ "cc", "pkg-config", ] - -[[patch.unused]] -name = "alloy-sol-type-parser" -version = "0.8.0" -source = "git+https://github.com/alloy-rs/core?rev=446b9d2fbce12b88456152170709a3eaac929af0#446b9d2fbce12b88456152170709a3eaac929af0" diff --git a/Cargo.toml b/Cargo.toml index 99a10be04..d0c91a300 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -204,9 +204,6 @@ directories-next = { path = "patches/directories-next" } # The official pasta_curves repo doesn't support Zeroize pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" } -# https://github.com/alloy-rs/core/issues/717 -alloy-sol-type-parser = { git = "https://github.com/alloy-rs/core", rev = "446b9d2fbce12b88456152170709a3eaac929af0" } - [workspace.lints.clippy] unwrap_or_default = "allow" borrow_as_ptr = "deny" diff --git a/processor/ethereum/src/primitives/machine.rs b/processor/ethereum/src/primitives/machine.rs new file mode 100644 index 000000000..f37fb4404 --- /dev/null +++ b/processor/ethereum/src/primitives/machine.rs @@ -0,0 +1,146 @@ +use std::{io, collections::HashMap}; + +use rand_core::{RngCore, CryptoRng}; + +use ciphersuite::{Ciphersuite, Secp256k1}; +use frost::{ + dkg::{Participant, ThresholdKeys}, + FrostError, + algorithm::*, + sign::*, +}; + +use ethereum_schnorr::{PublicKey, Signature}; + +use crate::transaction::{Action, Transaction}; + +/// The HRAm to use for the Schnorr Solidity library. +/// +/// This will panic if the public key being signed for is not representable within the Schnorr +/// Solidity library. 
+#[derive(Clone, Default, Debug)] +pub struct EthereumHram; +impl Hram for EthereumHram { + #[allow(non_snake_case)] + fn hram( + R: &::G, + A: &::G, + m: &[u8], + ) -> ::F { + Signature::challenge(*R, &PublicKey::new(*A).unwrap(), m) + } +} + +/// A clonable machine to sign an action. +/// +/// This will panic if the public key being signed with is not representable within the Schnorr +/// Solidity library. +#[derive(Clone)] +pub(crate) struct ClonableTransctionMachine { + pub(crate) keys: ThresholdKeys, + pub(crate) action: Action, +} + +type LiteralAlgorithmMachine = AlgorithmMachine>; +type LiteralAlgorithmSignMachine = + AlgorithmSignMachine>; + +pub(crate) struct ActionSignMachine { + key: PublicKey, + action: Action, + machine: LiteralAlgorithmSignMachine, +} + +type LiteralAlgorithmSignatureMachine = + AlgorithmSignatureMachine>; + +pub(crate) struct ActionSignatureMachine { + key: PublicKey, + action: Action, + machine: LiteralAlgorithmSignatureMachine, +} + +impl PreprocessMachine for ClonableTransctionMachine { + type Preprocess = ::Preprocess; + type Signature = Transaction; + type SignMachine = ActionSignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + let (machine, preprocess) = + AlgorithmMachine::new(IetfSchnorr::::ietf(), self.keys.clone()) + .preprocess(rng); + ( + ActionSignMachine { + key: PublicKey::new(self.keys.group_key()).expect("signing with non-representable key"), + action: self.action, + machine, + }, + preprocess, + ) + } +} + +impl SignMachine for ActionSignMachine { + type Params = ::Signature, + >>::Params; + type Keys = ::Signature, + >>::Keys; + type Preprocess = ::Signature, + >>::Preprocess; + type SignatureShare = ::Signature, + >>::SignatureShare; + type SignatureMachine = ActionSignatureMachine; + + fn cache(self) -> CachedPreprocess { + unimplemented!() + } + fn from_cache( + params: Self::Params, + keys: Self::Keys, + cache: CachedPreprocess, + ) -> (Self, Self::Preprocess) { + unimplemented!() + } + + fn read_preprocess(&self, reader: &mut R) -> io::Result { + self.machine.read_preprocess(reader) + } + fn sign( + self, + commitments: HashMap, + msg: &[u8], + ) -> Result<(Self::SignatureMachine, Self::SignatureShare), FrostError> { + assert!(msg.is_empty()); + self.machine.sign(commitments, &self.action.message()).map(|(machine, shares)| { + (ActionSignatureMachine { key: self.key, action: self.action, machine }, shares) + }) + } +} + +impl SignatureMachine for ActionSignatureMachine { + type SignatureShare = ::Signature, + >>::SignatureShare; + + fn read_share(&self, reader: &mut R) -> io::Result { + self.machine.read_share(reader) + } + + fn complete( + self, + shares: HashMap, + ) -> Result { + self.machine.complete(shares).map(|signature| { + let s = signature.s; + let c = Signature::challenge(signature.R, &self.key, &self.action.message()); + Transaction(self.action, Signature::new(c, s)) + }) + } +} diff --git a/processor/ethereum/src/primitives/mod.rs b/processor/ethereum/src/primitives/mod.rs index 8d2a9118a..f0d318029 100644 --- a/processor/ethereum/src/primitives/mod.rs +++ b/processor/ethereum/src/primitives/mod.rs @@ -1,5 +1,6 @@ pub(crate) mod output; pub(crate) mod transaction; +pub(crate) mod machine; pub(crate) mod block; pub(crate) const DAI: [u8; 20] = diff --git a/processor/ethereum/src/primitives/transaction.rs b/processor/ethereum/src/primitives/transaction.rs index eeba31803..525953759 100644 --- a/processor/ethereum/src/primitives/transaction.rs +++ 
b/processor/ethereum/src/primitives/transaction.rs @@ -1,14 +1,7 @@ -use std::{io, collections::HashMap}; +use std::io; -use rand_core::{RngCore, CryptoRng}; - -use ciphersuite::{Ciphersuite, Secp256k1}; -use frost::{ - dkg::{Participant, ThresholdKeys}, - FrostError, - algorithm::*, - sign::*, -}; +use ciphersuite::Secp256k1; +use frost::dkg::ThresholdKeys; use alloy_core::primitives::U256; @@ -20,7 +13,7 @@ use ethereum_primitives::keccak256; use ethereum_schnorr::{PublicKey, Signature}; use ethereum_router::{Coin, OutInstructions, Executed, Router}; -use crate::output::OutputId; +use crate::{output::OutputId, machine::ClonableTransctionMachine}; #[derive(Clone, PartialEq, Debug)] pub(crate) enum Action { @@ -32,13 +25,13 @@ pub(crate) enum Action { pub(crate) struct Eventuality(pub(crate) Executed); impl Action { - fn nonce(&self) -> u64 { + pub(crate) fn nonce(&self) -> u64 { match self { Action::SetKey { nonce, .. } | Action::Batch { nonce, .. } => *nonce, } } - fn message(&self) -> Vec { + pub(crate) fn message(&self) -> Vec { match self { Action::SetKey { chain_id, nonce, key } => { Router::update_serai_key_message(*chain_id, *nonce, key) @@ -67,155 +60,20 @@ impl Action { } #[derive(Clone, PartialEq, Debug)] -pub(crate) struct Transaction(Action, Signature); +pub(crate) struct Transaction(pub(crate) Action, pub(crate) Signature); impl scheduler::Transaction for Transaction { fn read(reader: &mut impl io::Read) -> io::Result { - /* - let buf: Vec = borsh::from_reader(reader)?; - // We can only read this from a &[u8], hence prior reading into a Vec - ::decode(&mut buf.as_slice()) - .map(Self) - .map_err(io::Error::other) - */ let action = Action::read(reader)?; let signature = Signature::read(reader)?; Ok(Transaction(action, signature)) } fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { - /* - let mut buf = Vec::with_capacity(256); - ::encode(&self.0, &mut buf); - borsh::BorshSerialize::serialize(&buf, writer) - */ self.0.write(writer)?; self.1.write(writer)?; Ok(()) } } -/// The HRAm to use for the Schnorr Solidity library. -/// -/// This will panic if the public key being signed for is not representable within the Schnorr -/// Solidity library. 
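The `read`/`write` pair above replaces the previously commented-out borsh-wrapped SCALE encoding with direct serialization of the action and signature. Whatever the encoding, the pair must satisfy a round-trip invariant; a sketch of that property, assuming some `tx: Transaction` in scope (not part of this patch):

```rust
// Writing then reading must reproduce the value exactly
let mut buf = Vec::new();
tx.write(&mut buf).unwrap();
let read_back = Transaction::read(&mut buf.as_slice()).unwrap();
assert_eq!(read_back, tx);
```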
-#[derive(Clone, Default, Debug)] -pub struct EthereumHram; -impl Hram for EthereumHram { - #[allow(non_snake_case)] - fn hram( - R: &::G, - A: &::G, - m: &[u8], - ) -> ::F { - Signature::challenge(*R, &PublicKey::new(*A).unwrap(), m) - } -} - -#[derive(Clone)] -pub(crate) struct ClonableTransctionMachine(ThresholdKeys, Action); - -type LiteralAlgorithmMachine = AlgorithmMachine>; -type LiteralAlgorithmSignMachine = - AlgorithmSignMachine>; - -pub(crate) struct ActionSignMachine(PublicKey, Action, LiteralAlgorithmSignMachine); - -type LiteralAlgorithmSignatureMachine = - AlgorithmSignatureMachine>; - -pub(crate) struct ActionSignatureMachine(PublicKey, Action, LiteralAlgorithmSignatureMachine); - -impl PreprocessMachine for ClonableTransctionMachine { - type Preprocess = ::Preprocess; - type Signature = Transaction; - type SignMachine = ActionSignMachine; - - fn preprocess( - self, - rng: &mut R, - ) -> (Self::SignMachine, Self::Preprocess) { - let (machine, preprocess) = - AlgorithmMachine::new(IetfSchnorr::::ietf(), self.0.clone()) - .preprocess(rng); - ( - ActionSignMachine( - PublicKey::new(self.0.group_key()).expect("signing with non-representable key"), - self.1, - machine, - ), - preprocess, - ) - } -} - -impl SignMachine for ActionSignMachine { - type Params = ::Signature, - >>::Params; - type Keys = ::Signature, - >>::Keys; - type Preprocess = ::Signature, - >>::Preprocess; - type SignatureShare = ::Signature, - >>::SignatureShare; - type SignatureMachine = ActionSignatureMachine; - - fn cache(self) -> CachedPreprocess { - unimplemented!() - } - fn from_cache( - params: Self::Params, - keys: Self::Keys, - cache: CachedPreprocess, - ) -> (Self, Self::Preprocess) { - unimplemented!() - } - - fn read_preprocess(&self, reader: &mut R) -> io::Result { - self.2.read_preprocess(reader) - } - fn sign( - self, - commitments: HashMap, - msg: &[u8], - ) -> Result<(Self::SignatureMachine, Self::SignatureShare), FrostError> { - assert!(msg.is_empty()); - self - .2 - .sign(commitments, &self.1.message()) - .map(|(machine, shares)| (ActionSignatureMachine(self.0, self.1, machine), shares)) - } -} - -impl SignatureMachine for ActionSignatureMachine { - type SignatureShare = ::Signature, - >>::SignatureShare; - - fn read_share(&self, reader: &mut R) -> io::Result { - self.2.read_share(reader) - } - - fn complete( - self, - shares: HashMap, - ) -> Result { - /* - match self.1 { - Action::SetKey { chain_id: _, nonce: _, key } => self.0.update_serai_key(key, signature), - Action::Batch { chain_id: _, nonce: _, outs } => self.0.execute(outs, signature), - } - */ - self.2.complete(shares).map(|signature| { - let s = signature.s; - let c = Signature::challenge(signature.R, &self.0, &self.1.message()); - Transaction(self.1, Signature::new(c, s)) - }) - } -} - impl SignableTransaction for Action { type Transaction = Transaction; type Ciphersuite = Secp256k1; @@ -296,7 +154,7 @@ impl SignableTransaction for Action { } fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine { - ClonableTransctionMachine(keys, self) + ClonableTransctionMachine { keys, action: self } } } diff --git a/processor/ethereum/src/publisher.rs b/processor/ethereum/src/publisher.rs index ad8bd09dc..1874e5562 100644 --- a/processor/ethereum/src/publisher.rs +++ b/processor/ethereum/src/publisher.rs @@ -20,6 +20,14 @@ impl signers::TransactionPublisher for TransactionPublisher { &self, tx: Transaction, ) -> impl Send + Future> { + // Convert from an Action (an internal representation of a signable event) to a TxLegacy + /* TODO + 
match tx.0 { + Action::SetKey { chain_id: _, nonce: _, key } => self.router.update_serai_key(key, tx.1), + Action::Batch { chain_id: _, nonce: _, outs } => self.router.execute(outs, tx.1), + } + */ + async move { /* use tokio::{ From 917500740aef385cb8072738fb23032ab4fbb61d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 18 Sep 2024 15:50:21 -0400 Subject: [PATCH 153/179] Have the TransactionPublisher build a TxLegacy from Transaction --- Cargo.lock | 1 + processor/ethereum/Cargo.toml | 1 + processor/ethereum/src/main.rs | 17 +++--- processor/ethereum/src/primitives/output.rs | 30 ++++++----- processor/ethereum/src/publisher.rs | 58 ++++++++++++++++----- processor/ethereum/src/scheduler.rs | 1 + 6 files changed, 75 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e51ec8aa..9afcdcc87 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8334,6 +8334,7 @@ dependencies = [ "alloy-rpc-client", "alloy-rpc-types-eth", "alloy-simple-request-transport", + "alloy-transport", "borsh", "ciphersuite", "const-hex", diff --git a/processor/ethereum/Cargo.toml b/processor/ethereum/Cargo.toml index 9a3b264cc..649e3fb86 100644 --- a/processor/ethereum/Cargo.toml +++ b/processor/ethereum/Cargo.toml @@ -35,6 +35,7 @@ alloy-rlp = { version = "0.3", default-features = false } alloy-consensus = { version = "0.3", default-features = false } alloy-rpc-types-eth = { version = "0.3", default-features = false } +alloy-transport = { version = "0.3", default-features = false } alloy-simple-request-transport = { path = "../../networks/ethereum/alloy-simple-request-transport", default-features = false } alloy-rpc-client = { version = "0.3", default-features = false } alloy-provider = { version = "0.3", default-features = false } diff --git a/processor/ethereum/src/main.rs b/processor/ethereum/src/main.rs index 06c0bc98f..0ebf0f59a 100644 --- a/processor/ethereum/src/main.rs +++ b/processor/ethereum/src/main.rs @@ -30,14 +30,13 @@ use publisher::TransactionPublisher; #[tokio::main] async fn main() { let db = bin::init(); - let feed = { - let provider = Arc::new(RootProvider::new( - ClientBuilder::default().transport(SimpleRequest::new(bin::url()), true), - )); - Rpc { provider } - }; + + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(bin::url()), true), + )); + let chain_id = loop { - match feed.provider.get_chain_id().await { + match provider.get_chain_id().await { Ok(chain_id) => break U256::try_from(chain_id).unwrap(), Err(e) => { log::error!("couldn't connect to the Ethereum node for the chain ID: {e:?}"); @@ -48,9 +47,9 @@ async fn main() { bin::main_loop::<_, KeyGenParams, _>( db, - feed.clone(), + Rpc { provider: provider.clone() }, Scheduler::new(SmartContract { chain_id }), - TransactionPublisher::new({ + TransactionPublisher::new(provider, { let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") .expect("ethereum relayer hostname wasn't specified") .to_string(); diff --git a/processor/ethereum/src/primitives/output.rs b/processor/ethereum/src/primitives/output.rs index 843f22f6b..0f3279211 100644 --- a/processor/ethereum/src/primitives/output.rs +++ b/processor/ethereum/src/primitives/output.rs @@ -1,6 +1,6 @@ use std::io; -use ciphersuite::{Ciphersuite, Secp256k1}; +use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; use alloy_core::primitives::U256; @@ -59,7 +59,10 @@ impl AsMut<[u8]> for OutputId { } #[derive(Clone, PartialEq, Eq, Debug)] -pub(crate) struct Output(pub(crate) EthereumInInstruction); +pub(crate) struct 
Output { + pub(crate) key: ::G, + pub(crate) instruction: EthereumInInstruction, +} impl ReceivedOutput<::G, Address> for Output { type Id = OutputId; type TransactionId = [u8; 32]; @@ -71,40 +74,43 @@ impl ReceivedOutput<::G, Address> for Output { fn id(&self) -> Self::Id { let mut id = [0; 40]; - id[.. 32].copy_from_slice(&self.0.id.0); - id[32 ..].copy_from_slice(&self.0.id.1.to_le_bytes()); + id[.. 32].copy_from_slice(&self.instruction.id.0); + id[32 ..].copy_from_slice(&self.instruction.id.1.to_le_bytes()); OutputId(id) } fn transaction_id(&self) -> Self::TransactionId { - self.0.id.0 + self.instruction.id.0 } fn key(&self) -> ::G { - todo!("TODO") + self.key } fn presumed_origin(&self) -> Option
{ - Some(Address::from(self.0.from)) + Some(Address::from(self.instruction.from)) } fn balance(&self) -> Balance { - let coin = coin_to_serai_coin(&self.0.coin).unwrap_or_else(|| { + let coin = coin_to_serai_coin(&self.instruction.coin).unwrap_or_else(|| { panic!( "mapping coin from an EthereumInInstruction with coin {}, which we don't handle.", "this never should have been yielded" ) }); - Balance { coin, amount: amount_to_serai_amount(coin, self.0.amount) } + Balance { coin, amount: amount_to_serai_amount(coin, self.instruction.amount) } } fn data(&self) -> &[u8] { - &self.0.data + &self.instruction.data } fn write(&self, writer: &mut W) -> io::Result<()> { - self.0.write(writer) + writer.write_all(self.key.to_bytes().as_ref())?; + self.instruction.write(writer) } fn read(reader: &mut R) -> io::Result { - EthereumInInstruction::read(reader).map(Self) + let key = Secp256k1::read_G(reader)?; + let instruction = EthereumInInstruction::read(reader)?; + Ok(Self { key, instruction }) } } diff --git a/processor/ethereum/src/publisher.rs b/processor/ethereum/src/publisher.rs index 1874e5562..cc9c1f5f4 100644 --- a/processor/ethereum/src/publisher.rs +++ b/processor/ethereum/src/publisher.rs @@ -1,34 +1,68 @@ use core::future::Future; +use std::sync::Arc; -use crate::transaction::Transaction; +use alloy_transport::{TransportErrorKind, RpcError}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::RootProvider; + +use tokio::sync::{RwLockReadGuard, RwLock}; + +use ethereum_schnorr::PublicKey; +use ethereum_router::{OutInstructions, Router}; + +use crate::transaction::{Action, Transaction}; #[derive(Clone)] pub(crate) struct TransactionPublisher { + initial_serai_key: PublicKey, + rpc: Arc>, + router: Arc>>, relayer_url: String, } impl TransactionPublisher { - pub(crate) fn new(relayer_url: String) -> Self { - Self { relayer_url } + pub(crate) fn new(rpc: Arc>, relayer_url: String) -> Self { + Self { initial_serai_key: todo!("TODO"), rpc, router: Arc::new(RwLock::new(None)), relayer_url } + } + + // This will always return Ok(Some(_)) or Err(_), never Ok(None) + async fn router(&self) -> Result>, RpcError> { + let router = self.router.read().await; + + // If the router is None, find it on-chain + if router.is_none() { + drop(router); + let mut router = self.router.write().await; + // Check again if it's None in case a different task already did this + if router.is_none() { + let Some(router_actual) = Router::new(self.rpc.clone(), &self.initial_serai_key).await? else { + Err(TransportErrorKind::Custom("publishing transaction yet couldn't find router on chain. was our node reset?".to_string().into()))? 
+ }; + *router = Some(router_actual); + } + return Ok(router.downgrade()); + } + + Ok(router) } } impl signers::TransactionPublisher for TransactionPublisher { - type EphemeralError = (); + type EphemeralError = RpcError; fn publish( &self, tx: Transaction, ) -> impl Send + Future> { - // Convert from an Action (an internal representation of a signable event) to a TxLegacy - /* TODO - match tx.0 { - Action::SetKey { chain_id: _, nonce: _, key } => self.router.update_serai_key(key, tx.1), - Action::Batch { chain_id: _, nonce: _, outs } => self.router.execute(outs, tx.1), - } - */ - async move { + // Convert from an Action (an internal representation of a signable event) to a TxLegacy + let router = self.router().await?; + let router = router.as_ref().unwrap(); + let tx = match tx.0 { + Action::SetKey { chain_id: _, nonce: _, key } => router.update_serai_key(&key, &tx.1), + Action::Batch { chain_id: _, nonce: _, outs } => router.execute(OutInstructions::from(outs.as_ref()), &tx.1), + }; + /* use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, diff --git a/processor/ethereum/src/scheduler.rs b/processor/ethereum/src/scheduler.rs index ca636b5bb..6683eeac7 100644 --- a/processor/ethereum/src/scheduler.rs +++ b/processor/ethereum/src/scheduler.rs @@ -68,6 +68,7 @@ impl smart_contract_scheduler::SmartContract for SmartContract { // TODO: Per-batch gas limit // TODO: Create several batches + // TODO: Handle fees let action = Action::Batch { chain_id: self.chain_id, nonce, outs }; vec![(action.clone(), action.eventuality())] From 6c0f98a533d207a2589583accd49937deda6319b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 18 Sep 2024 18:35:31 -0400 Subject: [PATCH 154/179] cargo fmt, move ScannerFeed from String to the RPC error --- processor/ethereum/src/publisher.rs | 19 +++++++++++++----- processor/ethereum/src/rpc.rs | 30 ++++++++++++++++++----------- 2 files changed, 33 insertions(+), 16 deletions(-) diff --git a/processor/ethereum/src/publisher.rs b/processor/ethereum/src/publisher.rs index cc9c1f5f4..d133768ba 100644 --- a/processor/ethereum/src/publisher.rs +++ b/processor/ethereum/src/publisher.rs @@ -26,7 +26,9 @@ impl TransactionPublisher { } // This will always return Ok(Some(_)) or Err(_), never Ok(None) - async fn router(&self) -> Result>, RpcError> { + async fn router( + &self, + ) -> Result>, RpcError> { let router = self.router.read().await; // If the router is None, find it on-chain @@ -35,9 +37,14 @@ impl TransactionPublisher { let mut router = self.router.write().await; // Check again if it's None in case a different task already did this if router.is_none() { - let Some(router_actual) = Router::new(self.rpc.clone(), &self.initial_serai_key).await? else { - Err(TransportErrorKind::Custom("publishing transaction yet couldn't find router on chain. was our node reset?".to_string().into()))? - }; + let Some(router_actual) = Router::new(self.rpc.clone(), &self.initial_serai_key).await? + else { + Err(TransportErrorKind::Custom( + "publishing transaction yet couldn't find router on chain. was our node reset?" + .to_string() + .into(), + ))? 
+ }; *router = Some(router_actual); } return Ok(router.downgrade()); @@ -60,7 +67,9 @@ impl signers::TransactionPublisher for TransactionPublisher { let router = router.as_ref().unwrap(); let tx = match tx.0 { Action::SetKey { chain_id: _, nonce: _, key } => router.update_serai_key(&key, &tx.1), - Action::Batch { chain_id: _, nonce: _, outs } => router.execute(OutInstructions::from(outs.as_ref()), &tx.1), + Action::Batch { chain_id: _, nonce: _, outs } => { + router.execute(OutInstructions::from(outs.as_ref()), &tx.1) + } }; /* diff --git a/processor/ethereum/src/rpc.rs b/processor/ethereum/src/rpc.rs index 819fbf484..e3f25f86c 100644 --- a/processor/ethereum/src/rpc.rs +++ b/processor/ethereum/src/rpc.rs @@ -2,6 +2,7 @@ use core::future::Future; use std::sync::Arc; use alloy_rpc_types_eth::{BlockTransactionsKind, BlockNumberOrTag}; +use alloy_transport::{RpcError, TransportErrorKind}; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; @@ -28,7 +29,7 @@ impl ScannerFeed for Rpc { type Block = FullEpoch; - type EphemeralError = String; + type EphemeralError = RpcError; fn latest_finalized_block_number( &self, @@ -37,14 +38,17 @@ impl ScannerFeed for Rpc { let actual_number = self .provider .get_block(BlockNumberOrTag::Finalized.into(), BlockTransactionsKind::Hashes) - .await - .map_err(|e| format!("couldn't get the latest finalized block: {e:?}"))? - .ok_or_else(|| "there was no finalized block".to_string())? + .await? + .ok_or_else(|| { + TransportErrorKind::Custom("there was no finalized block".to_string().into()) + })? .header .number; // Error if there hasn't been a full epoch yet if actual_number < 32 { - Err("there has not been a completed epoch yet".to_string())? + Err(TransportErrorKind::Custom( + "there has not been a completed epoch yet".to_string().into(), + ))? } // The divison by 32 returns the amount of completed epochs // Converting from amount of completed epochs to the latest completed epoch requires @@ -75,10 +79,12 @@ impl ScannerFeed for Rpc { self .provider .get_block((start - 1).into(), BlockTransactionsKind::Hashes) - .await - .map_err(|e| format!("couldn't get block: {e:?}"))? + .await? .ok_or_else(|| { - format!("ethereum node didn't have requested block: {number:?}. did we reorg?") + TransportErrorKind::Custom( + format!("ethereum node didn't have requested block: {number:?}. was the node reset?") + .into(), + ) })? .header .hash @@ -88,10 +94,12 @@ impl ScannerFeed for Rpc { let end_header = self .provider .get_block((start + 31).into(), BlockTransactionsKind::Hashes) - .await - .map_err(|e| format!("couldn't get block: {e:?}"))? + .await? .ok_or_else(|| { - format!("ethereum node didn't have requested block: {number:?}. did we reorg?") + TransportErrorKind::Custom( + format!("ethereum node didn't have requested block: {number:?}. was the node reset?") + .into(), + ) })? 
.header; From 5f3da020dfaed11265c2f21263c1aa9d652876f2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 18 Sep 2024 22:00:32 -0400 Subject: [PATCH 155/179] Explicitly add an unspendable script path to the processor's generated keys --- Cargo.lock | 1 + processor/bitcoin/Cargo.toml | 1 + processor/bitcoin/src/key_gen.rs | 35 ++++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 9afcdcc87..2e2faecb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8108,6 +8108,7 @@ dependencies = [ "dkg", "flexible-transcript", "hex", + "k256", "log", "modular-frost", "parity-scale-codec", diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml index 52cca1ae1..2a69d2343 100644 --- a/processor/bitcoin/Cargo.toml +++ b/processor/bitcoin/Cargo.toml @@ -24,6 +24,7 @@ scale = { package = "parity-scale-codec", version = "3", default-features = fals borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } +k256 = { version = "0.13", default-features = false, features = ["std"] } ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } diff --git a/processor/bitcoin/src/key_gen.rs b/processor/bitcoin/src/key_gen.rs index 415441348..bc911676a 100644 --- a/processor/bitcoin/src/key_gen.rs +++ b/processor/bitcoin/src/key_gen.rs @@ -1,6 +1,8 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; use frost::ThresholdKeys; +use bitcoin_serai::bitcoin::{hashes::Hash, TapTweakHash}; + use crate::{primitives::x_coord_to_even_point, scan::scanner}; pub(crate) struct KeyGenParams; @@ -10,6 +12,39 @@ impl key_gen::KeyGenParams for KeyGenParams { type ExternalNetworkCiphersuite = Secp256k1; fn tweak_keys(keys: &mut ThresholdKeys) { + /* + Offset the keys by their hash to prevent a malicious participant from inserting a script + path, as specified in + https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki#cite_note-23 + + This isn't exactly the same, as we then increment the key until it happens to be even, yet + the goal is simply that someone who biases the key-gen can't insert their own script path. + By adding the hash of the key to the key, anyone who attempts such bias will change the key + used (changing the bias necessary). + + This is also potentially unnecessary for Serai, which uses an eVRF-based DKG. While that can + be biased (by manipulating who participates as we use it robustly and only require `t` + participants), contributions cannot be arbitrarily defined. That presumably requires + performing a search of the possible keys for some collision with 2**128 work. It's better to + offset regardless and avoid this question however. + */ + { + use k256::elliptic_curve::{ + bigint::{Encoding, U256}, + ops::Reduce, + }; + let tweak_hash = TapTweakHash::hash(&keys.group_key().to_bytes().as_slice()[1 ..]); + /* + https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki#cite_ref-13-0 states how the + bias is negligible. This reduction shouldn't ever occur, yet if it did, the script path + would be unusable due to a check the script path hash is less than the order. 
That doesn't + impact us as we don't want the script path to be usable. + */ + *keys = keys.offset(::F::reduce(U256::from_be_bytes( + *tweak_hash.to_raw_hash().as_ref(), + ))); + } + *keys = bitcoin_serai::wallet::tweak_keys(keys); // Also create a scanner to assert these keys, and all expected paths, are usable scanner(keys.group_key()); From 4310101bb0499c3c815a7fe8ccac1fe74209f164 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 00:39:51 -0400 Subject: [PATCH 156/179] Finish the Ethereum TX publishing code --- networks/ethereum/relayer/README.md | 4 +-- networks/ethereum/relayer/src/main.rs | 20 ++++++----- processor/ethereum/src/publisher.rs | 51 ++++++++++++++------------- 3 files changed, 39 insertions(+), 36 deletions(-) diff --git a/networks/ethereum/relayer/README.md b/networks/ethereum/relayer/README.md index beed4b724..fc2d36fdd 100644 --- a/networks/ethereum/relayer/README.md +++ b/networks/ethereum/relayer/README.md @@ -1,4 +1,4 @@ # Ethereum Transaction Relayer -This server collects Ethereum router commands to be published, offering an RPC -to fetch them. +This server collects Ethereum transactions to be published, offering an RPC to +fetch them. diff --git a/networks/ethereum/relayer/src/main.rs b/networks/ethereum/relayer/src/main.rs index 545930040..f5a7e0f90 100644 --- a/networks/ethereum/relayer/src/main.rs +++ b/networks/ethereum/relayer/src/main.rs @@ -40,8 +40,8 @@ async fn main() { db }; - // Start command recipience server - // This should not be publicly exposed + // Start transaction recipience server + // This MUST NOT be publicly exposed // TODO: Add auth tokio::spawn({ let db = db.clone(); @@ -58,25 +58,27 @@ async fn main() { let mut buf = vec![0; usize::try_from(msg_len).unwrap()]; let Ok(_) = socket.read_exact(&mut buf).await else { break }; - if buf.len() < 5 { + if buf.len() < (4 + 1) { break; } let nonce = u32::from_le_bytes(buf[.. 4].try_into().unwrap()); let mut txn = db.txn(); + // Save the transaction txn.put(nonce.to_le_bytes(), &buf[4 ..]); txn.commit(); let Ok(()) = socket.write_all(&[1]).await else { break }; - log::info!("received signed command #{nonce}"); + log::info!("received transaction to publish (nonce {nonce})"); } }); } } }); - // Start command fetch server + // Start transaction fetch server // 5132 ^ ((b'E' << 8) | b'R') + 1 + // TODO: JSON-RPC server which returns this as JSON? let server = TcpListener::bind("0.0.0.0:20831").await.unwrap(); loop { let (mut socket, _) = server.accept().await.unwrap(); @@ -84,16 +86,16 @@ async fn main() { tokio::spawn(async move { let db = db.clone(); loop { - // Nonce to get the router comamnd for + // Nonce to get the unsigned transaction for let mut buf = vec![0; 4]; let Ok(_) = socket.read_exact(&mut buf).await else { break }; - let command = db.get(&buf[.. 4]).unwrap_or(vec![]); - let Ok(()) = socket.write_all(&u32::try_from(command.len()).unwrap().to_le_bytes()).await + let transaction = db.get(&buf[.. 
4]).unwrap_or(vec![]); + let Ok(()) = socket.write_all(&u32::try_from(transaction.len()).unwrap().to_le_bytes()).await else { break; }; - let Ok(()) = socket.write_all(&command).await else { break }; + let Ok(()) = socket.write_all(&transaction).await else { break }; } }); } diff --git a/processor/ethereum/src/publisher.rs b/processor/ethereum/src/publisher.rs index d133768ba..03b1d24c1 100644 --- a/processor/ethereum/src/publisher.rs +++ b/processor/ethereum/src/publisher.rs @@ -1,11 +1,17 @@ use core::future::Future; use std::sync::Arc; +use alloy_rlp::Encodable; + use alloy_transport::{TransportErrorKind, RpcError}; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::RootProvider; -use tokio::sync::{RwLockReadGuard, RwLock}; +use tokio::{ + sync::{RwLockReadGuard, RwLock}, + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, +}; use ethereum_schnorr::PublicKey; use ethereum_router::{OutInstructions, Router}; @@ -62,9 +68,11 @@ impl signers::TransactionPublisher for TransactionPublisher { tx: Transaction, ) -> impl Send + Future> { async move { - // Convert from an Action (an internal representation of a signable event) to a TxLegacy let router = self.router().await?; let router = router.as_ref().unwrap(); + + let nonce = tx.0.nonce(); + // Convert from an Action (an internal representation of a signable event) to a TxLegacy let tx = match tx.0 { Action::SetKey { chain_id: _, nonce: _, key } => router.update_serai_key(&key, &tx.1), Action::Batch { chain_id: _, nonce: _, outs } => { @@ -72,40 +80,33 @@ impl signers::TransactionPublisher for TransactionPublisher { } }; - /* - use tokio::{ - io::{AsyncReadExt, AsyncWriteExt}, - net::TcpStream, - }; - - let mut msg = vec![]; - match completion.command() { - RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { - msg.extend(&u32::try_from(nonce).unwrap().to_le_bytes()); - } - } - completion.write(&mut msg).unwrap(); + // Nonce + let mut msg = nonce.to_le_bytes().to_vec(); + // Transaction + tx.encode(&mut msg); let Ok(mut socket) = TcpStream::connect(&self.relayer_url).await else { - log::warn!("couldn't connect to the relayer server"); - Err(NetworkError::ConnectionError)? + Err(TransportErrorKind::Custom( + "couldn't connect to the relayer server".to_string().into(), + ))? }; let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { - log::warn!("couldn't send the message's len to the relayer server"); - Err(NetworkError::ConnectionError)? + Err(TransportErrorKind::Custom( + "couldn't send the message's len to the relayer server".to_string().into(), + ))? }; let Ok(()) = socket.write_all(&msg).await else { - log::warn!("couldn't write the message to the relayer server"); - Err(NetworkError::ConnectionError)? + Err(TransportErrorKind::Custom( + "couldn't write the message to the relayer server".to_string().into(), + ))? }; if socket.read_u8().await.ok() != Some(1) { - log::warn!("didn't get the ack from the relayer server"); - Err(NetworkError::ConnectionError)?; + Err(TransportErrorKind::Custom( + "didn't get the ack from the relayer server".to_string().into(), + ))?; } Ok(()) - */ - todo!("TODO") } } } From 8c88b53de36024d5bc35bc3dd72019058dac0f29 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 01:00:31 -0400 Subject: [PATCH 157/179] Pass the latest active key to the Block's scan function Effectively necessary for networks on which we utilize account abstraction in order to know what key to associate the received coins with. 
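Both the scan task and the eventuality task derive this latest active key the same way: take the most recent key, skipping a key which is active yet not reporting. That selection rule, factored out as a sketch (generic over the key type, with `LifetimeStage` as defined in the scanner crate):

```rust
// Use the most recent key, unless it's active yet not reporting (sketch of
// the rule the diff below applies within both tasks)
fn latest_active_key<K>(mut keys_with_stages: Vec<(K, LifetimeStage)>) -> K {
  loop {
    let (key, stage) = keys_with_stages.pop().expect("no active keys");
    if stage == LifetimeStage::ActiveYetNotReporting {
      continue;
    }
    break key;
  }
}
```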
--- networks/ethereum/relayer/src/main.rs | 3 ++- processor/bitcoin/src/primitives/block.rs | 6 +++++- processor/ethereum/src/primitives/block.rs | 13 ++++++++++++- processor/monero/src/primitives/block.rs | 6 +++++- processor/primitives/src/block.rs | 6 +++++- processor/scanner/src/db.rs | 1 + processor/scanner/src/eventuality/mod.rs | 14 +++++++++++++- processor/scanner/src/lib.rs | 6 +++--- processor/scanner/src/scan/mod.rs | 15 ++++++++++++++- 9 files changed, 60 insertions(+), 10 deletions(-) diff --git a/networks/ethereum/relayer/src/main.rs b/networks/ethereum/relayer/src/main.rs index f5a7e0f90..6424c90f5 100644 --- a/networks/ethereum/relayer/src/main.rs +++ b/networks/ethereum/relayer/src/main.rs @@ -91,7 +91,8 @@ async fn main() { let Ok(_) = socket.read_exact(&mut buf).await else { break }; let transaction = db.get(&buf[.. 4]).unwrap_or(vec![]); - let Ok(()) = socket.write_all(&u32::try_from(transaction.len()).unwrap().to_le_bytes()).await + let Ok(()) = + socket.write_all(&u32::try_from(transaction.len()).unwrap().to_le_bytes()).await else { break; }; diff --git a/processor/bitcoin/src/primitives/block.rs b/processor/bitcoin/src/primitives/block.rs index e3df7e693..02b8e5957 100644 --- a/processor/bitcoin/src/primitives/block.rs +++ b/processor/bitcoin/src/primitives/block.rs @@ -43,7 +43,11 @@ impl primitives::Block for Block { primitives::BlockHeader::id(&BlockHeader(self.1.header)) } - fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec { + fn scan_for_outputs_unordered( + &self, + _latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec { let scanner = scanner(key); let mut res = vec![]; diff --git a/processor/ethereum/src/primitives/block.rs b/processor/ethereum/src/primitives/block.rs index 2c0e0505f..a6268c0be 100644 --- a/processor/ethereum/src/primitives/block.rs +++ b/processor/ethereum/src/primitives/block.rs @@ -59,8 +59,19 @@ impl primitives::Block for FullEpoch { self.epoch.end_hash } - fn scan_for_outputs_unordered(&self, _key: Self::Key) -> Vec { + fn scan_for_outputs_unordered( + &self, + latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec { // Only return these outputs for the latest key + if latest_active_key != key { + return vec![]; + } + + // Associate all outputs with the latest active key + // We don't associate these with the current key within the SC as that'll cause outputs to be + // marked for forwarding if the SC is delayed to actually rotate todo!("TODO") } diff --git a/processor/monero/src/primitives/block.rs b/processor/monero/src/primitives/block.rs index 70a559c1d..6afae4291 100644 --- a/processor/monero/src/primitives/block.rs +++ b/processor/monero/src/primitives/block.rs @@ -40,7 +40,11 @@ impl primitives::Block for Block { self.0.block.hash() } - fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec { + fn scan_for_outputs_unordered( + &self, + _latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec { let mut scanner = GuaranteedScanner::new(view_pair(key)); scanner.register_subaddress(EXTERNAL_SUBADDRESS); scanner.register_subaddress(BRANCH_SUBADDRESS); diff --git a/processor/primitives/src/block.rs b/processor/primitives/src/block.rs index da4812470..a3dec40bb 100644 --- a/processor/primitives/src/block.rs +++ b/processor/primitives/src/block.rs @@ -43,7 +43,11 @@ pub trait Block: Send + Sync + Sized + Clone + Debug { /// Scan all outputs within this block to find the outputs spendable by this key. /// /// No assumption on the order of the returned outputs is made. 
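  ///
  /// Callers which need a canonical order (as consensus requires) must sort
  /// the result; the scanner does so via its `sort_outputs` comparator. A
  /// minimal comparator of that shape, ordering by output ID alone (the
  /// actual `sort_outputs` may additionally assert IDs are distinct), would
  /// be:
  ///
  /// ```ignore
  /// outputs.sort_by(|a, b| a.id().as_ref().cmp(b.id().as_ref()));
  /// ```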
- fn scan_for_outputs_unordered(&self, key: Self::Key) -> Vec; + fn scan_for_outputs_unordered( + &self, + latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec; /// Check if this block resolved any Eventualities. /// diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 49ab17859..884e0e2b5 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -28,6 +28,7 @@ struct SeraiKeyDbEntry { key: K, } +#[derive(Clone)] pub(crate) struct SeraiKey { pub(crate) key: K, pub(crate) stage: LifetimeStage, diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs index 99fea2fbd..bb3e4b7e4 100644 --- a/processor/scanner/src/eventuality/mod.rs +++ b/processor/scanner/src/eventuality/mod.rs @@ -273,6 +273,18 @@ impl> ContinuallyRan for EventualityTas log::debug!("checking eventuality completions in block: {} ({b})", hex::encode(block.id())); let (keys, keys_with_stages) = self.keys_and_keys_with_stages(b); + let latest_active_key = { + let mut keys_with_stages = keys_with_stages.clone(); + loop { + // Use the most recent key + let (key, stage) = keys_with_stages.pop().unwrap(); + // Unless this key is active, but not yet reporting + if stage == LifetimeStage::ActiveYetNotReporting { + continue; + } + break key; + } + }; let mut txn = self.db.txn(); @@ -307,7 +319,7 @@ impl> ContinuallyRan for EventualityTas } // Fetch all non-External outputs - let mut non_external_outputs = block.scan_for_outputs(key.key); + let mut non_external_outputs = block.scan_for_outputs(latest_active_key, key.key); non_external_outputs.retain(|output| output.kind() != OutputType::External); // Drop any outputs less than the dust limit non_external_outputs.retain(|output| { diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 1b6afaa91..e591d2101 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -46,11 +46,11 @@ pub(crate) fn sort_outputs /// Extension traits around Block. 
pub(crate) trait BlockExt: Block { - fn scan_for_outputs(&self, key: Self::Key) -> Vec; + fn scan_for_outputs(&self, latest_active_key: Self::Key, key: Self::Key) -> Vec; } impl BlockExt for B { - fn scan_for_outputs(&self, key: Self::Key) -> Vec { - let mut outputs = self.scan_for_outputs_unordered(key); + fn scan_for_outputs(&self, latest_active_key: Self::Key, key: Self::Key) -> Vec { + let mut outputs = self.scan_for_outputs_unordered(latest_active_key, key); outputs.sort_by(sort_outputs); outputs } diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index b235ff154..7004a4d9e 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -122,6 +122,19 @@ impl ContinuallyRan for ScanTask { let keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&txn) .expect("scanning for a blockchain without any keys set"); + let latest_active_key = { + let mut keys = keys.clone(); + loop { + // Use the most recent key + let key = keys.pop().unwrap(); + // Unless this key is active, but not yet reporting + if key.stage == LifetimeStage::ActiveYetNotReporting { + continue; + } + break key.key; + } + }; + // The scan data for this block let mut scan_data = SenderScanData { block_number: b, @@ -157,7 +170,7 @@ impl ContinuallyRan for ScanTask { // Scan for each key for key in &keys { - for output in block.scan_for_outputs(key.key) { + for output in block.scan_for_outputs(latest_active_key, key.key) { assert_eq!(output.key(), key.key); /* From bb84b81a5656b7908bb391dae35343b5ec47fa1a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 01:05:36 -0400 Subject: [PATCH 158/179] Call tidy_keys upon queue_key Prevents the potential case of the substrate task and the scan task writing to the same storage slot at once. --- processor/scanner/src/db.rs | 46 +++++++++++++++++-------------- processor/scanner/src/scan/mod.rs | 3 -- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs index 884e0e2b5..a985ba43c 100644 --- a/processor/scanner/src/db.rs +++ b/processor/scanner/src/db.rs @@ -116,6 +116,28 @@ impl ScannerGlobalDb { StartBlock::set(txn, &block) } + fn tidy_keys(txn: &mut impl DbTxn) { + let mut keys: Vec>>> = + ActiveKeys::get(txn).expect("retiring key yet no active keys"); + let Some(key) = keys.first() else { return }; + + // Get the block we're scanning for next + let block_number = next_to_scan_for_outputs_block::(txn).expect( + "tidying keys despite never setting the next to scan for block (done on initialization)", + ); + // If this key is scheduled for retiry... + if let Some(retire_at) = RetireAt::get(txn, key.key) { + // And is retired by/at this block... + if retire_at <= block_number { + // Remove it from the list of keys + let key = keys.remove(0); + ActiveKeys::set(txn, &keys); + // Also clean up the retiry block + RetireAt::del(txn, key.key); + } + } + } + /// Queue a key. /// /// Keys may be queued whenever, so long as they're scheduled to activate `WINDOW_LENGTH` blocks @@ -165,6 +187,9 @@ impl ScannerGlobalDb { // Push and save the next key keys.push(SeraiKeyDbEntry { activation_block_number, key: EncodableG(key) }); ActiveKeys::set(txn, &keys); + + // Now tidy the keys, ensuring this has a maximum length of 2 + Self::tidy_keys(txn); } /// Retire a key. 
/// @@ -181,27 +206,6 @@ impl ScannerGlobalDb { RetireAt::set(txn, EncodableG(key), &at_block); } - pub(crate) fn tidy_keys(txn: &mut impl DbTxn) { - let mut keys: Vec>>> = - ActiveKeys::get(txn).expect("retiring key yet no active keys"); - let Some(key) = keys.first() else { return }; - - // Get the block we're scanning for next - let block_number = next_to_scan_for_outputs_block::(txn).expect( - "tidying keys despite never setting the next to scan for block (done on initialization)", - ); - // If this key is scheduled for retiry... - if let Some(retire_at) = RetireAt::get(txn, key.key) { - // And is retired by/at this block... - if retire_at <= block_number { - // Remove it from the list of keys - let key = keys.remove(0); - ActiveKeys::set(txn, &keys); - // Also clean up the retiry block - RetireAt::del(txn, key.key); - } - } - } /// Fetch the active keys, as of the next-to-scan-for-outputs Block. /// /// This means the scan task should scan for all keys returned by this. diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs index 7004a4d9e..0ebdf9925 100644 --- a/processor/scanner/src/scan/mod.rs +++ b/processor/scanner/src/scan/mod.rs @@ -116,9 +116,6 @@ impl ContinuallyRan for ScanTask { assert_eq!(ScanDb::::next_to_scan_for_outputs_block(&txn).unwrap(), b); - // Tidy the keys, then fetch them - // We don't have to tidy them here, we just have to somewhere, so why not here? - ScannerGlobalDb::::tidy_keys(&mut txn); let keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&txn) .expect("scanning for a blockchain without any keys set"); From bf0638490c2c64933e21eab8f3532d51fa8354d9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 01:31:52 -0400 Subject: [PATCH 159/179] Add hooks to the main loop Lets the Ethereum processor track the first key set as soon as it's set. --- Cargo.lock | 1 + processor/bin/src/lib.rs | 11 +++++++ processor/bitcoin/src/main.rs | 2 +- processor/ethereum/Cargo.toml | 1 + processor/ethereum/router/src/lib.rs | 7 ----- processor/ethereum/src/main.rs | 34 ++++++++++++++++++++-- processor/ethereum/src/primitives/block.rs | 6 ++-- processor/ethereum/src/publisher.rs | 26 ++++++++++++----- processor/monero/src/main.rs | 2 +- 9 files changed, 67 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2e2faecb6..00cb2ac50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8355,6 +8355,7 @@ dependencies = [ "serai-processor-ethereum-primitives", "serai-processor-ethereum-router", "serai-processor-key-gen", + "serai-processor-messages", "serai-processor-primitives", "serai-processor-scanner", "serai-processor-scheduler-primitives", diff --git a/processor/bin/src/lib.rs b/processor/bin/src/lib.rs index 7758b1ead..651514ad2 100644 --- a/processor/bin/src/lib.rs +++ b/processor/bin/src/lib.rs @@ -157,8 +157,18 @@ async fn first_block_after_time(feed: &S, serai_time: u64) -> u6 } } +/// Hooks to run during the main loop. +pub trait Hooks { + /// A hook to run upon receiving a message. + fn on_message(txn: &mut impl DbTxn, msg: &messages::CoordinatorMessage); +} +impl Hooks for () { + fn on_message(_: &mut impl DbTxn, _: &messages::CoordinatorMessage) {} +} + /// The main loop of a Processor, interacting with the Coordinator. 
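///
/// `Hooks` lets a network embed extra handling of coordinator messages, as the
/// Ethereum processor does to record the initial key. A no-op illustration
/// (a sketch which assumes `CoordinatorMessage` implements `Debug`):
///
/// ```ignore
/// struct LogMessages;
/// impl Hooks for LogMessages {
///   fn on_message(_txn: &mut impl DbTxn, msg: &messages::CoordinatorMessage) {
///     log::debug!("received coordinator message: {msg:?}");
///   }
/// }
/// ```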
pub async fn main_loop< + H: Hooks, S: ScannerFeed, K: KeyGenParams>>, Sch: Clone @@ -183,6 +193,7 @@ pub async fn main_loop< let db_clone = db.clone(); let mut txn = db.txn(); let msg = coordinator.next_message(&mut txn).await; + H::on_message(&mut txn, &msg); let mut txn = Some(txn); match msg { messages::CoordinatorMessage::KeyGen(msg) => { diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs index f260c47cb..5feb3e25d 100644 --- a/processor/bitcoin/src/main.rs +++ b/processor/bitcoin/src/main.rs @@ -57,7 +57,7 @@ async fn main() { tokio::spawn(TxIndexTask(feed.clone()).continually_run(index_task, vec![])); core::mem::forget(index_handle); - bin::main_loop::<_, KeyGenParams, _>(db, feed.clone(), Scheduler::new(Planner), feed).await; + bin::main_loop::<(), _, KeyGenParams, _>(db, feed.clone(), Scheduler::new(Planner), feed).await; } /* diff --git a/processor/ethereum/Cargo.toml b/processor/ethereum/Cargo.toml index 649e3fb86..c2a6f581d 100644 --- a/processor/ethereum/Cargo.toml +++ b/processor/ethereum/Cargo.toml @@ -49,6 +49,7 @@ tokio = { version = "1", default-features = false, features = ["rt-multi-thread" serai-env = { path = "../../common/env" } serai-db = { path = "../../common/db" } +messages = { package = "serai-processor-messages", path = "../messages" } key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } primitives = { package = "serai-processor-primitives", path = "../primitives" } diff --git a/processor/ethereum/router/src/lib.rs b/processor/ethereum/router/src/lib.rs index 344e2beed..d56c514f1 100644 --- a/processor/ethereum/router/src/lib.rs +++ b/processor/ethereum/router/src/lib.rs @@ -6,13 +6,6 @@ use std::{sync::Arc, io, collections::HashSet}; use group::ff::PrimeField; -/* -use k256::{ - elliptic_curve::{group::GroupEncoding, sec1}, - ProjectivePoint, -}; -*/ - use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind}; use alloy_consensus::TxLegacy; diff --git a/processor/ethereum/src/main.rs b/processor/ethereum/src/main.rs index 0ebf0f59a..bfb9a8df4 100644 --- a/processor/ethereum/src/main.rs +++ b/processor/ethereum/src/main.rs @@ -13,7 +13,13 @@ use alloy_simple_request_transport::SimpleRequest; use alloy_rpc_client::ClientBuilder; use alloy_provider::{Provider, RootProvider}; +use serai_client::validator_sets::primitives::Session; + use serai_env as env; +use serai_db::{Get, DbTxn, create_db}; + +use ::primitives::EncodableG; +use ::key_gen::KeyGenParams as KeyGenParamsTrait; mod primitives; pub(crate) use crate::primitives::*; @@ -27,6 +33,28 @@ use scheduler::{SmartContract, Scheduler}; mod publisher; use publisher::TransactionPublisher; +create_db! { + EthereumProcessor { + // The initial key for Serai on Ethereum + InitialSeraiKey: () -> EncodableG, + } +} + +struct SetInitialKey; +impl bin::Hooks for SetInitialKey { + fn on_message(txn: &mut impl DbTxn, msg: &messages::CoordinatorMessage) { + if let messages::CoordinatorMessage::Substrate( + messages::substrate::CoordinatorMessage::SetKeys { session, key_pair, .. 
}, + ) = msg + { + assert_eq!(*session, Session(0)); + let key = KeyGenParams::decode_key(key_pair.1.as_ref()) + .expect("invalid Ethereum key confirmed on Substrate"); + InitialSeraiKey::set(txn, &EncodableG(key)); + } + } +} + #[tokio::main] async fn main() { let db = bin::init(); @@ -45,11 +73,11 @@ async fn main() { } }; - bin::main_loop::<_, KeyGenParams, _>( - db, + bin::main_loop::( + db.clone(), Rpc { provider: provider.clone() }, Scheduler::new(SmartContract { chain_id }), - TransactionPublisher::new(provider, { + TransactionPublisher::new(db, provider, { let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") .expect("ethereum relayer hostname wasn't specified") .to_string(); diff --git a/processor/ethereum/src/primitives/block.rs b/processor/ethereum/src/primitives/block.rs index a6268c0be..cd26b400f 100644 --- a/processor/ethereum/src/primitives/block.rs +++ b/processor/ethereum/src/primitives/block.rs @@ -6,7 +6,7 @@ use serai_client::networks::ethereum::Address; use primitives::{ReceivedOutput, EventualityTracker}; -use ethereum_router::Executed; +use ethereum_router::{InInstruction as EthereumInInstruction, Executed}; use crate::{output::Output, transaction::Eventuality}; @@ -43,7 +43,7 @@ impl primitives::BlockHeader for Epoch { #[derive(Clone, PartialEq, Eq, Debug)] pub(crate) struct FullEpoch { epoch: Epoch, - outputs: Vec, + instructions: Vec, executed: Vec, } @@ -72,7 +72,7 @@ impl primitives::Block for FullEpoch { // Associate all outputs with the latest active key // We don't associate these with the current key within the SC as that'll cause outputs to be // marked for forwarding if the SC is delayed to actually rotate - todo!("TODO") + self.instructions.iter().cloned().map(|instruction| Output { key, instruction }).collect() } #[allow(clippy::type_complexity)] diff --git a/processor/ethereum/src/publisher.rs b/processor/ethereum/src/publisher.rs index 03b1d24c1..5a7a958a0 100644 --- a/processor/ethereum/src/publisher.rs +++ b/processor/ethereum/src/publisher.rs @@ -13,22 +13,27 @@ use tokio::{ net::TcpStream, }; +use serai_db::Db; + use ethereum_schnorr::PublicKey; use ethereum_router::{OutInstructions, Router}; -use crate::transaction::{Action, Transaction}; +use crate::{ + InitialSeraiKey, + transaction::{Action, Transaction}, +}; #[derive(Clone)] -pub(crate) struct TransactionPublisher { - initial_serai_key: PublicKey, +pub(crate) struct TransactionPublisher { + db: D, rpc: Arc>, router: Arc>>, relayer_url: String, } -impl TransactionPublisher { - pub(crate) fn new(rpc: Arc>, relayer_url: String) -> Self { - Self { initial_serai_key: todo!("TODO"), rpc, router: Arc::new(RwLock::new(None)), relayer_url } +impl TransactionPublisher { + pub(crate) fn new(db: D, rpc: Arc>, relayer_url: String) -> Self { + Self { db, rpc, router: Arc::new(RwLock::new(None)), relayer_url } } // This will always return Ok(Some(_)) or Err(_), never Ok(None) @@ -43,7 +48,12 @@ impl TransactionPublisher { let mut router = self.router.write().await; // Check again if it's None in case a different task already did this if router.is_none() { - let Some(router_actual) = Router::new(self.rpc.clone(), &self.initial_serai_key).await? + let Some(router_actual) = Router::new( + self.rpc.clone(), + &PublicKey::new(InitialSeraiKey::get(&self.db).unwrap().0) + .expect("initial key used by Serai wasn't representable on Ethereum"), + ) + .await? else { Err(TransportErrorKind::Custom( "publishing transaction yet couldn't find router on chain. was our node reset?" 
@@ -60,7 +70,7 @@ impl TransactionPublisher { } } -impl signers::TransactionPublisher for TransactionPublisher { +impl signers::TransactionPublisher for TransactionPublisher { type EphemeralError = RpcError; fn publish( diff --git a/processor/monero/src/main.rs b/processor/monero/src/main.rs index d36118d01..b5c67f12a 100644 --- a/processor/monero/src/main.rs +++ b/processor/monero/src/main.rs @@ -33,7 +33,7 @@ async fn main() { }, }; - bin::main_loop::<_, KeyGenParams, _>( + bin::main_loop::<(), _, KeyGenParams, _>( db, feed.clone(), Scheduler::new(Planner(feed.clone())), From 40f5c35d13a3831e1a96cd924eaa0e29ffbee090 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 02:41:07 -0400 Subject: [PATCH 160/179] Finish Ethereum ScannerFeed --- processor/ethereum/TODO/old_processor.rs | 219 --------------------- processor/ethereum/src/main.rs | 4 +- processor/ethereum/src/primitives/block.rs | 8 +- processor/ethereum/src/primitives/mod.rs | 2 + processor/ethereum/src/publisher.rs | 8 +- processor/ethereum/src/rpc.rs | 114 +++++++++-- processor/ethereum/src/scheduler.rs | 18 +- 7 files changed, 126 insertions(+), 247 deletions(-) diff --git a/processor/ethereum/TODO/old_processor.rs b/processor/ethereum/TODO/old_processor.rs index a8f55c791..2e2daa3e4 100644 --- a/processor/ethereum/TODO/old_processor.rs +++ b/processor/ethereum/TODO/old_processor.rs @@ -1,146 +1,5 @@ -/* -#![cfg_attr(docsrs, feature(doc_auto_cfg))] -#![doc = include_str!("../README.md")] -#![deny(missing_docs)] - -use core::{fmt, time::Duration}; -use std::{ - sync::Arc, - collections::{HashSet, HashMap}, - io, -}; - -use async_trait::async_trait; - -use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; -use frost::ThresholdKeys; - -use ethereum_serai::{ - alloy::{ - primitives::U256, - rpc_types::{BlockTransactionsKind, BlockNumberOrTag, Transaction}, - simple_request_transport::SimpleRequest, - rpc_client::ClientBuilder, - provider::{Provider, RootProvider}, - }, - crypto::{PublicKey, Signature}, - erc20::Erc20, - deployer::Deployer, - router::{Router, Coin as EthereumCoin, InInstruction as EthereumInInstruction}, - machine::*, -}; -#[cfg(test)] -use ethereum_serai::alloy::primitives::B256; - -use tokio::{ - time::sleep, - sync::{RwLock, RwLockReadGuard}, -}; -#[cfg(not(test))] -use tokio::{ - io::{AsyncReadExt, AsyncWriteExt}, - net::TcpStream, -}; - -use serai_client::{ - primitives::{Coin, Amount, Balance, NetworkId}, - validator_sets::primitives::Session, -}; - -use crate::{ - Db, Payment, - networks::{ - OutputType, Output, Transaction as TransactionTrait, SignableTransaction, Block, - Eventuality as EventualityTrait, EventualitiesTracker, NetworkError, Network, - }, - key_gen::NetworkKeyDb, - multisigs::scheduler::{ - Scheduler as SchedulerTrait, - smart_contract::{Addendum, Scheduler}, - }, -}; - -#[derive(Clone)] -pub struct Ethereum { - // This DB is solely used to access the first key generated, as needed to determine the Router's - // address. Accordingly, all methods present are consistent to a Serai chain with a finalized - // first key (regardless of local state), and this is safe. 
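This retired `router()` helper and the new `TransactionPublisher::router` above rely on the same double-checked pattern around `tokio::sync::RwLock`: check under a read lock, take the write lock, and check once more before performing the expensive on-chain lookup. Distilled into a standalone sketch (the actual code downgrades the write guard rather than cloning):

```rust
use tokio::sync::RwLock;

// Lazily initialize a shared value, checking twice so concurrent callers
// don't race the initializer (sketch of the pattern, not the actual code)
async fn get_or_init<T: Clone, F: core::future::Future<Output = T>>(
  cell: &RwLock<Option<T>>,
  init: F,
) -> T {
  // Fast path: the value was already initialized
  if let Some(value) = cell.read().await.as_ref() {
    return value.clone();
  }
  let mut guard = cell.write().await;
  // Check again in case another task initialized it while we awaited the lock
  if guard.is_none() {
    *guard = Some(init.await);
  }
  guard.as_ref().unwrap().clone()
}
```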
- db: D, - #[cfg_attr(test, allow(unused))] - relayer_url: String, - provider: Arc>, - deployer: Deployer, - router: Arc>>, -} -impl Ethereum { - pub async fn new(db: D, daemon_url: String, relayer_url: String) -> Self { - let provider = Arc::new(RootProvider::new( - ClientBuilder::default().transport(SimpleRequest::new(daemon_url), true), - )); - - let mut deployer = Deployer::new(provider.clone()).await; - while !matches!(deployer, Ok(Some(_))) { - log::error!("Deployer wasn't deployed yet or networking error"); - sleep(Duration::from_secs(5)).await; - deployer = Deployer::new(provider.clone()).await; - } - let deployer = deployer.unwrap().unwrap(); - - dbg!(&relayer_url); - dbg!(relayer_url.len()); - Ethereum { db, relayer_url, provider, deployer, router: Arc::new(RwLock::new(None)) } - } - - // Obtain a reference to the Router, sleeping until it's deployed if it hasn't already been. - // This is guaranteed to return Some. - pub async fn router(&self) -> RwLockReadGuard<'_, Option> { - // If we've already instantiated the Router, return a read reference - { - let router = self.router.read().await; - if router.is_some() { - return router; - } - } - - // Instantiate it - let mut router = self.router.write().await; - // If another attempt beat us to it, return - if router.is_some() { - drop(router); - return self.router.read().await; - } - - // Get the first key from the DB - let first_key = - NetworkKeyDb::get(&self.db, Session(0)).expect("getting outputs before confirming a key"); - let key = Secp256k1::read_G(&mut first_key.as_slice()).unwrap(); - let public_key = PublicKey::new(key).unwrap(); - - // Find the router - let mut found = self.deployer.find_router(self.provider.clone(), &public_key).await; - while !matches!(found, Ok(Some(_))) { - log::error!("Router wasn't deployed yet or networking error"); - sleep(Duration::from_secs(5)).await; - found = self.deployer.find_router(self.provider.clone(), &public_key).await; - } - - // Set it - *router = Some(found.unwrap().unwrap()); - - // Downgrade to a read lock - // Explicitly doesn't use `downgrade` so that another pending write txn can realize it's no - // longer necessary - drop(router); - self.router.read().await - } -} - #[async_trait] impl Network for Ethereum { - const DUST: u64 = 0; // TODO - - const COST_TO_AGGREGATE: u64 = 0; - async fn get_outputs( &self, block: &Self::Block, @@ -220,66 +79,6 @@ impl Network for Ethereum { all_events } - async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Self::Block, - ) -> HashMap< - [u8; 32], - ( - usize, - >::Id, - ::Completion, - ), - > { - let mut res = HashMap::new(); - if eventualities.map.is_empty() { - return res; - } - - let router = self.router().await; - let router = router.as_ref().unwrap(); - - let past_scanned_epoch = loop { - match self.get_block(eventualities.block_number).await { - Ok(block) => break block, - Err(e) => log::error!("couldn't get the last scanned block in the tracker: {}", e), - } - sleep(Duration::from_secs(10)).await; - }; - assert_eq!( - past_scanned_epoch.start / 32, - u64::try_from(eventualities.block_number).unwrap(), - "assumption of tracker block number's relation to epoch start is incorrect" - ); - - // Iterate from after the epoch number in the tracker to the end of this epoch - for block_num in (past_scanned_epoch.end() + 1) ..= block.end() { - let executed = loop { - match router.executed_commands(block_num).await { - Ok(executed) => break executed, - Err(e) => log::error!("couldn't get the executed 
commands in block {block_num}: {e}"), - } - sleep(Duration::from_secs(10)).await; - }; - - for executed in executed { - let lookup = executed.nonce.to_le_bytes().to_vec(); - if let Some((plan_id, eventuality)) = eventualities.map.get(&lookup) { - if let Some(command) = - SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &executed.signature) - { - res.insert(*plan_id, (block_num.try_into().unwrap(), executed.tx_id, command)); - eventualities.map.remove(&lookup); - } - } - } - } - eventualities.block_number = (block.start / 32).try_into().unwrap(); - - res - } - async fn publish_completion( &self, completion: &::Completion, @@ -333,14 +132,6 @@ impl Network for Ethereum { } } - async fn confirm_completion( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> Result::Completion>, NetworkError> { - Ok(SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature)) - } - #[cfg(test)] async fn get_block_number(&self, id: &>::Id) -> usize { self @@ -355,15 +146,6 @@ impl Network for Ethereum { .unwrap() } - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> bool { - SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature).is_some() - } - #[cfg(test)] async fn get_transaction_by_eventuality( &self, @@ -474,4 +256,3 @@ impl Network for Ethereum { self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap() } } -*/ diff --git a/processor/ethereum/src/main.rs b/processor/ethereum/src/main.rs index bfb9a8df4..7acdffdbd 100644 --- a/processor/ethereum/src/main.rs +++ b/processor/ethereum/src/main.rs @@ -75,8 +75,8 @@ async fn main() { bin::main_loop::( db.clone(), - Rpc { provider: provider.clone() }, - Scheduler::new(SmartContract { chain_id }), + Rpc { db: db.clone(), provider: provider.clone() }, + Scheduler::::new(SmartContract { chain_id }), TransactionPublisher::new(db, provider, { let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") .expect("ethereum relayer hostname wasn't specified") diff --git a/processor/ethereum/src/primitives/block.rs b/processor/ethereum/src/primitives/block.rs index cd26b400f..d5f0cb99a 100644 --- a/processor/ethereum/src/primitives/block.rs +++ b/processor/ethereum/src/primitives/block.rs @@ -20,8 +20,6 @@ pub(crate) struct Epoch { pub(crate) start: u64, // The hash of the last block within this Epoch. pub(crate) end_hash: [u8; 32], - // The monotonic time for this Epoch. 
- pub(crate) time: u64, } impl Epoch { @@ -42,9 +40,9 @@ impl primitives::BlockHeader for Epoch { #[derive(Clone, PartialEq, Eq, Debug)] pub(crate) struct FullEpoch { - epoch: Epoch, - instructions: Vec, - executed: Vec, + pub(crate) epoch: Epoch, + pub(crate) instructions: Vec, + pub(crate) executed: Vec, } impl primitives::Block for FullEpoch { diff --git a/processor/ethereum/src/primitives/mod.rs b/processor/ethereum/src/primitives/mod.rs index f0d318029..00a5980f7 100644 --- a/processor/ethereum/src/primitives/mod.rs +++ b/processor/ethereum/src/primitives/mod.rs @@ -8,3 +8,5 @@ pub(crate) const DAI: [u8; 20] = Ok(res) => res, Err(_) => panic!("invalid non-test DAI hex address"), }; + +pub(crate) const TOKENS: [[u8; 20]; 1] = [DAI]; diff --git a/processor/ethereum/src/publisher.rs b/processor/ethereum/src/publisher.rs index 5a7a958a0..4a62bad72 100644 --- a/processor/ethereum/src/publisher.rs +++ b/processor/ethereum/src/publisher.rs @@ -50,8 +50,12 @@ impl TransactionPublisher { if router.is_none() { let Some(router_actual) = Router::new( self.rpc.clone(), - &PublicKey::new(InitialSeraiKey::get(&self.db).unwrap().0) - .expect("initial key used by Serai wasn't representable on Ethereum"), + &PublicKey::new( + InitialSeraiKey::get(&self.db) + .expect("publishing a transaction yet never confirmed a key") + .0, + ) + .expect("initial key used by Serai wasn't representable on Ethereum"), ) .await? else { diff --git a/processor/ethereum/src/rpc.rs b/processor/ethereum/src/rpc.rs index e3f25f86c..a53e6b334 100644 --- a/processor/ethereum/src/rpc.rs +++ b/processor/ethereum/src/rpc.rs @@ -1,6 +1,7 @@ use core::future::Future; -use std::sync::Arc; +use std::{sync::Arc, collections::HashSet}; +use alloy_core::primitives::B256; use alloy_rpc_types_eth::{BlockTransactionsKind, BlockNumberOrTag}; use alloy_transport::{RpcError, TransportErrorKind}; use alloy_simple_request_transport::SimpleRequest; @@ -8,16 +9,26 @@ use alloy_provider::{Provider, RootProvider}; use serai_client::primitives::{NetworkId, Coin, Amount}; +use serai_db::Db; + use scanner::ScannerFeed; -use crate::block::{Epoch, FullEpoch}; +use ethereum_schnorr::PublicKey; +use ethereum_erc20::{TopLevelTransfer, Erc20}; +use ethereum_router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction, Router}; + +use crate::{ + TOKENS, InitialSeraiKey, + block::{Epoch, FullEpoch}, +}; #[derive(Clone)] -pub(crate) struct Rpc { +pub(crate) struct Rpc { + pub(crate) db: D, pub(crate) provider: Arc>, } -impl ScannerFeed for Rpc { +impl ScannerFeed for Rpc { const NETWORK: NetworkId = NetworkId::Ethereum; // We only need one confirmation as Ethereum properly finalizes @@ -62,7 +73,22 @@ impl ScannerFeed for Rpc { &self, number: u64, ) -> impl Send + Future> { - async move { todo!("TODO") } + async move { + let header = self + .provider + .get_block(BlockNumberOrTag::Number(number).into(), BlockTransactionsKind::Hashes) + .await? + .ok_or_else(|| { + TransportErrorKind::Custom( + "asked for time of a block our node doesn't have".to_string().into(), + ) + })? 
+ .header; + // This is monotonic ever since the merge + // https://github.com/ethereum/consensus-specs/blob/4afe39822c9ad9747e0f5635cca117c18441ec1b + // /specs/bellatrix/beacon-chain.md?plain=1#L393-L394 + Ok(header.timestamp) + } } fn unchecked_block_header_by_number( @@ -104,25 +130,91 @@ impl ScannerFeed for Rpc { .header; let end_hash = end_header.hash.into(); - let time = end_header.timestamp; - Ok(Epoch { prior_end_hash, start, end_hash, time }) + Ok(Epoch { prior_end_hash, start, end_hash }) } } - #[rustfmt::skip] // It wants to improperly format the `async move` to a single line fn unchecked_block_by_number( &self, number: u64, ) -> impl Send + Future> { async move { - todo!("TODO") + let epoch = self.unchecked_block_header_by_number(number).await?; + let mut instructions = vec![]; + let mut executed = vec![]; + + let Some(router) = Router::new( + self.provider.clone(), + &PublicKey::new( + InitialSeraiKey::get(&self.db).expect("fetching a block yet never confirmed a key").0, + ) + .expect("initial key used by Serai wasn't representable on Ethereum"), + ) + .await? + else { + // The Router wasn't deployed yet so we cannot have any on-chain interactions + // If the Router has been deployed by the block we've synced to, it won't have any events + // for these blocks anyway, so this doesn't risk a consensus split + // TODO: This does, as we can have top-level transfers to the Router before it's deployed + return Ok(FullEpoch { epoch, instructions, executed }); + }; + + let mut to_check = epoch.end_hash; + while to_check != epoch.prior_end_hash { + let to_check_block = self + .provider + .get_block(B256::from(to_check).into(), BlockTransactionsKind::Hashes) + .await? + .ok_or_else(|| { + TransportErrorKind::Custom( + format!( + "ethereum node didn't have requested block: {}. was the node reset?", + hex::encode(to_check) + ) + .into(), + ) + })? + .header; + + instructions.append( + &mut router.in_instructions(to_check_block.number, &HashSet::from(TOKENS)).await?, + ); + for token in TOKENS { + for TopLevelTransfer { id, from, amount, data } in + Erc20::new(self.provider.clone(), token) + .top_level_transfers(to_check_block.number, router.address()) + .await? 
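// Top-level ERC20 `Transfer`s to the Router don't produce the Router's own InInstruction
// event, so they're recovered from each token's logs here; note the `u64::MAX` index assigned
// below, presumably so these IDs can't collide with event-derived InInstructions.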
+ { + instructions.push(EthereumInInstruction { + id: (id, u64::MAX), + from, + coin: EthereumCoin::Erc20(token), + amount, + data, + }); + } + } + + executed.append(&mut router.executed(to_check_block.number).await?); + + to_check = *to_check_block.parent_hash; + } + + Ok(FullEpoch { epoch, instructions, executed }) } } fn dust(coin: Coin) -> Amount { assert_eq!(coin.network(), NetworkId::Ethereum); - todo!("TODO") + #[allow(clippy::inconsistent_digit_grouping)] + match coin { + // 5 USD if Ether is ~3300 USD + Coin::Ether => Amount(1_500_00), + // 5 DAI + Coin::Dai => Amount(5_000_000_00), + _ => unreachable!(), + } } fn cost_to_aggregate( @@ -132,7 +224,7 @@ impl ScannerFeed for Rpc { ) -> impl Send + Future> { async move { assert_eq!(coin.network(), NetworkId::Ethereum); - // TODO + // There is no cost to aggregate as we receive to an account Ok(Amount(0)) } } diff --git a/processor/ethereum/src/scheduler.rs b/processor/ethereum/src/scheduler.rs index 6683eeac7..39f3fed38 100644 --- a/processor/ethereum/src/scheduler.rs +++ b/processor/ethereum/src/scheduler.rs @@ -2,6 +2,8 @@ use alloy_core::primitives::U256; use serai_client::primitives::{NetworkId, Coin, Balance}; +use serai_db::Db; + use primitives::Payment; use scanner::{KeyFor, AddressFor, EventualityFor}; @@ -32,15 +34,15 @@ fn balance_to_ethereum_amount(balance: Balance) -> U256 { pub(crate) struct SmartContract { pub(crate) chain_id: U256, } -impl smart_contract_scheduler::SmartContract for SmartContract { +impl smart_contract_scheduler::SmartContract> for SmartContract { type SignableTransaction = Action; fn rotate( &self, nonce: u64, - retiring_key: KeyFor, - new_key: KeyFor, - ) -> (Self::SignableTransaction, EventualityFor) { + retiring_key: KeyFor>, + new_key: KeyFor>, + ) -> (Self::SignableTransaction, EventualityFor>) { let action = Action::SetKey { chain_id: self.chain_id, nonce, @@ -52,9 +54,9 @@ impl smart_contract_scheduler::SmartContract for SmartContract { fn fulfill( &self, nonce: u64, - key: KeyFor, - payments: Vec>>, - ) -> Vec<(Self::SignableTransaction, EventualityFor)> { + key: KeyFor>, + payments: Vec>>>, + ) -> Vec<(Self::SignableTransaction, EventualityFor>)> { let mut outs = Vec::with_capacity(payments.len()); for payment in payments { outs.push(( @@ -75,4 +77,4 @@ impl smart_contract_scheduler::SmartContract for SmartContract { } } -pub(crate) type Scheduler = smart_contract_scheduler::Scheduler; +pub(crate) type Scheduler = smart_contract_scheduler::Scheduler, SmartContract>; From 155f80e02e00dc366e92ba202a581472e2503f09 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 02:46:32 -0400 Subject: [PATCH 161/179] Remove unused code in the Ethereum processor --- processor/ethereum/src/primitives/block.rs | 9 --------- processor/ethereum/src/primitives/machine.rs | 6 +++--- processor/ethereum/src/rpc.rs | 2 +- processor/ethereum/src/scheduler.rs | 4 ++-- 4 files changed, 6 insertions(+), 15 deletions(-) diff --git a/processor/ethereum/src/primitives/block.rs b/processor/ethereum/src/primitives/block.rs index d5f0cb99a..723e099d4 100644 --- a/processor/ethereum/src/primitives/block.rs +++ b/processor/ethereum/src/primitives/block.rs @@ -16,19 +16,10 @@ use crate::{output::Output, transaction::Eventuality}; pub(crate) struct Epoch { // The hash of the block which ended the prior Epoch. pub(crate) prior_end_hash: [u8; 32], - // The first block number within this Epoch. - pub(crate) start: u64, // The hash of the last block within this Epoch. 
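// (With `start` and the `end()` helper gone, an Epoch is identified purely by its boundary
// hashes; the scanner walks `parent_hash` links from `end_hash` back to `prior_end_hash`
// rather than iterating over block numbers.)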
pub(crate) end_hash: [u8; 32], } -impl Epoch { - // The block number of the last block within this epoch. - fn end(&self) -> u64 { - self.start + 31 - } -} - impl primitives::BlockHeader for Epoch { fn id(&self) -> [u8; 32] { self.end_hash diff --git a/processor/ethereum/src/primitives/machine.rs b/processor/ethereum/src/primitives/machine.rs index f37fb4404..1762eb283 100644 --- a/processor/ethereum/src/primitives/machine.rs +++ b/processor/ethereum/src/primitives/machine.rs @@ -102,9 +102,9 @@ impl SignMachine for ActionSignMachine { unimplemented!() } fn from_cache( - params: Self::Params, - keys: Self::Keys, - cache: CachedPreprocess, + _params: Self::Params, + _keys: Self::Keys, + _cache: CachedPreprocess, ) -> (Self, Self::Preprocess) { unimplemented!() } diff --git a/processor/ethereum/src/rpc.rs b/processor/ethereum/src/rpc.rs index a53e6b334..0769c5c39 100644 --- a/processor/ethereum/src/rpc.rs +++ b/processor/ethereum/src/rpc.rs @@ -131,7 +131,7 @@ impl ScannerFeed for Rpc { let end_hash = end_header.hash.into(); - Ok(Epoch { prior_end_hash, start, end_hash }) + Ok(Epoch { prior_end_hash, end_hash }) } } diff --git a/processor/ethereum/src/scheduler.rs b/processor/ethereum/src/scheduler.rs index 39f3fed38..55e091fc1 100644 --- a/processor/ethereum/src/scheduler.rs +++ b/processor/ethereum/src/scheduler.rs @@ -40,7 +40,7 @@ impl smart_contract_scheduler::SmartContract> for SmartContract { fn rotate( &self, nonce: u64, - retiring_key: KeyFor>, + _retiring_key: KeyFor>, new_key: KeyFor>, ) -> (Self::SignableTransaction, EventualityFor>) { let action = Action::SetKey { @@ -54,7 +54,7 @@ impl smart_contract_scheduler::SmartContract> for SmartContract { fn fulfill( &self, nonce: u64, - key: KeyFor>, + _key: KeyFor>, payments: Vec>>>, ) -> Vec<(Self::SignableTransaction, EventualityFor>)> { let mut outs = Vec::with_capacity(payments.len()); From f837cc98da8c0020d50d83886f0fb664b95b62cb Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 02:58:02 -0400 Subject: [PATCH 162/179] Read NetworkId from ScannerFeed trait, not env --- processor/bin/src/coordinator.rs | 17 ++++++----------- processor/bin/src/lib.rs | 2 +- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/processor/bin/src/coordinator.rs b/processor/bin/src/coordinator.rs index 6fe5aea08..e05712cfe 100644 --- a/processor/bin/src/coordinator.rs +++ b/processor/bin/src/coordinator.rs @@ -5,13 +5,15 @@ use tokio::sync::mpsc; use scale::Encode; use serai_client::{ - primitives::{NetworkId, Signature}, + primitives::Signature, validator_sets::primitives::Session, in_instructions::primitives::{Batch, SignedBatch}, }; use serai_db::{Get, DbTxn, Db, create_db, db_channel}; -use serai_env as env; + +use scanner::ScannerFeed; + use message_queue::{Service, Metadata, client::MessageQueue}; create_db! 
{ @@ -60,18 +62,11 @@ pub(crate) struct Coordinator { } impl Coordinator { - pub(crate) fn new(db: crate::Db) -> Self { + pub(crate) fn new(db: crate::Db) -> Self { let (received_message_send, received_message_recv) = mpsc::unbounded_channel(); let (sent_message_send, mut sent_message_recv) = mpsc::unbounded_channel(); - let network_id = match env::var("NETWORK").expect("network wasn't specified").as_str() { - "bitcoin" => NetworkId::Bitcoin, - "ethereum" => NetworkId::Ethereum, - "monero" => NetworkId::Monero, - _ => panic!("unrecognized network"), - }; - // TODO: Read this from ScannerFeed - let service = Service::Processor(network_id); + let service = Service::Processor(S::NETWORK); let message_queue = Arc::new(MessageQueue::from_env(service)); // Spawn a task to move messages from the message-queue to our database so we can achieve diff --git a/processor/bin/src/lib.rs b/processor/bin/src/lib.rs index 651514ad2..662eafb9d 100644 --- a/processor/bin/src/lib.rs +++ b/processor/bin/src/lib.rs @@ -182,7 +182,7 @@ pub async fn main_loop< scheduler: Sch, publisher: impl TransactionPublisher>, ) { - let mut coordinator = Coordinator::new(db.clone()); + let mut coordinator = Coordinator::new::(db.clone()); let mut key_gen = key_gen::(); let mut scanner = Scanner::new(db.clone(), feed.clone(), scheduler.clone()).await; From b85df85c0b55a51b61fece6c9927f41efcf40514 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 03:16:17 -0400 Subject: [PATCH 163/179] Merge BlockWithAcknowledgedBatch and BatchWithoutAcknowledgeBatch Offers a simpler API to the coordinator. --- processor/bin/src/lib.rs | 57 +++++++++++++++----------- processor/messages/src/lib.rs | 29 ++++--------- processor/scanner/Cargo.toml | 2 + processor/scanner/src/lib.rs | 7 +--- processor/scanner/src/substrate/db.rs | 12 +++--- processor/scanner/src/substrate/mod.rs | 14 +++---- 6 files changed, 59 insertions(+), 62 deletions(-) diff --git a/processor/bin/src/lib.rs b/processor/bin/src/lib.rs index 662eafb9d..86a3a0cd7 100644 --- a/processor/bin/src/lib.rs +++ b/processor/bin/src/lib.rs @@ -270,32 +270,43 @@ pub async fn main_loop< // This is a cheap call signers.retire_session(txn, session, &key) } - messages::substrate::CoordinatorMessage::BlockWithBatchAcknowledgement { - block: _, - batch_id, - in_instruction_succeededs, - burns, + messages::substrate::CoordinatorMessage::Block { + serai_block_number: _, + batches, + mut burns, } => { - let mut txn = txn.take().unwrap(); - let scanner = scanner.as_mut().unwrap(); - let key_to_activate = KeyToActivate::>::try_recv(&mut txn).map(|key| key.0); - // This is a cheap call as it internally just queues this to be done later - scanner.acknowledge_batch( - txn, - batch_id, - in_instruction_succeededs, - burns, - key_to_activate, - ) - } - messages::substrate::CoordinatorMessage::BlockWithoutBatchAcknowledgement { - block: _, - burns, - } => { - let txn = txn.take().unwrap(); let scanner = scanner.as_mut().unwrap(); + + // Substrate sets this limit to prevent DoSs from malicious validator sets + // That bound lets us consume this txn in the following loop body, as an optimization + assert!(batches.len() <= 1); + for messages::substrate::ExecutedBatch { id, in_instructions } in batches { + let key_to_activate = + KeyToActivate::>::try_recv(txn.as_mut().unwrap()).map(|key| key.0); + + /* + `acknowledge_batch` takes burns to optimize handling returns with standard payments. 
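(That is, returns triggered by this Batch's burns can be scheduled together with the Batch's standard payments, rather than as a separate wave of transactions.)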
+ That's why handling these with a Batch (and not waiting until the following potential + `queue_burns` call) makes sense. As for which Batch, the first is equally valid unless + we want to start introspecting (and should be our only Batch anyways). + */ + let mut this_batchs_burns = vec![]; + std::mem::swap(&mut burns, &mut this_batchs_burns); + + // This is a cheap call as it internally just queues this to be done later + let _: () = scanner.acknowledge_batch( + txn.take().unwrap(), + id, + in_instructions, + this_batchs_burns, + key_to_activate, + ); + } + + // This is a cheap call as it internally just queues this to be done later - scanner.queue_burns(txn, burns) + if !burns.is_empty() { + let _: () = scanner.queue_burns(txn.take().unwrap(), burns); + } } }, }; diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index 080864dc2..659491d49 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -181,7 +181,6 @@ pub mod coordinator { pub mod substrate { use super::*; - /* TODO #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum InInstructionResult { Succeeded, @@ -189,15 +188,9 @@ pub mod substrate { } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub struct ExecutedBatch { - batch_id: u32, - in_instructions: Vec, + pub id: u32, + pub in_instructions: Vec, } - Block { - block: u64, - batches: Vec, - burns: Vec, - } - */ #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { @@ -205,15 +198,12 @@ pub mod substrate { SetKeys { serai_time: u64, session: Session, key_pair: KeyPair }, /// Slashes reported on the Serai blockchain OR the process timed out. SlashesReported { session: Session }, - /// The data from a block which acknowledged a Batch. - BlockWithBatchAcknowledgement { - block: u64, - batch_id: u32, - in_instruction_succeededs: Vec, burns: Vec, }, - /// The data from a block which didn't acknowledge a Batch. - BlockWithoutBatchAcknowledgement { block: u64, burns: Vec }, + /// A block from Serai with relevance to this processor. + Block { + serai_block_number: u64, + batches: Vec, + burns: Vec, + }, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] @@ -323,11 +313,8 @@ impl CoordinatorMessage { let (sub, id) = match msg { substrate::CoordinatorMessage::SetKeys { session, .. } => (0, session.encode()), substrate::CoordinatorMessage::SlashesReported { session } => (1, session.encode()), - substrate::CoordinatorMessage::BlockWithBatchAcknowledgement { block, .. } => { - (2, block.encode()) - } - substrate::CoordinatorMessage::BlockWithoutBatchAcknowledgement { block, .. } => { - (3, block.encode()) + substrate::CoordinatorMessage::Block { serai_block_number, .. 
} => { + (2, serai_block_number.encode()) } }; diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml index 1ff154cd8..09a6a937c 100644 --- a/processor/scanner/Cargo.toml +++ b/processor/scanner/Cargo.toml @@ -31,6 +31,8 @@ tokio = { version = "1", default-features = false, features = ["rt-multi-thread" serai-db = { path = "../../common/db" } +messages = { package = "serai-processor-messages", path = "../messages" } + serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std", "borsh"] } serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index e591d2101..72d661a30 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -429,9 +429,6 @@ impl Scanner { /// This means the specified Batch was ordered on Serai in relation to Burn events, and all /// validators have achieved synchrony on it. /// - /// `in_instruction_succeededs` is the result of executing each InInstruction within this batch, - /// true if it succeeded and false if it did not (and did not cause any state changes on Serai). - /// /// `burns` is a list of Burns to queue with the acknowledgement of this Batch for efficiency's /// sake. Any Burns passed here MUST NOT be passed into any other call of `acknowledge_batch` nor /// `queue_burns`. Doing so will cause them to be executed multiple times. @@ -441,7 +438,7 @@ impl Scanner { &mut self, mut txn: impl DbTxn, batch_id: u32, - in_instruction_succeededs: Vec, + in_instruction_results: Vec, burns: Vec, key_to_activate: Option>, ) { @@ -451,7 +448,7 @@ impl Scanner { substrate::queue_acknowledge_batch::( &mut txn, batch_id, - in_instruction_succeededs, + in_instruction_results, burns, key_to_activate, ); diff --git a/processor/scanner/src/substrate/db.rs b/processor/scanner/src/substrate/db.rs index 184358569..c1a1b0e22 100644 --- a/processor/scanner/src/substrate/db.rs +++ b/processor/scanner/src/substrate/db.rs @@ -12,7 +12,7 @@ use crate::{ScannerFeed, KeyFor}; #[derive(BorshSerialize, BorshDeserialize)] struct AcknowledgeBatchEncodable { batch_id: u32, - in_instruction_succeededs: Vec, + in_instruction_results: Vec, burns: Vec, key_to_activate: Option>, } @@ -25,7 +25,7 @@ enum ActionEncodable { pub(crate) struct AcknowledgeBatch { pub(crate) batch_id: u32, - pub(crate) in_instruction_succeededs: Vec, + pub(crate) in_instruction_results: Vec, pub(crate) burns: Vec, pub(crate) key_to_activate: Option>, } @@ -46,7 +46,7 @@ impl SubstrateDb { pub(crate) fn queue_acknowledge_batch( txn: &mut impl DbTxn, batch_id: u32, - in_instruction_succeededs: Vec, + in_instruction_results: Vec, burns: Vec, key_to_activate: Option>, ) { @@ -54,7 +54,7 @@ impl SubstrateDb { txn, &ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { batch_id, - in_instruction_succeededs, + in_instruction_results, burns, key_to_activate: key_to_activate.map(|key| key.to_bytes().as_ref().to_vec()), }), @@ -69,12 +69,12 @@ impl SubstrateDb { Some(match action_encodable { ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { batch_id, - in_instruction_succeededs, + in_instruction_results, burns, key_to_activate, }) => Action::AcknowledgeBatch(AcknowledgeBatch { batch_id, - in_instruction_succeededs, + in_instruction_results, burns, 
key_to_activate: key_to_activate.map(|key| { let mut repr = as GroupEncoding>::Repr::default(); diff --git a/processor/scanner/src/substrate/mod.rs b/processor/scanner/src/substrate/mod.rs index 89186c69f..ce28470d4 100644 --- a/processor/scanner/src/substrate/mod.rs +++ b/processor/scanner/src/substrate/mod.rs @@ -16,14 +16,14 @@ use db::*; pub(crate) fn queue_acknowledge_batch( txn: &mut impl DbTxn, batch_id: u32, - in_instruction_succeededs: Vec, + in_instruction_results: Vec, burns: Vec, key_to_activate: Option>, ) { SubstrateDb::::queue_acknowledge_batch( txn, batch_id, - in_instruction_succeededs, + in_instruction_results, burns, key_to_activate, ) @@ -67,7 +67,7 @@ impl ContinuallyRan for SubstrateTask { match action { Action::AcknowledgeBatch(AcknowledgeBatch { batch_id, - in_instruction_succeededs, + in_instruction_results, mut burns, key_to_activate, }) => { @@ -127,16 +127,16 @@ impl ContinuallyRan for SubstrateTask { let return_information = report::take_return_information::(&mut txn, batch_id) .expect("didn't save the return information for Batch we published"); assert_eq!( - in_instruction_succeededs.len(), + in_instruction_results.len(), return_information.len(), "amount of InInstruction succeededs differed from amount of return information saved" ); // We map these into standard Burns - for (succeeded, return_information) in - in_instruction_succeededs.into_iter().zip(return_information) + for (result, return_information) in + in_instruction_results.into_iter().zip(return_information) { - if succeeded { + if result == messages::substrate::InInstructionResult::Succeeded { continue; } From 3385c266da36e1c7f386f0f30f7831346a747189 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 03:18:14 -0400 Subject: [PATCH 164/179] Mark files in TODO/ with "TODO" to ensure it pops up on search --- Cargo.lock | 1 + processor/ethereum/TODO/old_processor.rs | 82 +----------------------- processor/ethereum/TODO/tests/crypto.rs | 2 + processor/ethereum/TODO/tests/mod.rs | 2 + processor/ethereum/TODO/tests/router.rs | 2 + 5 files changed, 8 insertions(+), 81 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00cb2ac50..065432d0e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8801,6 +8801,7 @@ dependencies = [ "serai-db", "serai-in-instructions-primitives", "serai-primitives", + "serai-processor-messages", "serai-processor-primitives", "serai-processor-scheduler-primitives", "tokio", diff --git a/processor/ethereum/TODO/old_processor.rs b/processor/ethereum/TODO/old_processor.rs index 2e2daa3e4..50250c435 100644 --- a/processor/ethereum/TODO/old_processor.rs +++ b/processor/ethereum/TODO/old_processor.rs @@ -1,83 +1,4 @@ -#[async_trait] -impl Network for Ethereum { - async fn get_outputs( - &self, - block: &Self::Block, - _: ::G, - ) -> Vec { - let router = self.router().await; - let router = router.as_ref().unwrap(); - // Grab the key at the end of the epoch - let key_at_end_of_block = loop { - match router.key_at_end_of_block(block.start + 31).await { - Ok(Some(key)) => break key, - Ok(None) => return vec![], - Err(e) => { - log::error!("couldn't connect to router for the key at the end of the block: {e:?}"); - sleep(Duration::from_secs(5)).await; - continue; - } - } - }; - - let mut all_events = vec![]; - let mut top_level_txids = HashSet::new(); - for erc20_addr in [DAI] { - let erc20 = Erc20::new(self.provider.clone(), erc20_addr); - - for block in block.start .. 
(block.start + 32) { - let transfers = loop { - match erc20.top_level_transfers(block, router.address()).await { - Ok(transfers) => break transfers, - Err(e) => { - log::error!("couldn't connect to Ethereum node for the top-level transfers: {e:?}"); - sleep(Duration::from_secs(5)).await; - continue; - } - } - }; - - for transfer in transfers { - top_level_txids.insert(transfer.id); - all_events.push(EthereumInInstruction { - id: (transfer.id, 0), - from: transfer.from, - coin: EthereumCoin::Erc20(erc20_addr), - amount: transfer.amount, - data: transfer.data, - key_at_end_of_block, - }); - } - } - } - - for block in block.start .. (block.start + 32) { - let mut events = router.in_instructions(block, &HashSet::from([DAI])).await; - while let Err(e) = events { - log::error!("couldn't connect to Ethereum node for the Router's events: {e:?}"); - sleep(Duration::from_secs(5)).await; - events = router.in_instructions(block, &HashSet::from([DAI])).await; - } - let mut events = events.unwrap(); - for event in &mut events { - // A transaction should either be a top-level transfer or a Router InInstruction - if top_level_txids.contains(&event.id.0) { - panic!("top-level transfer had {} and router had {:?}", hex::encode(event.id.0), event); - } - // Overwrite the key at end of block to key at end of epoch - event.key_at_end_of_block = key_at_end_of_block; - } - all_events.extend(events); - } - - for event in &all_events { - assert!( - coin_to_serai_coin(&event.coin).is_some(), - "router yielded events for unrecognized coins" - ); - } - all_events - } +TODO async fn publish_completion( &self, @@ -255,4 +176,3 @@ impl Network for Ethereum { // Yield the freshly mined block self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap() } -} diff --git a/processor/ethereum/TODO/tests/crypto.rs b/processor/ethereum/TODO/tests/crypto.rs index a4f86ae9f..20ba40b8d 100644 --- a/processor/ethereum/TODO/tests/crypto.rs +++ b/processor/ethereum/TODO/tests/crypto.rs @@ -1,3 +1,5 @@ +// TODO + use rand_core::OsRng; use group::ff::{Field, PrimeField}; diff --git a/processor/ethereum/TODO/tests/mod.rs b/processor/ethereum/TODO/tests/mod.rs index 91b03d9b7..a865868f6 100644 --- a/processor/ethereum/TODO/tests/mod.rs +++ b/processor/ethereum/TODO/tests/mod.rs @@ -1,3 +1,5 @@ +// TODO + use std::{sync::Arc, collections::HashMap}; use rand_core::OsRng; diff --git a/processor/ethereum/TODO/tests/router.rs b/processor/ethereum/TODO/tests/router.rs index 724348cc3..63e5f1d53 100644 --- a/processor/ethereum/TODO/tests/router.rs +++ b/processor/ethereum/TODO/tests/router.rs @@ -1,3 +1,5 @@ +// TODO + use std::{convert::TryFrom, sync::Arc, collections::HashMap}; use rand_core::OsRng; From dd9f6c0ece533b2dfeee52910d135e2aa6152783 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 21:19:34 -0400 Subject: [PATCH 165/179] Update to the latest bitcoin-serai --- Cargo.lock | 2 -- processor/bitcoin/Cargo.toml | 2 -- processor/bitcoin/src/key_gen.rs | 35 ------------------- .../bitcoin/src/primitives/transaction.rs | 3 +- 4 files changed, 1 insertion(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 065432d0e..12da8dd69 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8106,9 +8106,7 @@ dependencies = [ "borsh", "ciphersuite", "dkg", - "flexible-transcript", "hex", - "k256", "log", "modular-frost", "parity-scale-codec", diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml index 2a69d2343..90b9566b6 100644 --- a/processor/bitcoin/Cargo.toml +++ b/processor/bitcoin/Cargo.toml @@ 
-23,8 +23,6 @@ hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } -k256 = { version = "0.13", default-features = false, features = ["std"] } ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } diff --git a/processor/bitcoin/src/key_gen.rs b/processor/bitcoin/src/key_gen.rs index bc911676a..415441348 100644 --- a/processor/bitcoin/src/key_gen.rs +++ b/processor/bitcoin/src/key_gen.rs @@ -1,8 +1,6 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; use frost::ThresholdKeys; -use bitcoin_serai::bitcoin::{hashes::Hash, TapTweakHash}; - use crate::{primitives::x_coord_to_even_point, scan::scanner}; pub(crate) struct KeyGenParams; @@ -12,39 +10,6 @@ impl key_gen::KeyGenParams for KeyGenParams { type ExternalNetworkCiphersuite = Secp256k1; fn tweak_keys(keys: &mut ThresholdKeys) { - /* - Offset the keys by their hash to prevent a malicious participant from inserting a script - path, as specified in - https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki#cite_note-23 - - This isn't exactly the same, as we then increment the key until it happens to be even, yet - the goal is simply that someone who biases the key-gen can't insert their own script path. - By adding the hash of the key to the key, anyone who attempts such bias will change the key - used (changing the bias necessary). - - This is also potentially unnecessary for Serai, which uses an eVRF-based DKG. While that can - be biased (by manipulating who participates as we use it robustly and only require `t` - participants), contributions cannot be arbitrarily defined. That presumably requires - performing a search of the possible keys for some collision with 2**128 work. It's better to - offset regardless and avoid this question however. - */ - { - use k256::elliptic_curve::{ - bigint::{Encoding, U256}, - ops::Reduce, - }; - let tweak_hash = TapTweakHash::hash(&keys.group_key().to_bytes().as_slice()[1 ..]); - /* - https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki#cite_ref-13-0 states how the - bias is negligible. This reduction shouldn't ever occur, yet if it did, the script path - would be unusable due to a check the script path hash is less than the order. That doesn't - impact us as we don't want the script path to be usable. 
- */ - *keys = keys.offset(::F::reduce(U256::from_be_bytes( - *tweak_hash.to_raw_hash().as_ref(), - ))); - } - *keys = bitcoin_serai::wallet::tweak_keys(keys); // Also create a scanner to assert these keys, and all expected paths, are usable scanner(keys.group_key()); diff --git a/processor/bitcoin/src/primitives/transaction.rs b/processor/bitcoin/src/primitives/transaction.rs index 8e7a26f6c..9b81d2f0c 100644 --- a/processor/bitcoin/src/primitives/transaction.rs +++ b/processor/bitcoin/src/primitives/transaction.rs @@ -2,7 +2,6 @@ use std::io; use rand_core::{RngCore, CryptoRng}; -use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::Secp256k1; use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; @@ -81,7 +80,7 @@ impl PreprocessMachine for ClonableTransctionMachine { .0 .signable() .expect("signing an invalid SignableTransaction") - .multisig(&self.1, RecommendedTranscript::new(b"Serai Processor Bitcoin Transaction")) + .multisig(&self.1) .expect("incorrect keys used for SignableTransaction") .preprocess(rng) } From 1cb82d7516174a7908645534adbe5d0ec6d19d3a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 23:23:41 -0400 Subject: [PATCH 166/179] Correct forge fmt config --- .github/workflows/lint.yml | 2 +- networks/ethereum/schnorr/contracts/Schnorr.sol | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 63a676498..b994a3cbd 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -80,7 +80,7 @@ jobs: cache: false - name: Run forge fmt - run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TABLE_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol") + run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol") machete: runs-on: ubuntu-latest diff --git a/networks/ethereum/schnorr/contracts/Schnorr.sol b/networks/ethereum/schnorr/contracts/Schnorr.sol index 247e0fbe9..7405051ac 100644 --- a/networks/ethereum/schnorr/contracts/Schnorr.sol +++ b/networks/ethereum/schnorr/contracts/Schnorr.sol @@ -15,11 +15,7 @@ library Schnorr { // message := the message signed // c := Schnorr signature challenge // s := Schnorr signature solution - function verify(bytes32 px, bytes32 message, bytes32 c, bytes32 s) - internal - pure - returns (bool) - { + function verify(bytes32 px, bytes32 message, bytes32 c, bytes32 s) internal pure returns (bool) { // ecrecover = (m, v, r, s) -> key // We instead pass the following to obtain the nonce (not the key) // Then we hash it and verify it matches the challenge From 30e3a0c239dcb76e59ce904342e64ff4fbe1f2f9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 19 Sep 2024 23:24:20 -0400 Subject: [PATCH 167/179] Update the Router smart contract to pay fees to the caller The caller is paid a fixed fee per unit of gas spent. That arguably incentivizes the publisher to raise the gas used by internal calls, yet this doesn't affect the user UX as they'll have flatly paid the worst-case fee already.
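A rough sketch of the intended accounting (Rust-flavored pseudocode with hypothetical names, not the contract itself): the user flatly pays the worst-case fee up-front, the publisher is reimbursed per unit of gas actually spent, and the difference accrues to Serai as a buffer.

  // Worst-case fee charged to the user when their OutInstruction is created.
  fn user_fee(worst_case_gas: u128, fee_per_gas: u128) -> u128 {
    worst_case_gas * fee_per_gas
  }
  // Reimbursement paid to whoever publishes the `execute` transaction.
  fn publisher_fee(gas_used: u128, fee_per_gas: u128) -> u128 {
    gas_used * fee_per_gas
  }
  // The remainder builds the buffer for any costs which slip through the cracks.
  fn serai_buffer(worst_case_gas: u128, gas_used: u128, fee_per_gas: u128) -> u128 {
    (worst_case_gas - gas_used) * fee_per_gas
  }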
It does pose a risk where callers are arguably incentivized to cause transaction failures which consume all the gas, not just increased gas, yet: 1) Modern smart contracts don't error by consuming all the gas 2) This is presumably infeasible 3) Even if it was feasible, the gas fees gained presumably exceed the gas fees spent causing the failure The benefit to only paying the callers for the gas used, not the gas allotted, is it allows Serai to build up a buffer. While this should be minor, a few cents on every transaction at best, if we ever do have any costs slip through the cracks, it ideally is sufficient to handle those. --- .../ethereum/router/contracts/Router.sol | 118 +++++++++++------- processor/ethereum/router/src/lib.rs | 55 +++++--- 2 files changed, 108 insertions(+), 65 deletions(-) diff --git a/processor/ethereum/router/contracts/Router.sol b/processor/ethereum/router/contracts/Router.sol index bc0debde5..a74f82572 100644 --- a/processor/ethereum/router/contracts/Router.sol +++ b/processor/ethereum/router/contracts/Router.sol @@ -27,14 +27,13 @@ contract Router { } struct CodeDestination { - uint32 gas; + uint32 gas_limit; bytes code; } struct OutInstruction { DestinationType destinationType; bytes destination; - address coin; uint256 value; } @@ -79,7 +78,8 @@ contract Router { { // This DST needs a length prefix as well to prevent DSTs potentially being substrings of each // other, yet this is fine for our very well-defined, limited use bytes32 message = + keccak256(abi.encodePacked("updateSeraiKey", block.chainid, _nonce, newSeraiKey)); _nonce++; if (!Schnorr.verify(_seraiKey, message, signature.c, signature.s)) { @@ -89,9 +89,7 @@ contract Router { function inInstruction(address coin, uint256 amount, bytes memory instruction) external payable { if (coin == address(0)) { - if (amount != msg.value) { - revert InvalidAmount(); - } + if (amount != msg.value) revert InvalidAmount(); } else { (bool success, bytes memory res) = address(coin).call( abi.encodeWithSelector(IERC20.transferFrom.selector, msg.sender, address(this), amount) ); @@ -100,32 +98,30 @@ contract Router { // Require there was nothing returned, which is done by some non-standard tokens, or that the // ERC20 contract did in fact return true bool nonStandardResOrTrue = (res.length == 0) || abi.decode(res, (bool)); - if (!(success && nonStandardResOrTrue)) { - revert FailedTransfer(); - } + if (!(success && nonStandardResOrTrue)) revert FailedTransfer(); } /* - Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. The amount - instructed to be transferred may not actually be the amount transferred. - - If we add nonReentrant to every single function which can effect the balance, we can check the - amount exactly matches. This prevents transfers of less value than expected occurring, at - least, not without an additional transfer to top up the difference (which isn't routed through - this contract and accordingly isn't trying to artificially create events from this contract). - - If we don't add nonReentrant, a transfer can be started, and then a new transfer for the - difference can follow it up (again and again until a rounding error is reached). This contract - would believe all transfers were done in full, despite each only being done in part (except - for the last one). 
- - Given fee-on-transfer tokens aren't intended to be supported, the only token actively planned - to be supported is Dai and it doesn't have any fee-on-transfer logic, and how fee-on-transfer - tokens aren't even able to be supported at this time by the larger Serai network, we simply - classify this entire class of tokens as non-standard implementations which induce undefined - behavior. - - It is the Serai network's role not to add support for any non-standard implementations. + Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. The amount + instructed to be transferred may not actually be the amount transferred. + + If we add nonReentrant to every single function which can affect the balance, we can check the + amount exactly matches. This prevents transfers of less value than expected occurring, at + least, not without an additional transfer to top up the difference (which isn't routed through + this contract and accordingly isn't trying to artificially create events from this contract). + + If we don't add nonReentrant, a transfer can be started, and then a new transfer for the + difference can follow it up (again and again until a rounding error is reached). This contract + would believe all transfers were done in full, despite each only being done in part (except + for the last one). + + Given fee-on-transfer tokens aren't intended to be supported, the only token actively planned + to be supported is Dai and it doesn't have any fee-on-transfer logic, and how fee-on-transfer + tokens aren't even able to be supported at this time by the larger Serai network, we simply + classify this entire class of tokens as non-standard implementations which induce undefined + behavior. + + It is the Serai network's role not to add support for any non-standard implementations. */ emit InInstruction(msg.sender, coin, amount, instruction); } @@ -133,13 +129,13 @@ contract Router { // Perform a transfer out function _transferOut(address to, address coin, uint256 value) private { /* - We on purposely do not check if these calls succeed. A call either succeeded, and there's no - problem, or the call failed due to: - A) An insolvency - B) A malicious receiver - C) A non-standard token - A is an invariant, B should be dropped, C is something out of the control of this contract. - It is again the Serai's network role to not add support for any non-standard tokens, + We purposely do not check if these calls succeed. A call either succeeded, and there's no + problem, or the call failed due to: + A) An insolvency + B) A malicious receiver + C) A non-standard token + A is an invariant, B should be dropped, C is something out of the control of this contract. + It is again the Serai network's role not to add support for any non-standard tokens, */ if (coin == address(0)) { // Enough gas to service the transfer and a minimal amount of logic @@ -151,9 +147,9 @@ } /* - Serai supports arbitrary calls out via deploying smart contracts (with user-specified code), - letting them execute whatever calls they're coded for. Since we can't meter CREATE, we call - CREATE from this function which we call not internally, but with CALL (which we can meter). 
*/ function arbitaryCallOut(bytes memory code) external { // Because we're creating a contract, increment our nonce @@ -166,12 +162,20 @@ contract Router { } // Execute a list of transactions if they were signed by the current key with the current nonce - function execute(OutInstruction[] calldata transactions, Signature calldata signature) external { + function execute( + address coin, + uint256 fee_per_gas, + OutInstruction[] calldata transactions, + Signature calldata signature + ) external { + uint256 gasLeftAtStart = gasleft(); + // Verify the signature // We hash the message here as we need the message's hash for the Executed event // Since we're already going to hash it, hashing it prior to verifying the signature reduces the // amount of words hashed by its challenge function (reducing our gas costs) - bytes32 message = keccak256(abi.encode("execute", block.chainid, _nonce, transactions)); + bytes32 message = + keccak256(abi.encode("execute", block.chainid, _nonce, coin, fee_per_gas, transactions)); if (!Schnorr.verify(_seraiKey, message, signature.c, signature.s)) { revert InvalidSignature(); } @@ -187,8 +191,9 @@ contract Router { if (transactions[i].destinationType == DestinationType.Address) { // This may cause a panic and the contract to become stuck if the destination isn't actually // 20 bytes. Serai is trusted to not pass a malformed destination - (AddressDestination memory destination) = abi.decode(transactions[i].destination, (AddressDestination)); - _transferOut(destination.destination, transactions[i].coin, transactions[i].value); + (AddressDestination memory destination) = + abi.decode(transactions[i].destination, (AddressDestination)); + _transferOut(destination.destination, coin, transactions[i].value); } else { // The destination is a piece of initcode. We calculate the hash of the will-be contract, // transfer to it, and then run the initcode @@ -196,15 +201,36 @@ contract Router { address(uint160(uint256(keccak256(abi.encode(address(this), _smartContractNonce))))); // Perform the transfer - _transferOut(nextAddress, transactions[i].coin, transactions[i].value); + _transferOut(nextAddress, coin, transactions[i].value); // Perform the calls with a set gas budget - (CodeDestination memory destination) = abi.decode(transactions[i].destination, (CodeDestination)); - address(this).call{ gas: destination.gas }( + (CodeDestination memory destination) = + abi.decode(transactions[i].destination, (CodeDestination)); + address(this).call{ gas: destination.gas_limit }( abi.encodeWithSelector(Router.arbitaryCallOut.selector, destination.code) ); } } + + // Calculate the gas which will be used to transfer the fee out + // This is meant to be always over, never under, with any excess being a tip to the publisher + uint256 gasToTransferOut; + if (coin == address(0)) { + // 5,000 gas is explicitly allowed, with another 10,000 for whatever overhead remains + // unaccounted for + gasToTransferOut = 15_000; + } else { + // 100_000 gas is explicitly allowed, with another 15,000 for whatever overhead remains + // unaccounted for. 
More gas is given than for ETH due to needing to ABI encode the function + // call + gasToTransferOut = 115_000; + } + + // Calculate the gas used + uint256 gasLeftAtEnd = gasleft(); + uint256 gasUsed = gasLeftAtStart - gasLeftAtEnd; + // Transfer to the caller the fee + _transferOut(msg.sender, coin, (gasUsed + gasToTransferOut) * fee_per_gas); } function nonce() external view returns (uint256) { diff --git a/processor/ethereum/router/src/lib.rs b/processor/ethereum/router/src/lib.rs index d56c514f1..32fcc449f 100644 --- a/processor/ethereum/router/src/lib.rs +++ b/processor/ethereum/router/src/lib.rs @@ -47,7 +47,7 @@ impl From<&Signature> for abi::Signature { } /// A coin on Ethereum. -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum Coin { /// Ether, the native coin of Ethereum. Ether, @@ -56,6 +56,14 @@ pub enum Coin { } impl Coin { + fn address(&self) -> Address { + (match self { + Coin::Ether => [0; 20], + Coin::Erc20(address) => *address, + }) + .into() + } + /// Read a `Coin`. pub fn read(reader: &mut R) -> io::Result { let mut kind = [0xff]; @@ -152,12 +160,12 @@ impl InInstruction { /// A list of `OutInstruction`s. #[derive(Clone)] pub struct OutInstructions(Vec); -impl From<&[(SeraiAddress, (Coin, U256))]> for OutInstructions { - fn from(outs: &[(SeraiAddress, (Coin, U256))]) -> Self { +impl From<&[(SeraiAddress, U256)]> for OutInstructions { + fn from(outs: &[(SeraiAddress, U256)]) -> Self { Self( outs .iter() - .map(|(address, (coin, amount))| { + .map(|(address, amount)| { #[allow(non_snake_case)] let (destinationType, destination) = match address { SeraiAddress::Address(address) => ( @@ -166,19 +174,14 @@ impl From<&[(SeraiAddress, (Coin, U256))]> for OutInstructions { ), SeraiAddress::Contract(contract) => ( abi::DestinationType::Code, - (abi::CodeDestination { gas: contract.gas(), code: contract.code().to_vec().into() }) - .abi_encode(), + (abi::CodeDestination { + gas_limit: contract.gas_limit(), + code: contract.code().to_vec().into(), + }) + .abi_encode(), ), }; - abi::OutInstruction { - destinationType, - destination: destination.into(), - coin: match coin { - Coin::Ether => [0; 20].into(), - Coin::Erc20(address) => address.into(), - }, - value: *amount, - } + abi::OutInstruction { destinationType, destination: destination.into(), value: *amount } }) .collect(), ) @@ -318,17 +321,31 @@ impl Router { } /// Get the message to be signed in order to execute a series of `OutInstruction`s. - pub fn execute_message(chain_id: U256, nonce: u64, outs: OutInstructions) -> Vec { - ("execute", chain_id, U256::try_from(nonce).expect("couldn't convert u64 to u256"), outs.0) + pub fn execute_message( + chain_id: U256, + nonce: u64, + coin: Coin, + fee_per_gas: U256, + outs: OutInstructions, + ) -> Vec { + ("execute", chain_id, U256::try_from(nonce).unwrap(), coin.address(), fee_per_gas, outs.0) .abi_encode() } /// Construct a transaction to execute a batch of `OutInstruction`s. 
- pub fn execute(&self, outs: OutInstructions, sig: &Signature) -> TxLegacy { + pub fn execute( + &self, + coin: Coin, + fee_per_gas: U256, + outs: OutInstructions, + sig: &Signature, + ) -> TxLegacy { let outs_len = outs.0.len(); TxLegacy { to: TxKind::Call(self.1), - input: abi::executeCall::new((outs.0, sig.into())).abi_encode().into(), + input: abi::executeCall::new((coin.address(), fee_per_gas, outs.0, sig.into())) + .abi_encode() + .into(), // TODO gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs_len).unwrap()), ..Default::default() From d2d41584680490d321628342537190e324a55bfd Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 00:12:54 -0400 Subject: [PATCH 168/179] Have the Ethereum scheduler create Batches as necessary Also introduces the fee logic, despite it being stubbed. --- .../ethereum/src/primitives/transaction.rs | 38 ++++---- processor/ethereum/src/publisher.rs | 4 +- processor/ethereum/src/scheduler.rs | 94 ++++++++++++++++--- substrate/client/src/networks/ethereum.rs | 41 +++++--- 4 files changed, 132 insertions(+), 45 deletions(-) diff --git a/processor/ethereum/src/primitives/transaction.rs b/processor/ethereum/src/primitives/transaction.rs index 525953759..67f17d316 100644 --- a/processor/ethereum/src/primitives/transaction.rs +++ b/processor/ethereum/src/primitives/transaction.rs @@ -18,7 +18,7 @@ use crate::{output::OutputId, machine::ClonableTransctionMachine}; #[derive(Clone, PartialEq, Debug)] pub(crate) enum Action { SetKey { chain_id: U256, nonce: u64, key: PublicKey }, - Batch { chain_id: U256, nonce: u64, outs: Vec<(Address, (Coin, U256))> }, + Batch { chain_id: U256, nonce: u64, coin: Coin, fee_per_gas: U256, outs: Vec<(Address, U256)> }, } #[derive(Clone, PartialEq, Eq, Debug)] @@ -36,9 +36,13 @@ impl Action { Action::SetKey { chain_id, nonce, key } => { Router::update_serai_key_message(*chain_id, *nonce, key) } - Action::Batch { chain_id, nonce, outs } => { - Router::execute_message(*chain_id, *nonce, OutInstructions::from(outs.as_ref())) - } + Action::Batch { chain_id, nonce, coin, fee_per_gas, outs } => Router::execute_message( + *chain_id, + *nonce, + *coin, + *fee_per_gas, + OutInstructions::from(outs.as_ref()), + ), } } @@ -47,13 +51,9 @@ impl Action { Self::SetKey { chain_id: _, nonce, key } => { Executed::SetKey { nonce: *nonce, key: key.eth_repr() } } - Self::Batch { chain_id, nonce, outs } => Executed::Batch { + Self::Batch { nonce, .. } => Executed::Batch { nonce: *nonce, - message_hash: keccak256(Router::execute_message( - *chain_id, - *nonce, - OutInstructions::from(outs.as_ref()), - )), + message_hash: keccak256(self.message()), }, }) } @@ -104,6 +104,12 @@ impl SignableTransaction for Action { Action::SetKey { chain_id, nonce, key } } 1 => { + let coin = Coin::read(reader)?; + + let mut fee_per_gas = [0; 32]; + reader.read_exact(&mut fee_per_gas)?; + let fee_per_gas = U256::from_le_bytes(fee_per_gas); + let mut outs_len = [0; 4]; reader.read_exact(&mut outs_len)?; let outs_len = usize::try_from(u32::from_le_bytes(outs_len)).unwrap(); @@ -111,15 +117,14 @@ impl SignableTransaction for Action { let mut outs = vec![]; for _ in 0 .. 
outs_len { let address = borsh::from_reader(reader)?; - let coin = Coin::read(reader)?; let mut amount = [0; 32]; reader.read_exact(&mut amount)?; let amount = U256::from_le_bytes(amount); - outs.push((address, (coin, amount))); + outs.push((address, amount)); } - Action::Batch { chain_id, nonce, outs } + Action::Batch { chain_id, nonce, coin, fee_per_gas, outs } } _ => unreachable!(), }) @@ -132,14 +137,15 @@ impl SignableTransaction for Action { writer.write_all(&nonce.to_le_bytes())?; writer.write_all(&key.eth_repr()) } - Self::Batch { chain_id, nonce, outs } => { + Self::Batch { chain_id, nonce, coin, fee_per_gas, outs } => { writer.write_all(&[1])?; writer.write_all(&chain_id.as_le_bytes())?; writer.write_all(&nonce.to_le_bytes())?; + coin.write(writer)?; + writer.write_all(&fee_per_gas.as_le_bytes())?; writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?; - for (address, (coin, amount)) in outs { + for (address, amount) in outs { borsh::BorshSerialize::serialize(address, writer)?; - coin.write(writer)?; writer.write_all(&amount.as_le_bytes())?; } Ok(()) diff --git a/processor/ethereum/src/publisher.rs b/processor/ethereum/src/publisher.rs index 4a62bad72..a49ea67f8 100644 --- a/processor/ethereum/src/publisher.rs +++ b/processor/ethereum/src/publisher.rs @@ -89,8 +89,8 @@ impl signers::TransactionPublisher for TransactionPublisher< // Convert from an Action (an internal representation of a signable event) to a TxLegacy let tx = match tx.0 { Action::SetKey { chain_id: _, nonce: _, key } => router.update_serai_key(&key, &tx.1), - Action::Batch { chain_id: _, nonce: _, outs } => { - router.execute(OutInstructions::from(outs.as_ref()), &tx.1) + Action::Batch { chain_id: _, nonce: _, coin, fee_per_gas, outs } => { + router.execute(coin, fee_per_gas, OutInstructions::from(outs.as_ref()), &tx.1) } }; diff --git a/processor/ethereum/src/scheduler.rs b/processor/ethereum/src/scheduler.rs index 55e091fc1..5a3fd4286 100644 --- a/processor/ethereum/src/scheduler.rs +++ b/processor/ethereum/src/scheduler.rs @@ -1,6 +1,11 @@ +use std::collections::HashMap; + use alloy_core::primitives::U256; -use serai_client::primitives::{NetworkId, Coin, Balance}; +use serai_client::{ + primitives::{NetworkId, Coin, Balance}, + networks::ethereum::Address, +}; use serai_db::Db; @@ -53,27 +58,86 @@ impl smart_contract_scheduler::SmartContract> for SmartContract { fn fulfill( &self, - nonce: u64, + mut nonce: u64, _key: KeyFor>, payments: Vec>>>, ) -> Vec<(Self::SignableTransaction, EventualityFor>)> { - let mut outs = Vec::with_capacity(payments.len()); + // Sort by coin + let mut outs = HashMap::<_, _>::new(); for payment in payments { - outs.push(( - payment.address().clone(), - ( - coin_to_ethereum_coin(payment.balance().coin), - balance_to_ethereum_amount(payment.balance()), - ), - )); + let coin = payment.balance().coin; + outs + .entry(coin) + .or_insert_with(|| Vec::with_capacity(1)) + .push((payment.address().clone(), balance_to_ethereum_amount(payment.balance()))); } - // TODO: Per-batch gas limit - // TODO: Create several batches - // TODO: Handle fees - let action = Action::Batch { chain_id: self.chain_id, nonce, outs }; + let mut res = vec![]; + for coin in [Coin::Ether, Coin::Dai] { + let Some(outs) = outs.remove(&coin) else { continue }; + assert!(!outs.is_empty()); + + let fee_per_gas: U256 = todo!("TODO"); + + // The gas required to perform any interaction with the Router. 
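// (These gas constants are stubbed to 0 with TODOs below; they need to be measured against the
// deployed Router before the fee deductions computed from them are meaningful.)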
+ const BASE_GAS: u32 = 0; // TODO + + // The gas required to handle an additional payment to an address, in the worst case. + const ADDRESS_PAYMENT_GAS: u32 = 0; // TODO + + // The gas required to handle an additional payment to a smart contract, in the worst case. + // This does not include the explicit gas budget defined within the address specification. + const CONTRACT_PAYMENT_GAS: u32 = 0; // TODO + + // The maximum amount of gas for a batch. + const BATCH_GAS_LIMIT: u32 = 10_000_000; + + // Split these outs into batches, respecting BATCH_GAS_LIMIT + let mut batches = vec![vec![]]; + let mut current_gas = BASE_GAS; + for out in outs { + let payment_gas = match out.0 { + Address::Address(_) => ADDRESS_PAYMENT_GAS, + Address::Contract(deployment) => CONTRACT_PAYMENT_GAS + deployment.gas_limit(), + }; + if (current_gas + payment_gas) > BATCH_GAS_LIMIT { + assert!(!batches.last().unwrap().is_empty()); + batches.push(vec![]); + current_gas = BASE_GAS; + } + batches.last_mut().unwrap().push(out); + current_gas += payment_gas; + } + + // Push each batch onto the result + for outs in batches { + let base_gas = BASE_GAS.div_ceil(u32::try_from(outs.len()).unwrap()); + // Deduce the fee from each out + for out in &mut outs { + let payment_gas = base_gas + + match out.0 { + Address::Address(_) => ADDRESS_PAYMENT_GAS, + Address::Contract(deployment) => CONTRACT_PAYMENT_GAS + deployment.gas_limit(), + }; + + let payment_gas_cost = fee_per_gas * U256::try_from(payment_gas).unwrap(); + out.1 -= payment_gas_cost; + } + + res.push(Action::Batch { + chain_id: self.chain_id, + nonce, + coin: coin_to_ethereum_coin(coin), + fee_per_gas, + outs, + }); + nonce += 1; + } + } + // Ensure we handled all payments we're supposed to + assert!(outs.is_empty()); - vec![(action.clone(), action.eventuality())] + res.into_iter().map(|action| (action.clone(), action.eventuality())).collect() } } diff --git a/substrate/client/src/networks/ethereum.rs b/substrate/client/src/networks/ethereum.rs index ddf15480c..47b58af56 100644 --- a/substrate/client/src/networks/ethereum.rs +++ b/substrate/client/src/networks/ethereum.rs @@ -5,13 +5,18 @@ use borsh::{BorshSerialize, BorshDeserialize}; use crate::primitives::{MAX_ADDRESS_LEN, ExternalAddress}; +/// The maximum amount of gas an address is allowed to specify as its gas limit. +/// +/// Payments to an address with a gas limit which exceeds this value will be dropped entirely. +pub const ADDRESS_GAS_LIMIT: u32 = 950_000; + #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub struct ContractDeployment { /// The gas limit to use for this contract's execution. /// /// This MUST be less than the Serai gas limit. The cost of it will be deducted from the amount /// transferred. - gas: u32, + gas_limit: u32, /// The initialization code of the contract to deploy. /// /// This contract will be deployed (executing the initialization code). No further calls will /// A contract to deploy, enabling executing arbitrary code. 
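// (Wire format, per the `TryFrom`/`From` impls below: a type byte, 0 for a raw address or 1 for
// a contract deployment, followed, for deployments, by the little-endian `gas_limit` and the
// remaining bytes as initcode.)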
impl ContractDeployment { - pub fn new(gas: u32, code: Vec) -> Option { + pub fn new(gas_limit: u32, code: Vec) -> Option { + // Check the gas limit is less than the address gas limit + if gas_limit > ADDRESS_GAS_LIMIT { + None?; + } + // The max address length, minus the type byte, minus the size of the gas const MAX_CODE_LEN: usize = (MAX_ADDRESS_LEN as usize) - (1 + core::mem::size_of::()); if code.len() > MAX_CODE_LEN { None?; } - Some(Self { gas, code }) + + Some(Self { gas_limit, code }) } - pub fn gas(&self) -> u32 { - self.gas + pub fn gas_limit(&self) -> u32 { + self.gas_limit } pub fn code(&self) -> &[u8] { &self.code @@ -66,12 +77,18 @@ impl TryFrom for Address { Address::Address(address) } 1 => { - let mut gas = [0xff; 4]; - reader.read_exact(&mut gas).map_err(|_| ())?; - // The code is whatever's left since the ExternalAddress is a delimited container of - // appropriately bounded length Address::Contract(ContractDeployment { - gas: u32::from_le_bytes(gas), + let mut gas_limit = [0xff; 4]; + reader.read_exact(&mut gas_limit).map_err(|_| ())?; + Address::Contract(ContractDeployment { + gas_limit: { + let gas_limit = u32::from_le_bytes(gas_limit); + if gas_limit > ADDRESS_GAS_LIMIT { + Err(())?; + } + gas_limit + }, + // The code is whatever's left since the ExternalAddress is a delimited container of + // appropriately bounded length code: reader.to_vec(), }) } @@ -87,9 +104,9 @@ impl From
for ExternalAddress { res.push(0); res.extend(&address); } - Address::Contract(ContractDeployment { gas, code }) => { + Address::Contract(ContractDeployment { gas_limit, code }) => { res.push(1); - res.extend(&gas.to_le_bytes()); + res.extend(&gas_limit.to_le_bytes()); res.extend(&code); } } From 05e007fbd046d36fa43e002114b96bb8e443b918 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 00:15:08 -0400 Subject: [PATCH 169/179] Remove accidentally included bitcoin feature from processor-bin --- processor/bin/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/processor/bin/Cargo.toml b/processor/bin/Cargo.toml index f6da8b7c3..116916ab1 100644 --- a/processor/bin/Cargo.toml +++ b/processor/bin/Cargo.toml @@ -26,7 +26,7 @@ borsh = { version = "1", default-features = false, features = ["std", "derive", ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } -serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] } +serai-client = { path = "../../substrate/client", default-features = false } log = { version = "0.4", default-features = false, features = ["std"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"] } From c583a00abf7806350763214e2427234363db5125 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 00:20:05 -0400 Subject: [PATCH 170/179] Set a fixed fee transferred to the caller for publication Avoids the risk of the gas used by the contract exceeding the gas presumed to be used (causing an insolvency). --- .../ethereum/router/contracts/Router.sol | 25 +++---------------- processor/ethereum/router/src/lib.rs | 17 +++---------- .../ethereum/src/primitives/transaction.rs | 25 +++++++++---------- processor/ethereum/src/publisher.rs | 4 +-- processor/ethereum/src/scheduler.rs | 11 +++++--- 5 files changed, 28 insertions(+), 54 deletions(-) diff --git a/processor/ethereum/router/contracts/Router.sol b/processor/ethereum/router/contracts/Router.sol index a74f82572..c4c038e2a 100644 --- a/processor/ethereum/router/contracts/Router.sol +++ b/processor/ethereum/router/contracts/Router.sol @@ -164,18 +164,16 @@ contract Router { // Execute a list of transactions if they were signed by the current key with the current nonce function execute( address coin, - uint256 fee_per_gas, + uint256 fee, OutInstruction[] calldata transactions, Signature calldata signature ) external { - uint256 gasLeftAtStart = gasleft(); - // Verify the signature // We hash the message here as we need the message's hash for the Executed event // Since we're already going to hash it, hashing it prior to verifying the signature reduces the // amount of words hashed by its challenge function (reducing our gas costs) bytes32 message = - keccak256(abi.encode("execute", block.chainid, _nonce, coin, fee_per_gas, transactions)); + keccak256(abi.encode("execute", block.chainid, _nonce, coin, fee, transactions)); if (!Schnorr.verify(_seraiKey, message, signature.c, signature.s)) { revert InvalidSignature(); } @@ -212,25 +210,8 @@ contract Router { } } - // Calculate the gas which will be used to transfer the fee out - // This is meant to be always over, never under, with any excess being a tip to the publisher - uint256 gasToTransferOut; - if (coin == address(0)) { - // 5,000 gas is explicitly allowed, with another 10,000 for whatever overhead remains - // 
unaccounted for - gasToTransferOut = 15_000; - } else { - // 100_000 gas is explicitly allowed, with another 15,000 for whatever overhead remains - // unaccounted for. More gas is given than for ETH due to needing to ABI encode the function - // call - gasToTransferOut = 115_000; - } - - // Calculate the gas used - uint256 gasLeftAtEnd = gasleft(); - uint256 gasUsed = gasLeftAtStart - gasLeftAtEnd; // Transfer to the caller the fee - _transferOut(msg.sender, coin, (gasUsed + gasToTransferOut) * fee_per_gas); + _transferOut(msg.sender, coin, fee); } function nonce() external view returns (uint256) { diff --git a/processor/ethereum/router/src/lib.rs b/processor/ethereum/router/src/lib.rs index 32fcc449f..248523b8d 100644 --- a/processor/ethereum/router/src/lib.rs +++ b/processor/ethereum/router/src/lib.rs @@ -325,27 +325,18 @@ impl Router { chain_id: U256, nonce: u64, coin: Coin, - fee_per_gas: U256, + fee: U256, outs: OutInstructions, ) -> Vec<u8> { - ("execute", chain_id, U256::try_from(nonce).unwrap(), coin.address(), fee_per_gas, outs.0) - .abi_encode() + ("execute", chain_id, U256::try_from(nonce).unwrap(), coin.address(), fee, outs.0).abi_encode() } /// Construct a transaction to execute a batch of `OutInstruction`s. - pub fn execute( - &self, - coin: Coin, - fee_per_gas: U256, - outs: OutInstructions, - sig: &Signature, - ) -> TxLegacy { + pub fn execute(&self, coin: Coin, fee: U256, outs: OutInstructions, sig: &Signature) -> TxLegacy { let outs_len = outs.0.len(); TxLegacy { to: TxKind::Call(self.1), - input: abi::executeCall::new((coin.address(), fee_per_gas, outs.0, sig.into())) - .abi_encode() - .into(), + input: abi::executeCall::new((coin.address(), fee, outs.0, sig.into())).abi_encode().into(), // TODO gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs_len).unwrap()), ..Default::default() diff --git a/processor/ethereum/src/primitives/transaction.rs b/processor/ethereum/src/primitives/transaction.rs index 67f17d316..6730e7a99 100644 --- a/processor/ethereum/src/primitives/transaction.rs +++ b/processor/ethereum/src/primitives/transaction.rs @@ -18,7 +18,7 @@ use crate::{output::OutputId, machine::ClonableTransctionMachine}; #[derive(Clone, PartialEq, Debug)] pub(crate) enum Action { SetKey { chain_id: U256, nonce: u64, key: PublicKey }, - Batch { chain_id: U256, nonce: u64, coin: Coin, fee_per_gas: U256, outs: Vec<(Address, U256)> }, + Batch { chain_id: U256, nonce: u64, coin: Coin, fee: U256, outs: Vec<(Address, U256)> }, } #[derive(Clone, PartialEq, Eq, Debug)] @@ -36,11 +36,11 @@ impl Action { Action::SetKey { chain_id, nonce, key } => { Router::update_serai_key_message(*chain_id, *nonce, key) } - Action::Batch { chain_id, nonce, coin, fee_per_gas, outs } => Router::execute_message( + Action::Batch { chain_id, nonce, coin, fee, outs } => Router::execute_message( *chain_id, *nonce, *coin, - *fee_per_gas, + *fee, OutInstructions::from(outs.as_ref()), ), } } Self::SetKey { chain_id: _, nonce, key } => { Executed::SetKey { nonce: *nonce, key: key.eth_repr() } } - Self::Batch { nonce, .. } => Executed::Batch { - nonce: *nonce, - message_hash: keccak256(self.message()), - }, + Self::Batch { nonce, ..
} => { + Executed::Batch { nonce: *nonce, message_hash: keccak256(self.message()) } + } }) } } @@ -106,9 +105,9 @@ impl SignableTransaction for Action { 1 => { let coin = Coin::read(reader)?; - let mut fee_per_gas = [0; 32]; - reader.read_exact(&mut fee_per_gas)?; - let fee_per_gas = U256::from_le_bytes(fee_per_gas); + let mut fee = [0; 32]; + reader.read_exact(&mut fee)?; + let fee = U256::from_le_bytes(fee); let mut outs_len = [0; 4]; reader.read_exact(&mut outs_len)?; @@ -124,7 +123,7 @@ outs.push((address, amount)); } - Action::Batch { chain_id, nonce, coin, fee_per_gas, outs } + Action::Batch { chain_id, nonce, coin, fee, outs } } _ => unreachable!(), }) @@ -137,12 +136,12 @@ writer.write_all(&nonce.to_le_bytes())?; writer.write_all(&key.eth_repr()) } - Self::Batch { chain_id, nonce, coin, fee_per_gas, outs } => { + Self::Batch { chain_id, nonce, coin, fee, outs } => { writer.write_all(&[1])?; writer.write_all(&chain_id.as_le_bytes())?; writer.write_all(&nonce.to_le_bytes())?; coin.write(writer)?; - writer.write_all(&fee_per_gas.as_le_bytes())?; + writer.write_all(&fee.as_le_bytes())?; writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?; for (address, amount) in outs { borsh::BorshSerialize::serialize(address, writer)?; diff --git a/processor/ethereum/src/publisher.rs b/processor/ethereum/src/publisher.rs index a49ea67f8..3d18a6efe 100644 --- a/processor/ethereum/src/publisher.rs +++ b/processor/ethereum/src/publisher.rs @@ -89,8 +89,8 @@ impl signers::TransactionPublisher for TransactionPublisher< // Convert from an Action (an internal representation of a signable event) to a TxLegacy let tx = match tx.0 { Action::SetKey { chain_id: _, nonce: _, key } => router.update_serai_key(&key, &tx.1), - Action::Batch { chain_id: _, nonce: _, coin, fee_per_gas, outs } => { - router.execute(coin, fee_per_gas, OutInstructions::from(outs.as_ref()), &tx.1) + Action::Batch { chain_id: _, nonce: _, coin, fee, outs } => { + router.execute(coin, fee, OutInstructions::from(outs.as_ref()), &tx.1) } }; diff --git a/processor/ethereum/src/scheduler.rs b/processor/ethereum/src/scheduler.rs index 5a3fd4286..f4c31ec60 100644 --- a/processor/ethereum/src/scheduler.rs +++ b/processor/ethereum/src/scheduler.rs @@ -111,16 +111,19 @@ impl<D: Db> smart_contract_scheduler::SmartContract<Rpc<D>> for SmartContract { // Push each batch onto the result for outs in batches { - let base_gas = BASE_GAS.div_ceil(u32::try_from(outs.len()).unwrap()); + let mut total_gas = 0; + + let base_gas_per_payment = BASE_GAS.div_ceil(u32::try_from(outs.len()).unwrap()); // Deduct the fee from each out for out in &mut outs { - let payment_gas = base_gas + + let payment_gas = base_gas_per_payment + match out.0 { Address::Address(_) => ADDRESS_PAYMENT_GAS, Address::Contract(deployment) => CONTRACT_PAYMENT_GAS + deployment.gas_limit(), }; + total_gas += payment_gas; - let payment_gas_cost = fee_per_gas * U256::try_from(payment_gas).unwrap(); + let payment_gas_cost = U256::try_from(payment_gas).unwrap() * fee_per_gas; out.1 -= payment_gas_cost; } @@ -128,7 +131,7 @@ chain_id: self.chain_id, nonce, coin: coin_to_ethereum_coin(coin), - fee_per_gas, + fee: U256::try_from(total_gas).unwrap() * fee_per_gas, outs, }); nonce += 1; From af9e73f8cb393f6ac94f74b70112397cec07c472 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 00:55:03 -0400 Subject: [PATCH 171/179] Add dummy fee values to 
the scheduler --- processor/ethereum/src/rpc.rs | 9 +++------ processor/ethereum/src/scheduler.rs | 18 ++++++++++++++---- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/processor/ethereum/src/rpc.rs b/processor/ethereum/src/rpc.rs index 0769c5c39..1eaa4988e 100644 --- a/processor/ethereum/src/rpc.rs +++ b/processor/ethereum/src/rpc.rs @@ -18,7 +18,7 @@ use ethereum_erc20::{TopLevelTransfer, Erc20}; use ethereum_router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction, Router}; use crate::{ - TOKENS, InitialSeraiKey, + TOKENS, ETHER_DUST, DAI_DUST, InitialSeraiKey, block::{Epoch, FullEpoch}, }; @@ -207,12 +207,9 @@ impl<D: Db> ScannerFeed for Rpc<D> { fn dust(coin: Coin) -> Amount { assert_eq!(coin.network(), NetworkId::Ethereum); - #[allow(clippy::inconsistent_digit_grouping)] match coin { - // 5 USD if Ether is ~3300 USD - Coin::Ether => Amount(1_500_00), - // 5 DAI - Coin::Dai => Amount(5_000_000_00), + Coin::Ether => ETHER_DUST, + Coin::Dai => DAI_DUST, _ => unreachable!(), } } diff --git a/processor/ethereum/src/scheduler.rs b/processor/ethereum/src/scheduler.rs index f4c31ec60..e8a437c1f 100644 --- a/processor/ethereum/src/scheduler.rs +++ b/processor/ethereum/src/scheduler.rs @@ -77,7 +77,17 @@ impl<D: Db> smart_contract_scheduler::SmartContract<Rpc<D>> for SmartContract { let Some(outs) = outs.remove(&coin) else { continue }; assert!(!outs.is_empty()); - let fee_per_gas: U256 = todo!("TODO"); + let fee_per_gas = match coin { + // 10 gwei + Coin::Ether => { + U256::try_from(10u64).unwrap() * alloy_core::primitives::utils::Unit::GWEI.wei() + } + // 0.00003 DAI per gas + Coin::Dai => { + U256::try_from(30u64).unwrap() * alloy_core::primitives::utils::Unit::TWEI.wei() + } + _ => unreachable!(), + }; // The gas required to perform any interaction with the Router. 
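// As a worked example of how this constant is consumed below (the numbers here are
// illustrative assumptions, not measured values): with BASE_GAS = 50_000 and a batch of
// 4 payments, base_gas_per_payment = div_ceil(50_000, 4) = 12_500, each payment has
// (12_500 + its own payment gas) * fee_per_gas deducted from it, and the batch's total
// fee becomes total_gas * fee_per_gas.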
const BASE_GAS: u32 = 0; // TODO @@ -96,7 +106,7 @@ impl<D: Db> smart_contract_scheduler::SmartContract<Rpc<D>> for SmartContract { let mut batches = vec![vec![]]; let mut current_gas = BASE_GAS; for out in outs { - let payment_gas = match out.0 { + let payment_gas = match &out.0 { Address::Address(_) => ADDRESS_PAYMENT_GAS, Address::Contract(deployment) => CONTRACT_PAYMENT_GAS + deployment.gas_limit(), }; @@ -110,14 +120,14 @@ } // Push each batch onto the result - for outs in batches { + for mut outs in batches { let mut total_gas = 0; let base_gas_per_payment = BASE_GAS.div_ceil(u32::try_from(outs.len()).unwrap()); // Deduct the fee from each out for out in &mut outs { let payment_gas = base_gas_per_payment + - match out.0 { + match &out.0 { Address::Address(_) => ADDRESS_PAYMENT_GAS, Address::Contract(deployment) => CONTRACT_PAYMENT_GAS + deployment.gas_limit(), }; From 7fd239b1c2550f13773f4ec6fed5008995701fd1 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 00:55:21 -0400 Subject: [PATCH 172/179] Report a Change Output with every Eventuality to ensure we don't fall out of synchrony --- processor/ethereum/src/primitives/block.rs | 36 +++++- processor/ethereum/src/primitives/mod.rs | 9 ++ processor/ethereum/src/primitives/output.rs | 116 +++++++++++++++----- processor/scanner/src/lib.rs | 4 +- 4 files changed, 132 insertions(+), 33 deletions(-) diff --git a/processor/ethereum/src/primitives/block.rs b/processor/ethereum/src/primitives/block.rs index 723e099d4..780837fae 100644 --- a/processor/ethereum/src/primitives/block.rs +++ b/processor/ethereum/src/primitives/block.rs @@ -61,7 +61,29 @@ impl primitives::Block for FullEpoch { // Associate all outputs with the latest active key // We don't associate these with the current key within the SC as that'll cause outputs to be // marked for forwarding if the SC is delayed to actually rotate - self.instructions.iter().cloned().map(|instruction| Output { key, instruction }).collect() + let mut outputs: Vec<_> = self + .instructions + .iter() + .cloned() + .map(|instruction| Output::Output { key, instruction }) + .collect(); + + /*
      The scanner requires a change output be associated with every Eventuality that came from
      fulfilling payments, unless said Eventuality descends from an Eventuality meeting that
      requirement from the same fulfillment. This ensures we have a fully populated Eventualities
      set by the time we process the block which has an Eventuality.

      Accordingly, for any block with an Eventuality completion, we claim there's a Change output
      so that the block is flagged. Ethereum doesn't actually have Change outputs, yet the scanner
      won't report them to Substrate, and the Smart Contract scheduler will drop any/all outputs
      passed to it (handwaving their balances as present within the Smart Contract).
    */ + if !self.executed.is_empty() { + outputs.push(Output::Eventuality { key, nonce: self.executed.first().unwrap().nonce() }); + } + + outputs } #[allow(clippy::type_complexity)] @@ -85,15 +107,17 @@ "Router emitted distinct event for nonce {}", executed.nonce() ); + /* The transaction ID is used to determine how internal outputs from this transaction should be handled (if they were actually internal or if they were just to an internal address). - The Ethereum integration doesn't have internal addresses, and this transaction wasn't made 
It was simply authorized by Serai yet may or may not be associated with other - actions we don't want to flag as our own. + The Ethereum integration doesn't use internal addresses, and only uses internal outputs to + flag a block as having an Eventuality. Those internal outputs will always be scanned, and + while they may be dropped/kept by this ID, the scheduler will then always drop them. + Accordingly, we have free reign as to what to set the transaction ID to. - Accordingly, we set the transaction ID to the nonce. This is unique barring someone finding - the preimage which hashes to this nonce, and won't cause any other data to be associated. + We set the ID to the nonce as it's the most helpful value and unique barring someone + finding the premise for this as a hash. */ let mut tx_id = [0; 32]; tx_id[.. 8].copy_from_slice(executed.nonce().to_le_bytes().as_slice()); diff --git a/processor/ethereum/src/primitives/mod.rs b/processor/ethereum/src/primitives/mod.rs index 00a5980f7..197acf8f5 100644 --- a/processor/ethereum/src/primitives/mod.rs +++ b/processor/ethereum/src/primitives/mod.rs @@ -1,3 +1,5 @@ +use serai_client::primitives::Amount; + pub(crate) mod output; pub(crate) mod transaction; pub(crate) mod machine; @@ -10,3 +12,10 @@ pub(crate) const DAI: [u8; 20] = }; pub(crate) const TOKENS: [[u8; 20]; 1] = [DAI]; + +// 8 decimals, so 1_000_000_00 would be 1 ETH. This is 0.0015 ETH (5 USD if Ether is ~3300 USD). +#[allow(clippy::inconsistent_digit_grouping)] +pub(crate) const ETHER_DUST: Amount = Amount(1_500_00); +// 5 DAI +#[allow(clippy::inconsistent_digit_grouping)] +pub(crate) const DAI_DUST: Amount = Amount(5_000_000_00); diff --git a/processor/ethereum/src/primitives/output.rs b/processor/ethereum/src/primitives/output.rs index 0f3279211..2215c29da 100644 --- a/processor/ethereum/src/primitives/output.rs +++ b/processor/ethereum/src/primitives/output.rs @@ -15,7 +15,7 @@ use serai_client::{ use primitives::{OutputType, ReceivedOutput}; use ethereum_router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction}; -use crate::DAI; +use crate::{DAI, ETHER_DUST}; fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { match coin { @@ -59,58 +59,122 @@ impl AsMut<[u8]> for OutputId { } #[derive(Clone, PartialEq, Eq, Debug)] -pub(crate) struct Output { - pub(crate) key: ::G, - pub(crate) instruction: EthereumInInstruction, +pub(crate) enum Output { + Output { key: ::G, instruction: EthereumInInstruction }, + Eventuality { key: ::G, nonce: u64 }, } impl ReceivedOutput<::G, Address> for Output { type Id = OutputId; type TransactionId = [u8; 32]; - // We only scan external outputs as we don't have branch/change/forwards fn kind(&self) -> OutputType { - OutputType::External + match self { + // All outputs received are External + Output::Output { .. } => OutputType::External, + // Yet upon Eventuality completions, we report a Change output to ensure synchrony per the + // scanner's documented bounds + Output::Eventuality { .. } => OutputType::Change, + } } fn id(&self) -> Self::Id { - let mut id = [0; 40]; - id[.. 32].copy_from_slice(&self.instruction.id.0); - id[32 ..].copy_from_slice(&self.instruction.id.1.to_le_bytes()); - OutputId(id) + match self { + Output::Output { key: _, instruction } => { + let mut id = [0; 40]; + id[.. 
32].copy_from_slice(&instruction.id.0); + id[32 ..].copy_from_slice(&instruction.id.1.to_le_bytes()); + OutputId(id) + } + // Yet upon Eventuality completions, we report a Change output to ensure synchrony per the + // scanner's documented bounds + Output::Eventuality { key: _, nonce } => { + let mut id = [0; 40]; + id[.. 8].copy_from_slice(&nonce.to_le_bytes()); + OutputId(id) + } + } } fn transaction_id(&self) -> Self::TransactionId { - self.instruction.id.0 + match self { + Output::Output { key: _, instruction } => instruction.id.0, + Output::Eventuality { key: _, nonce } => { + let mut id = [0; 32]; + id[.. 8].copy_from_slice(&nonce.to_le_bytes()); + id + } + } } fn key(&self) -> <Secp256k1 as Ciphersuite>::G { - self.key + match self { + Output::Output { key, .. } | Output::Eventuality { key, .. } => *key, + } } fn presumed_origin(&self) -> Option<Address>
{ - Some(Address::from(self.instruction.from)) + match self { + Output::Output { key: _, instruction } => Some(Address::from(instruction.from)), + Output::Eventuality { .. } => None, + } } fn balance(&self) -> Balance { - let coin = coin_to_serai_coin(&self.instruction.coin).unwrap_or_else(|| { - panic!( - "mapping coin from an EthereumInInstruction with coin {}, which we don't handle.", - "this never should have been yielded" - ) - }); - Balance { coin, amount: amount_to_serai_amount(coin, self.instruction.amount) } + match self { + Output::Output { key: _, instruction } => { + let coin = coin_to_serai_coin(&instruction.coin).unwrap_or_else(|| { + panic!( + "mapping coin from an EthereumInInstruction with coin {}, which we don't handle.", + "this never should have been yielded" + ) + }); + Balance { coin, amount: amount_to_serai_amount(coin, instruction.amount) } + } + Output::Eventuality { .. } => Balance { coin: Coin::Ether, amount: ETHER_DUST }, + } } fn data(&self) -> &[u8] { + match self { + Output::Output { key: _, instruction } => &instruction.data, + Output::Eventuality { .. } => &[], + } - &self.instruction.data } fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(self.key.to_bytes().as_ref())?; - self.instruction.write(writer) + match self { + Output::Output { key, instruction } => { + writer.write_all(&[0])?; + writer.write_all(key.to_bytes().as_ref())?; + instruction.write(writer) + } + Output::Eventuality { key, nonce } => { + writer.write_all(&[1])?; + writer.write_all(key.to_bytes().as_ref())?; + writer.write_all(&nonce.to_le_bytes()) + } + } } fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> { - let key = Secp256k1::read_G(reader)?; - let instruction = EthereumInInstruction::read(reader)?; - Ok(Self { key, instruction }) + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + if kind[0] >= 2 { + Err(io::Error::other("unknown Output type"))?; + } + + Ok(match kind[0] { + 0 => { + let key = Secp256k1::read_G(reader)?; + let instruction = EthereumInInstruction::read(reader)?; + Self::Output { key, instruction } + } + 1 => { + let key = Secp256k1::read_G(reader)?; + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + let nonce = u64::from_le_bytes(nonce); + Self::Eventuality { key, nonce } + } + _ => unreachable!(), }) } } diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 72d661a30..6db079893 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -321,7 +321,9 @@ pub trait Scheduler: 'static + Send { /// /// Any Eventualities returned by this function must include an output-to-Serai (such as a Branch /// or Change), unless they descend from a transaction returned by this function which satisfies - /// that requirement. + /// that requirement. This ensures that when we scan outputs from transactions we made, we report + /// the block up to Substrate, and obtain synchrony on all prior blocks (allowing us to identify + /// our own transactions, which we may previously have been unaware of due to a lagging view of + /// Substrate). /// /// `active_keys` is the list of active keys, potentially including a key for which we've already /// called `retire_key` on. If so, its stage will be `Finishing` and no further operations will From 3aedbd1b370717077caf48d45e5d4e579514a0d2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 01:01:45 -0400 Subject: [PATCH 173/179] Transfer ETH with CREATE, not prior to CREATE Saves a few thousand gas. 
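As a sketch of the pattern this commit adopts (the `CreateWithValue` contract below is illustrative only, not part of this patch): CREATE takes the endowment as its first argument, so the child contract is funded during construction, instead of sending ETH to the precomputed address first and deploying afterwards.

  contract CreateWithValue {
    // Deploys `code` and endows it with `msg.value` in one operation, rather than
    // transferring to the would-be address first and deploying second.
    function deploy(bytes memory code) external payable returns (address child) {
      uint256 endowment = msg.value;
      assembly {
        child := create(endowment, add(code, 0x20), mload(code))
      }
    }
  }
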
--- .../ethereum/router/contracts/Router.sol | 54 +++++++++++-------- 1 file changed, 33 insertions(+), 21 deletions(-) diff --git a/processor/ethereum/router/contracts/Router.sol b/processor/ethereum/router/contracts/Router.sol index c4c038e2a..9100f59e6 100644 --- a/processor/ethereum/router/contracts/Router.sol +++ b/processor/ethereum/router/contracts/Router.sol @@ -126,23 +126,28 @@ contract Router { emit InInstruction(msg.sender, coin, amount, instruction); } - // Perform a transfer out - function _transferOut(address to, address coin, uint256 value) private { - /* + /*
    We purposely do not check if these calls succeed. A call either succeeded, and there's no
    problem, or the call failed due to:
      A) An insolvency
      B) A malicious receiver
      C) A non-standard token
    A is an invariant, B should be dropped, C is something out of the control of this contract.
    It is again the Serai network's role to not add support for any non-standard tokens.
  - */ + */ + + // Perform an ERC20 transfer out + function _erc20TransferOut(address to, address coin, uint256 value) private { + coin.call{ gas: 100_000 }(abi.encodeWithSelector(IERC20.transfer.selector, msg.sender, value)); + } + + // Perform an ETH/ERC20 transfer out + function _transferOut(address to, address coin, uint256 value) private { if (coin == address(0)) { // Enough gas to service the transfer and a minimal amount of logic - // TODO: If we're constructing a contract, we can do this at the same time as construction to.call{ value: value, gas: 5_000 }(""); } else { - coin.call{ gas: 100_000 }(abi.encodeWithSelector(IERC20.transfer.selector, msg.sender, value)); + _erc20TransferOut(to, coin, value); } } @@ -151,13 +156,14 @@ letting them execute whatever calls they're coded for. Since we can't meter CREATE, we call CREATE from this function which we call not internally, but with CALL (which we can meter). */ - function arbitaryCallOut(bytes memory code) external { + function arbitaryCallOut(bytes memory code) external payable { // Because we're creating a contract, increment our nonce _smartContractNonce += 1; + uint256 msg_value = msg.value; address contractAddress; assembly { - contractAddress := create(0, add(code, 0x20), mload(code)) + contractAddress := create(msg_value, add(code, 0x20), mload(code)) } } @@ -193,18 +199,24 @@ abi.decode(transactions[i].destination, (AddressDestination)); _transferOut(destination.destination, coin, transactions[i].value); } else { - // The destination is a piece of initcode. We calculate the hash of the will-be contract, - // transfer to it, and then run the initcode - address nextAddress = - address(uint160(uint256(keccak256(abi.encode(address(this), _smartContractNonce))))); - - // Perform the transfer - _transferOut(nextAddress, coin, transactions[i].value); - - // Perform the calls with a set gas budget + // Prepare for the transfer + uint256 eth_value = 0; + if (coin == address(0)) { + // If it's ETH, we transfer the value with the call + eth_value = transactions[i].value; + } else { + // If it's an ERC20, we calculate the hash of the will-be contract and transfer to it + // before deployment. This avoids needing to deploy, then call again, offering a few 
This avoids needing to deploy, then call again, offering a few + // optimizations + address nextAddress = + address(uint160(uint256(keccak256(abi.encode(address(this), _smartContractNonce))))); + _erc20TransferOut(nextAddress, coin, transactions[i].value); + } + + // Perform the deployment with the defined gas budget (CodeDestination memory destination) = abi.decode(transactions[i].destination, (CodeDestination)); - address(this).call{ gas: destination.gas_limit }( + address(this).call{ gas: destination.gas_limit, value: eth_value }( abi.encodeWithSelector(Router.arbitaryCallOut.selector, destination.code) ); } From b6a408d49d7f07f05506d89b8f365582f070d5c3 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 01:23:26 -0400 Subject: [PATCH 174/179] Don't have the ERC20 collapse the top-level transfer ID to the transaction ID Uses the ID of the transfer event associated with the top-level transfer. --- processor/ethereum/erc20/src/lib.rs | 30 +++++++++-------------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/processor/ethereum/erc20/src/lib.rs b/processor/ethereum/erc20/src/lib.rs index 400a5baac..ec33989e7 100644 --- a/processor/ethereum/erc20/src/lib.rs +++ b/processor/ethereum/erc20/src/lib.rs @@ -30,8 +30,8 @@ pub use abi::IERC20::Transfer; /// A top-level ERC20 transfer #[derive(Clone, Debug)] pub struct TopLevelTransfer { - /// The transaction ID which effected this transfer. - pub id: [u8; 32], + /// The ID of the event for this transfer. + pub id: ([u8; 32], u64), /// The address which made the transfer. pub from: [u8; 20], /// The amount transferred. @@ -40,14 +40,6 @@ pub struct TopLevelTransfer { pub data: Vec, } -/// A transaction with a top-level transfer, matched to the log index of the transfer. -pub struct MatchedTopLevelTransfer { - /// The transfer. - pub transfer: TopLevelTransfer, - /// The log index of the transfer. - pub log_index: u64, -} - /// A view for an ERC20 contract. 
#[derive(Clone, Debug)] pub struct Erc20(Arc<RootProvider<SimpleRequest>>, Address); impl Erc20 { @@ -62,7 +54,7 @@ impl Erc20 { provider: impl AsRef<RootProvider<SimpleRequest>>, transaction_id: B256, to: Address, - ) -> Result<Option<MatchedTopLevelTransfer>, RpcError<TransportErrorKind>> { + ) -> Result<Option<TopLevelTransfer>, RpcError<TransportErrorKind>> { // Fetch the transaction let transaction = provider.as_ref().get_transaction_by_hash(transaction_id).await?.ok_or_else(|| { @@ -132,15 +124,11 @@ let encoded = call.abi_encode(); let data = transaction.input.as_ref()[encoded.len() ..].to_vec(); - return Ok(Some(MatchedTopLevelTransfer { - transfer: TopLevelTransfer { - // Since there's only one top-level transfer per TX, set the ID to the TX ID - id: *transaction_id, - from: *log.from.0, - amount: log.value, - data, - }, - log_index, + return Ok(Some(TopLevelTransfer { + id: (*transaction_id, log_index), + from: *log.from.0, + amount: log.value, + data, })); } } @@ -193,7 +181,7 @@ // Panicking on a task panic is desired behavior, and we haven't aborted any tasks match top_level_transfer.unwrap() { // Top-level transfer - Ok(Some(top_level_transfer)) => top_level_transfers.push(top_level_transfer.transfer), + Ok(Some(top_level_transfer)) => top_level_transfers.push(top_level_transfer), // Not a top-level transfer Ok(None) => continue, // Failed to get this transaction's information so abort From 5f625eb19b47dfad48da78c7ed06ddfd6fa45e1b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 01:24:28 -0400 Subject: [PATCH 175/179] Have the Router track its deployment block Prevents a consensus split where some nodes would drop transfers if their node didn't think the Router was deployed, and some would handle them. --- .../ethereum/router/contracts/Router.sol | 9 ++++++++ processor/ethereum/router/src/lib.rs | 21 +++++++++++++++++-- processor/ethereum/src/rpc.rs | 12 +++++++++-- 3 files changed, 38 insertions(+), 4 deletions(-) diff --git a/processor/ethereum/router/contracts/Router.sol b/processor/ethereum/router/contracts/Router.sol index 9100f59e6..d82c0d908 100644 --- a/processor/ethereum/router/contracts/Router.sol +++ b/processor/ethereum/router/contracts/Router.sol @@ -7,6 +7,9 @@ import "Schnorr.sol"; // _ is used as a prefix for internal functions and smart-contract-scoped variables contract Router { + // The block at which this contract was deployed. 
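+ // Recording this on-chain lets indexers skip blocks prior to deployment without risking a
+ // consensus split between nodes holding differing views of when (or whether) the Router
+ // was deployed.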
+ uint256 private _deploymentBlock; + // Nonce is incremented for each command executed, preventing replays uint256 private _nonce; @@ -63,6 +66,8 @@ } constructor(bytes32 initialSeraiKey) _updateSeraiKeyAtEndOfFn(0, initialSeraiKey) { + _deploymentBlock = block.number; + // We consumed nonce 0 when setting the initial Serai key _nonce = 1; // Nonces are incremented by 1 upon account creation, prior to any code execution, per EIP-161 @@ -230,6 +235,10 @@ return _nonce; } + function deploymentBlock() external view returns (uint256) { + return _deploymentBlock; + } + function smartContractNonce() external view returns (uint256) { return _smartContractNonce; } diff --git a/processor/ethereum/router/src/lib.rs b/processor/ethereum/router/src/lib.rs index 248523b8d..d78b32183 100644 --- a/processor/ethereum/router/src/lib.rs +++ b/processor/ethereum/router/src/lib.rs @@ -11,7 +11,7 @@ use alloy_consensus::TxLegacy; use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; -use alloy_rpc_types_eth::Filter; +use alloy_rpc_types_eth::{TransactionInput, TransactionRequest, Filter}; use alloy_transport::{TransportErrorKind, RpcError}; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; @@ -296,6 +296,23 @@ impl Router { self.1 } + /// Fetch the block this contract was deployed at. + pub async fn deployment_block(&self) -> Result<u64, RpcError<TransportErrorKind>> { + let call = TransactionRequest::default() + .to(self.address()) + .input(TransactionInput::new(abi::deploymentBlockCall::new(()).abi_encode().into())); + let bytes = self.0.call(&call).await?; + let deployment_block = abi::deploymentBlockCall::abi_decode_returns(&bytes, true) + .map_err(|e| { + TransportErrorKind::Custom( + format!("node returned a non-u256 for function returning u256: {e:?}").into(), + ) + })? + ._0; + + Ok(deployment_block.try_into().unwrap()) + } + /// Get the message to be signed in order to update the key for Serai. pub fn update_serai_key_message(chain_id: U256, nonce: u64, key: &PublicKey) -> Vec<u8> { ( diff --git a/processor/ethereum/src/rpc.rs b/processor/ethereum/src/rpc.rs index 1eaa4988e..a55334848 100644 --- a/processor/ethereum/src/rpc.rs +++ b/processor/ethereum/src/rpc.rs @@ -156,10 +156,12 @@ impl<D: Db> ScannerFeed for Rpc<D> { // The Router wasn't deployed yet so we cannot have any on-chain interactions // If the Router has been deployed by the block we've synced to, it won't have any events // for these blocks anyways, so this doesn't risk a consensus split - // TODO: This does as we can have top-level transfers to the router before it's deployed return Ok(FullEpoch { epoch, instructions, executed }); }; + let router_deployment_block = router.deployment_block().await?; + + // TODO: Use a LocalSet and handle all these in parallel let mut to_check = epoch.end_hash; while to_check != epoch.prior_end_hash { let to_check_block = self .provider .get_block(B256::from(to_check).into(), BlockTransactionsKind::Hashes) .await? .ok_or_else(|| { TransportErrorKind::Custom( format!("ethereum node didn't have requested block: {}", hex::encode(to_check)).into(), ) })? 
.header; + // If this is before the Router was deployed, move on + if to_check_block.number < router_deployment_block { + // This is sa + break; + } + instructions.append( &mut router.in_instructions(to_check_block.number, &HashSet::from(TOKENS)).await?, ); @@ -187,7 +195,7 @@ .await? { instructions.push(EthereumInInstruction { - id: (id, u64::MAX), + id, from, coin: EthereumCoin::Erc20(token), amount, From fe1342d64cf31407529f45942eee2707d2430d40 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 02:06:35 -0400 Subject: [PATCH 176/179] Don't track deployment block in the Router This technically has a TOCTOU where we sync an Epoch's metadata (signifying we did sync to that point), then check if the Router was deployed, yet at that very moment the node resets to genesis. By ensuring the Router is deployed, we avoid this (and don't need to track the deployment block in-contract). Also uses a JoinSet to sync the 32 blocks in parallel. --- .../ethereum/router/contracts/Router.sol | 9 -- processor/ethereum/router/src/lib.rs | 19 +---- processor/ethereum/src/rpc.rs | 84 +++++++++++-------- 3 files changed, 49 insertions(+), 63 deletions(-) diff --git a/processor/ethereum/router/contracts/Router.sol b/processor/ethereum/router/contracts/Router.sol index d82c0d908..9100f59e6 100644 --- a/processor/ethereum/router/contracts/Router.sol +++ b/processor/ethereum/router/contracts/Router.sol @@ -7,9 +7,6 @@ import "Schnorr.sol"; // _ is used as a prefix for internal functions and smart-contract-scoped variables contract Router { - // The block at which this contract was deployed. - uint256 private _deploymentBlock; - // Nonce is incremented for each command executed, preventing replays uint256 private _nonce; @@ -66,8 +63,6 @@ } constructor(bytes32 initialSeraiKey) _updateSeraiKeyAtEndOfFn(0, initialSeraiKey) { - _deploymentBlock = block.number; - // We consumed nonce 0 when setting the initial Serai key _nonce = 1; @@ -235,10 +230,6 @@ return _nonce; } - function deploymentBlock() external view returns (uint256) { - return _deploymentBlock; - } - function smartContractNonce() external view returns (uint256) { return _smartContractNonce; } diff --git a/processor/ethereum/router/src/lib.rs b/processor/ethereum/router/src/lib.rs index d78b32183..7a7cffd82 100644 --- a/processor/ethereum/router/src/lib.rs +++ b/processor/ethereum/router/src/lib.rs @@ -11,7 +11,7 @@ use alloy_consensus::TxLegacy; use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; -use alloy_rpc_types_eth::{TransactionInput, TransactionRequest, Filter}; +use alloy_rpc_types_eth::Filter; use alloy_transport::{TransportErrorKind, RpcError}; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; @@ -296,23 +296,6 @@ impl Router { self.1 } - /// Fetch the block this contract was deployed at. - pub async fn deployment_block(&self) -> Result<u64, RpcError<TransportErrorKind>> { - let call = TransactionRequest::default() - .to(self.address()) - .input(TransactionInput::new(abi::deploymentBlockCall::new(()).abi_encode().into())); - let bytes = self.0.call(&call).await?; - let deployment_block = abi::deploymentBlockCall::abi_decode_returns(&bytes, true) - .map_err(|e| { - TransportErrorKind::Custom( - format!("node returned a non-u256 for function returning u256: {e:?}").into(), - ) - })? 
- ._0; - - Ok(deployment_block.try_into().unwrap()) - } - /// Get the message to be signed in order to update the key for Serai. pub fn update_serai_key_message(chain_id: U256, nonce: u64, key: &PublicKey) -> Vec<u8> { ( diff --git a/processor/ethereum/src/rpc.rs b/processor/ethereum/src/rpc.rs index a55334848..7f8a422b4 100644 --- a/processor/ethereum/src/rpc.rs +++ b/processor/ethereum/src/rpc.rs @@ -2,20 +2,23 @@ use core::future::Future; use std::{sync::Arc, collections::HashSet}; use alloy_core::primitives::B256; -use alloy_rpc_types_eth::{BlockTransactionsKind, BlockNumberOrTag}; +use alloy_rpc_types_eth::{Header, BlockTransactionsKind, BlockNumberOrTag}; use alloy_transport::{RpcError, TransportErrorKind}; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; use serai_client::primitives::{NetworkId, Coin, Amount}; +use tokio::task::JoinSet; + use serai_db::Db; use scanner::ScannerFeed; use ethereum_schnorr::PublicKey; use ethereum_erc20::{TopLevelTransfer, Erc20}; -use ethereum_router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction, Router}; +#[rustfmt::skip] +use ethereum_router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction, Executed, Router}; use crate::{ TOKENS, ETHER_DUST, DAI_DUST, InitialSeraiKey, block::{Epoch, FullEpoch}, }; @@ -141,8 +144,6 @@ ) -> impl Send + Future<Output = Result<Self::Block, Self::EphemeralError>> { async move { let epoch = self.unchecked_block_header_by_number(number).await?; - let mut instructions = vec![]; - let mut executed = vec![]; let Some(router) = Router::new( self.provider.clone(), @@ -153,16 +154,42 @@ ) .await? else { - // The Router wasn't deployed yet so we cannot have any on-chain interactions - // If the Router has been deployed by the block we've synced to, it won't have any events - // for these blocks anyways, so this doesn't risk a consensus split - return Ok(FullEpoch { epoch, instructions, executed }); + Err(TransportErrorKind::Custom("router wasn't deployed on-chain yet".to_string().into()))? }; - let router_deployment_block = router.deployment_block().await?; + async fn sync_block( + provider: Arc<RootProvider<SimpleRequest>>, + router: Router, + block: Header, + ) -> Result<(Vec<EthereumInInstruction>, Vec<Executed>), RpcError<TransportErrorKind>> { + let mut instructions = router.in_instructions(block.number, &HashSet::from(TOKENS)).await?; + + for token in TOKENS { + for TopLevelTransfer { id, from, amount, data } in Erc20::new(provider.clone(), token) + .top_level_transfers(block.number, router.address()) + .await? + { + instructions.push(EthereumInInstruction { + id, + from, + coin: EthereumCoin::Erc20(token), + amount, + data, + }); + } + } + + let executed = router.executed(block.number).await?; + + Ok((instructions, executed)) + } - // TODO: Use a LocalSet and handle all these in parallel + // We use JoinSet here to minimize the latency of the variety of requests we make. For each + // JoinError that may occur, we unwrap it as no underlying tasks should panic + let mut join_set = JoinSet::new(); let mut to_check = epoch.end_hash; + // TODO: This makes 32 sequential requests. We should run them in parallel using block + // numbers while to_check != epoch.prior_end_hash { let to_check_block = self .provider .get_block(B256::from(to_check).into(), BlockTransactionsKind::Hashes) .await? .ok_or_else(|| { TransportErrorKind::Custom( format!("ethereum node didn't have requested block: {}", hex::encode(to_check)).into(), ) })? 
.header; - // If this is before the Router was deployed, move on - if to_check_block.number < router_deployment_block { - // This is sa - break; - } - - instructions.append( - &mut router.in_instructions(to_check_block.number, &HashSet::from(TOKENS)).await?, - ); - for token in TOKENS { - for TopLevelTransfer { id, from, amount, data } in - Erc20::new(self.provider.clone(), token) - .top_level_transfers(to_check_block.number, router.address()) - .await? - { - instructions.push(EthereumInInstruction { - id, - from, - coin: EthereumCoin::Erc20(token), - amount, - data, - }); - } - } + // Update the next block to check + to_check = *to_check_block.parent_hash; - executed.append(&mut router.executed(to_check_block.number).await?); + // Spawn a task to sync this block + join_set.spawn(sync_block(self.provider.clone(), router.clone(), to_check_block)); + } - to_check = *to_check_block.parent_hash; + let mut instructions = vec![]; + let mut executed = vec![]; + while let Some(instructions_and_executed) = join_set.join_next().await { + let (mut these_instructions, mut these_executed) = instructions_and_executed.unwrap()?; + instructions.append(&mut these_instructions); + executed.append(&mut these_executed); } Ok(FullEpoch { epoch, instructions, executed }) From 5a2a27899e81ce0c59da9922cc227cb53edc9333 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 02:12:26 -0400 Subject: [PATCH 177/179] Misc comments --- processor/ethereum/TODO/old_processor.rs | 14 -------------- processor/ethereum/deployer/contracts/Deployer.sol | 3 +++ processor/monero/src/scheduler.rs | 1 - processor/scanner/src/lib.rs | 1 + 4 files changed, 4 insertions(+), 15 deletions(-) diff --git a/processor/ethereum/TODO/old_processor.rs b/processor/ethereum/TODO/old_processor.rs index 50250c435..a7e85a5ce 100644 --- a/processor/ethereum/TODO/old_processor.rs +++ b/processor/ethereum/TODO/old_processor.rs @@ -53,20 +53,6 @@ TODO } } - #[cfg(test)] - async fn get_block_number(&self, id: &<Self::Block as Block<Self>>::Id) -> usize { - self - .provider - .get_block(B256::from(*id).into(), BlockTransactionsKind::Hashes) - .await - .unwrap() - .unwrap() - .header - .number - .try_into() - .unwrap() - } - #[cfg(test)] async fn get_transaction_by_eventuality( &self, diff --git a/processor/ethereum/deployer/contracts/Deployer.sol b/processor/ethereum/deployer/contracts/Deployer.sol index 2d4904e40..a7dac1d39 100644 --- a/processor/ethereum/deployer/contracts/Deployer.sol +++ b/processor/ethereum/deployer/contracts/Deployer.sol @@ -31,6 +31,9 @@ pragma solidity ^0.8.26; The alternative would be to have a council publish the Serai key on-Ethereum, with Serai verifying the published result. This would introduce a DoS risk in the council not publishing the correct key/not publishing any key. + + This design does not work with designs expecting initialization (which may require re-deploying + the same code until the initialization successfully goes through, without being sniped). */ contract Deployer { diff --git a/processor/monero/src/scheduler.rs b/processor/monero/src/scheduler.rs index 667840f6b..489db8105 100644 --- a/processor/monero/src/scheduler.rs +++ b/processor/monero/src/scheduler.rs @@ -88,7 +88,6 @@ async fn signable_transaction( // It is a reused value (with later code), but that's not an issue. 
Just an oddity &mut ChaCha20Rng::from_seed(id), &rpc.rpc, - // TODO: Have Decoys take RctType match rct_type { RctType::ClsagBulletproof => 11, RctType::ClsagBulletproofPlus => 16, }, diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs index 6db079893..5046753cf 100644 --- a/processor/scanner/src/lib.rs +++ b/processor/scanner/src/lib.rs @@ -260,6 +260,7 @@ impl SchedulerUpdate { pub type KeyScopedEventualities<S> = HashMap<KeyFor<S>, Vec<EventualityFor<S>>>; /// The object responsible for accumulating outputs and planning new transactions. +// TODO: Move this to Scheduler primitives pub trait Scheduler: 'static + Send { /// An error encountered when handling updates/payments. /// From 862ab5cb4d667eb0fbc3aa7f33a1fb4d9d3546e5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 02:20:59 -0400 Subject: [PATCH 178/179] Move old processor/src directory to processor/TODO --- processor/{src => TODO}/main.rs | 16 ---------------- processor/{src => TODO}/tests/addresses.rs | 2 ++ processor/{src => TODO}/tests/batch_signer.rs | 2 ++ processor/{src => TODO}/tests/cosigner.rs | 2 ++ processor/{src => TODO}/tests/key_gen.rs | 2 ++ processor/{src => TODO}/tests/literal/mod.rs | 2 ++ processor/{src => TODO}/tests/mod.rs | 2 ++ processor/{src => TODO}/tests/scanner.rs | 2 ++ processor/{src => TODO}/tests/signer.rs | 2 ++ processor/{src => TODO}/tests/wallet.rs | 2 ++ 10 files changed, 18 insertions(+), 16 deletions(-) rename processor/{src => TODO}/main.rs (74%) rename processor/{src => TODO}/tests/addresses.rs (99%) rename processor/{src => TODO}/tests/batch_signer.rs (99%) rename processor/{src => TODO}/tests/cosigner.rs (99%) rename processor/{src => TODO}/tests/key_gen.rs (99%) rename processor/{src => TODO}/tests/literal/mod.rs (99%) rename processor/{src => TODO}/tests/mod.rs (99%) rename processor/{src => TODO}/tests/scanner.rs (99%) rename processor/{src => TODO}/tests/signer.rs (99%) rename processor/{src => TODO}/tests/wallet.rs (99%) diff --git a/processor/src/main.rs b/processor/TODO/main.rs similarity index 74% rename from processor/src/main.rs rename to processor/TODO/main.rs index b4a5053a4..1458a7fc3 100644 --- a/processor/src/main.rs +++ b/processor/TODO/main.rs @@ -59,19 +59,3 @@ async fn handle_coordinator_msg( } } } - -#[tokio::main] -async fn main() { - match network_id { - #[cfg(feature = "ethereum")] - NetworkId::Ethereum => { - let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") - .expect("ethereum relayer hostname wasn't specified") - .to_string(); - let relayer_port = - env::var("ETHEREUM_RELAYER_PORT").expect("ethereum relayer port wasn't specified"); - let relayer_url = relayer_hostname + ":" + &relayer_port; - run(db.clone(), Ethereum::new(db, url, relayer_url).await, coordinator).await - } - } -} diff --git a/processor/src/tests/addresses.rs b/processor/TODO/tests/addresses.rs similarity index 99% rename from processor/src/tests/addresses.rs rename to processor/TODO/tests/addresses.rs index 3d4d6d4c1..1a06963a0 100644 --- a/processor/src/tests/addresses.rs +++ b/processor/TODO/tests/addresses.rs @@ -1,3 +1,5 @@ +// TODO + use core::{time::Duration, pin::Pin, future::Future}; use std::collections::HashMap; 
std::collections::HashMap; use rand_core::{RngCore, OsRng}; diff --git a/processor/src/tests/cosigner.rs b/processor/TODO/tests/cosigner.rs similarity index 99% rename from processor/src/tests/cosigner.rs rename to processor/TODO/tests/cosigner.rs index a66161bf7..98116bc35 100644 --- a/processor/src/tests/cosigner.rs +++ b/processor/TODO/tests/cosigner.rs @@ -1,3 +1,5 @@ +// TODO + use std::collections::HashMap; use rand_core::{RngCore, OsRng}; diff --git a/processor/src/tests/key_gen.rs b/processor/TODO/tests/key_gen.rs similarity index 99% rename from processor/src/tests/key_gen.rs rename to processor/TODO/tests/key_gen.rs index 43f0de058..116db11e5 100644 --- a/processor/src/tests/key_gen.rs +++ b/processor/TODO/tests/key_gen.rs @@ -1,3 +1,5 @@ +// TODO + use std::collections::HashMap; use zeroize::Zeroizing; diff --git a/processor/src/tests/literal/mod.rs b/processor/TODO/tests/literal/mod.rs similarity index 99% rename from processor/src/tests/literal/mod.rs rename to processor/TODO/tests/literal/mod.rs index d45649d59..b1285e634 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/TODO/tests/literal/mod.rs @@ -1,3 +1,5 @@ +// TODO + use dockertest::{ PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image, TestBodySpecification, DockerOperations, DockerTest, diff --git a/processor/src/tests/mod.rs b/processor/TODO/tests/mod.rs similarity index 99% rename from processor/src/tests/mod.rs rename to processor/TODO/tests/mod.rs index 7ab57bdef..4691e523c 100644 --- a/processor/src/tests/mod.rs +++ b/processor/TODO/tests/mod.rs @@ -1,3 +1,5 @@ +// TODO + use std::sync::OnceLock; mod key_gen; diff --git a/processor/src/tests/scanner.rs b/processor/TODO/tests/scanner.rs similarity index 99% rename from processor/src/tests/scanner.rs rename to processor/TODO/tests/scanner.rs index a40e465c8..6ad87f785 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/TODO/tests/scanner.rs @@ -1,3 +1,5 @@ +// TODO + use core::{pin::Pin, time::Duration, future::Future}; use std::sync::Arc; diff --git a/processor/src/tests/signer.rs b/processor/TODO/tests/signer.rs similarity index 99% rename from processor/src/tests/signer.rs rename to processor/TODO/tests/signer.rs index 6b4456081..e35a048b0 100644 --- a/processor/src/tests/signer.rs +++ b/processor/TODO/tests/signer.rs @@ -1,3 +1,5 @@ +// TODO + use core::{pin::Pin, future::Future}; use std::collections::HashMap; diff --git a/processor/src/tests/wallet.rs b/processor/TODO/tests/wallet.rs similarity index 99% rename from processor/src/tests/wallet.rs rename to processor/TODO/tests/wallet.rs index 0451f30c3..f78a16f5c 100644 --- a/processor/src/tests/wallet.rs +++ b/processor/TODO/tests/wallet.rs @@ -1,3 +1,5 @@ +// TODO + use core::{time::Duration, pin::Pin, future::Future}; use std::collections::HashMap; From d40a77211919e992e0b1d90b9ab1f5cdd65b0cb6 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 20 Sep 2024 02:30:08 -0400 Subject: [PATCH 179/179] machete, drain > mem::swap for clarity reasons --- Cargo.lock | 2 -- processor/bin/src/lib.rs | 18 ++++++++---------- processor/ethereum/Cargo.toml | 1 - processor/ethereum/router/Cargo.toml | 1 - 4 files changed, 8 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 12da8dd69..131081ab0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8326,7 +8326,6 @@ version = "0.1.0" name = "serai-ethereum-processor" version = "0.1.0" dependencies = [ - "alloy-consensus", "alloy-core", "alloy-provider", "alloy-rlp", @@ -8716,7 +8715,6 @@ dependencies = [ 
"build-solidity-contracts", "ethereum-schnorr-contract", "group", - "k256", "serai-client", "serai-processor-ethereum-deployer", "serai-processor-ethereum-erc20", diff --git a/processor/bin/src/lib.rs b/processor/bin/src/lib.rs index 86a3a0cd7..7d98f8127 100644 --- a/processor/bin/src/lib.rs +++ b/processor/bin/src/lib.rs @@ -284,21 +284,19 @@ pub async fn main_loop< let key_to_activate = KeyToActivate::>::try_recv(txn.as_mut().unwrap()).map(|key| key.0); - /* - `acknowledge_batch` takes burns to optimize handling returns with standard payments. - That's why handling these with a Batch (and not waiting until the following potential - `queue_burns` call makes sense. As for which Batch, the first is equally valid unless - we want to start introspecting (and should be our only Batch anyways). - */ - let mut this_batchs_burns = vec![]; - std::mem::swap(&mut burns, &mut this_batchs_burns); - // This is a cheap call as it internally just queues this to be done later let _: () = scanner.acknowledge_batch( txn.take().unwrap(), id, in_instructions, - this_batchs_burns, + /* + `acknowledge_batch` takes burns to optimize handling returns with standard + payments. That's why handling these with a Batch (and not waiting until the + following potential `queue_burns` call makes sense. As for which Batch, the first + is equally valid unless we want to start introspecting (and should be our only + Batch anyways). + */ + burns.drain(..).collect(), key_to_activate, ); } diff --git a/processor/ethereum/Cargo.toml b/processor/ethereum/Cargo.toml index c2a6f581d..139786316 100644 --- a/processor/ethereum/Cargo.toml +++ b/processor/ethereum/Cargo.toml @@ -32,7 +32,6 @@ k256 = { version = "^0.13.1", default-features = false, features = ["std"] } alloy-core = { version = "0.8", default-features = false } alloy-rlp = { version = "0.3", default-features = false } -alloy-consensus = { version = "0.3", default-features = false } alloy-rpc-types-eth = { version = "0.3", default-features = false } alloy-transport = { version = "0.3", default-features = false } diff --git a/processor/ethereum/router/Cargo.toml b/processor/ethereum/router/Cargo.toml index e8884eae6..d21a26d9c 100644 --- a/processor/ethereum/router/Cargo.toml +++ b/processor/ethereum/router/Cargo.toml @@ -18,7 +18,6 @@ workspace = true [dependencies] group = { version = "0.13", default-features = false } -k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] } alloy-core = { version = "0.8", default-features = false } alloy-consensus = { version = "0.3", default-features = false }