diff --git a/.github/workflows/fendermint-publish.yaml b/.github/workflows/fendermint-publish.yaml index 5ffd336dc..0f1cbc053 100644 --- a/.github/workflows/fendermint-publish.yaml +++ b/.github/workflows/fendermint-publish.yaml @@ -80,10 +80,6 @@ jobs: echo "IMAGE_TAG=$IMAGE_ID:$VERSION" >> $GITHUB_OUTPUT - # Pull the latest docker image. Its layers might be reused during the build. - - name: Docker Pull - run: docker pull ghcr.io/consensus-shipyard/fendermint:latest - - name: Docker Deps run: | cd fendermint && make docker-deps diff --git a/.github/workflows/fendermint-test.yaml b/.github/workflows/fendermint-test.yaml index 62b6c71d3..1dcfeda4e 100644 --- a/.github/workflows/fendermint-test.yaml +++ b/.github/workflows/fendermint-test.yaml @@ -64,6 +64,18 @@ jobs: RUSTFLAGS: -Dwarnings steps: + # https://github.com/marketplace/actions/free-disk-space-ubuntu + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + tool-cache: false + large-packages: false + swap-storage: false + docker-images: false + android: true + dotnet: true + haskell: true + - name: Check out the project uses: actions/checkout@v3 with: @@ -90,11 +102,7 @@ jobs: path: ./contracts/out key: contracts-abi-${{ hashFiles('./contracts/src/**/*.sol') }} - # Pull the latest docker image. Its layers might be reused during the build. 
- - name: Docker Pull - run: docker pull ghcr.io/consensus-shipyard/fendermint:latest - - name: ${{ matrix.make.name }} env: PROMTAIL_CLIENT_URL: ${{ secrets.PROMTAIL_CLIENT_URL }} - run: cd fendermint && make ${{ matrix.make.task }} \ No newline at end of file + run: cd fendermint && make ${{ matrix.make.task }} diff --git a/Cargo.lock b/Cargo.lock index 9c0dafdcb..084149377 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3028,6 +3028,49 @@ dependencies = [ "serde_json", ] +[[package]] +name = "fendermint_graph_test" +version = "0.1.0" + +[[package]] +name = "fendermint_materializer" +version = "0.1.0" +dependencies = [ + "anyhow", + "arbitrary", + "async-recursion", + "async-trait", + "bollard", + "env_logger 0.10.2", + "ethers", + "fendermint_crypto", + "fendermint_materializer", + "fendermint_testing", + "fendermint_vm_actor_interface", + "fendermint_vm_core", + "fendermint_vm_encoding", + "fendermint_vm_genesis", + "futures", + "fvm_shared", + "hex", + "ipc-api", + "lazy_static", + "multihash 0.18.1", + "quickcheck", + "quickcheck_async", + "quickcheck_macros", + "serde", + "serde_json", + "serde_with 2.3.3", + "serde_yaml", + "serial_test", + "tempfile", + "tendermint-rpc", + "tokio", + "toml 0.8.10", + "tracing", +] + [[package]] name = "fendermint_rocksdb" version = "0.1.0" @@ -3118,44 +3161,6 @@ dependencies = [ "toml 0.8.10", ] -[[package]] -name = "fendermint_testing_materializer" -version = "0.1.0" -dependencies = [ - "anyhow", - "arbitrary", - "async-recursion", - "async-trait", - "bollard", - "env_logger 0.10.2", - "ethers", - "fendermint_crypto", - "fendermint_testing", - "fendermint_testing_materializer", - "fendermint_vm_actor_interface", - "fendermint_vm_core", - "fendermint_vm_encoding", - "fendermint_vm_genesis", - "futures", - "fvm_shared", - "hex", - "ipc-api", - "lazy_static", - "multihash 0.18.1", - "quickcheck", - "quickcheck_async", - "quickcheck_macros", - "serde", - "serde_json", - "serde_with 2.3.3", - "serde_yaml", - "tempfile", - 
"tendermint-rpc", - "tokio", - "toml 0.8.10", - "tracing", -] - [[package]] name = "fendermint_vm_actor_interface" version = "0.1.0" @@ -4252,10 +4257,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "graph-test" -version = "0.1.0" - [[package]] name = "group" version = "0.12.1" diff --git a/fendermint/.gitignore b/fendermint/.gitignore index f2d32d68b..51b2a211f 100644 --- a/fendermint/.gitignore +++ b/fendermint/.gitignore @@ -4,6 +4,7 @@ docker/.artifacts docker/Dockerfile cometbft test-network +testing/materializer/tests/docker-materializer-data .idea .make .contracts-gen diff --git a/fendermint/Makefile b/fendermint/Makefile index 2c0c6db8c..b9160e198 100644 --- a/fendermint/Makefile +++ b/fendermint/Makefile @@ -38,21 +38,25 @@ install: $(IPC_ACTORS_GEN) cargo install --locked --path app # Using --release for testing because wasm can otherwise be slow. +# Excluding the materializer tests because they rely on docker; we could test with --lib test: $(BUILTIN_ACTORS_BUNDLE) $(IPC_ACTORS_GEN) FM_BUILTIN_ACTORS_BUNDLE=$(BUILTIN_ACTORS_BUNDLE) \ FM_CUSTOM_ACTORS_BUNDLE=$(CUSTOM_ACTORS_BUNDLE) \ FM_CONTRACTS_DIR=$(IPC_ACTORS_OUT) \ - cargo test --release $(PACKAGE) + cargo test --release $(shell echo $(PACKAGE) | sed 's/--package fendermint_materializer//g') +# Not using --release beause the build has been done in docker and the wasm code runs inside the container. 
e2e: docker-build | cargo-make cd testing/smoke-test && cargo make --profile $(PROFILE) cd testing/snapshot-test && cargo make --profile $(PROFILE) cd testing/graph-test && cargo make --profile $(PROFILE) + PROFILE=$(PROFILE) cargo test --package fendermint_materializer clean: cargo clean rm $(BUILTIN_ACTORS_BUNDLE) rm $(CUSTOM_ACTORS_BUNDLE) + rm -rf testing/materializer/tests/docker-materializer lint: \ check-fmt \ diff --git a/fendermint/abci/Cargo.toml b/fendermint/abci/Cargo.toml index a468fc79c..e3388d04b 100644 --- a/fendermint/abci/Cargo.toml +++ b/fendermint/abci/Cargo.toml @@ -10,6 +10,7 @@ license.workspace = true async-trait = { workspace = true } futures = { workspace = true } tower = "0.4" +tracing = { workspace = true } tower-abci = { workspace = true } tendermint = { workspace = true } @@ -20,5 +21,4 @@ async-stm = { workspace = true } im = { workspace = true } structopt = "0.3" tokio = { workspace = true } -tracing = { workspace = true } tracing-subscriber = { workspace = true } diff --git a/fendermint/abci/src/application.rs b/fendermint/abci/src/application.rs index 400a32bcd..1e9b0fe18 100644 --- a/fendermint/abci/src/application.rs +++ b/fendermint/abci/src/application.rs @@ -162,28 +162,34 @@ where let res = async move { let res = match req { - Request::Echo(r) => Response::Echo(app.echo(r).await?), - Request::Info(r) => Response::Info(app.info(r).await?), - Request::InitChain(r) => Response::InitChain(app.init_chain(r).await?), - Request::Query(r) => Response::Query(app.query(r).await?), - Request::CheckTx(r) => Response::CheckTx(app.check_tx(r).await?), + Request::Echo(r) => Response::Echo(log_error(app.echo(r).await)?), + Request::Info(r) => Response::Info(log_error(app.info(r).await)?), + Request::InitChain(r) => Response::InitChain(log_error(app.init_chain(r).await)?), + Request::Query(r) => Response::Query(log_error(app.query(r).await)?), + Request::CheckTx(r) => Response::CheckTx(log_error(app.check_tx(r).await)?), 
Request::PrepareProposal(r) => { - Response::PrepareProposal(app.prepare_proposal(r).await?) + Response::PrepareProposal(log_error(app.prepare_proposal(r).await)?) } Request::ProcessProposal(r) => { - Response::ProcessProposal(app.process_proposal(r).await?) + Response::ProcessProposal(log_error(app.process_proposal(r).await)?) + } + Request::BeginBlock(r) => { + Response::BeginBlock(log_error(app.begin_block(r).await)?) + } + Request::DeliverTx(r) => Response::DeliverTx(log_error(app.deliver_tx(r).await)?), + Request::EndBlock(r) => Response::EndBlock(log_error(app.end_block(r).await)?), + Request::Commit => Response::Commit(log_error(app.commit().await)?), + Request::ListSnapshots => { + Response::ListSnapshots(log_error(app.list_snapshots().await)?) + } + Request::OfferSnapshot(r) => { + Response::OfferSnapshot(log_error(app.offer_snapshot(r).await)?) } - Request::BeginBlock(r) => Response::BeginBlock(app.begin_block(r).await?), - Request::DeliverTx(r) => Response::DeliverTx(app.deliver_tx(r).await?), - Request::EndBlock(r) => Response::EndBlock(app.end_block(r).await?), - Request::Commit => Response::Commit(app.commit().await?), - Request::ListSnapshots => Response::ListSnapshots(app.list_snapshots().await?), - Request::OfferSnapshot(r) => Response::OfferSnapshot(app.offer_snapshot(r).await?), Request::LoadSnapshotChunk(r) => { - Response::LoadSnapshotChunk(app.load_snapshot_chunk(r).await?) + Response::LoadSnapshotChunk(log_error(app.load_snapshot_chunk(r).await)?) } Request::ApplySnapshotChunk(r) => { - Response::ApplySnapshotChunk(app.apply_snapshot_chunk(r).await?) + Response::ApplySnapshotChunk(log_error(app.apply_snapshot_chunk(r).await)?) 
} Request::Flush => panic!("Flush should be handled by the Server!"), }; @@ -192,3 +198,10 @@ where res.boxed() } } + +fn log_error(res: AbciResult) -> AbciResult { + if let Err(ref e) = res { + tracing::error!("failed to execute ABCI request: {e:#}"); + } + res +} diff --git a/fendermint/docker/runner.Dockerfile b/fendermint/docker/runner.Dockerfile index d02c443eb..487b7b38f 100644 --- a/fendermint/docker/runner.Dockerfile +++ b/fendermint/docker/runner.Dockerfile @@ -15,6 +15,7 @@ ENV HOME=$FM_HOME_DIR WORKDIR $FM_HOME_DIR EXPOSE 26658 +EXPOSE 8445 ENTRYPOINT ["docker-entry.sh"] CMD ["run"] diff --git a/fendermint/eth/api/examples/ethers.rs b/fendermint/eth/api/examples/ethers.rs index 3c6499e97..944a7e874 100644 --- a/fendermint/eth/api/examples/ethers.rs +++ b/fendermint/eth/api/examples/ethers.rs @@ -81,11 +81,11 @@ pub struct Options { /// Secret key used to send funds, expected to be in Base64 format. /// /// Assumed to exist with a non-zero balance. - #[arg(long, short)] + #[arg(long)] pub secret_key_from: PathBuf, /// Secret key used to receive funds, expected to be in Base64 format. - #[arg(long, short)] + #[arg(long)] pub secret_key_to: PathBuf, /// Enable DEBUG logs. 
diff --git a/fendermint/testing/graph-test/Cargo.toml b/fendermint/testing/graph-test/Cargo.toml index c17d99e2d..1d9a436c4 100644 --- a/fendermint/testing/graph-test/Cargo.toml +++ b/fendermint/testing/graph-test/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "graph-test" +name = "fendermint_graph_test" version = "0.1.0" edition = "2021" diff --git a/fendermint/testing/graph-test/Makefile.toml b/fendermint/testing/graph-test/Makefile.toml index ce4ea7967..ec47e8206 100644 --- a/fendermint/testing/graph-test/Makefile.toml +++ b/fendermint/testing/graph-test/Makefile.toml @@ -30,7 +30,7 @@ run_task = { name = [ [tasks.greeter-example] script = """ cd ${CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}/fendermint -cargo run -p fendermint_eth_api --release --example greeter -- \ +cargo run -p fendermint_eth_api --example greeter -- \ --secret-key testing/graph-test/test-data/keys/veronica.sk \ --out ${TEST_DATA_DIR}/greeter.json \ ${VERBOSITY} diff --git a/fendermint/testing/materializer/Cargo.toml b/fendermint/testing/materializer/Cargo.toml index aa89ff08e..7808f15c1 100644 --- a/fendermint/testing/materializer/Cargo.toml +++ b/fendermint/testing/materializer/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "fendermint_testing_materializer" +name = "fendermint_materializer" description = "Toolkit to instantiate testnets" version = "0.1.0" authors.workspace = true @@ -14,8 +14,10 @@ async-recursion = { workspace = true } async-trait = { workspace = true } bollard = { workspace = true } ethers = { workspace = true } +futures = { workspace = true } fvm_shared = { workspace = true } hex = { workspace = true } +lazy_static = { workspace = true } multihash = { workspace = true } serde = { workspace = true } serde_with = { workspace = true } @@ -25,7 +27,6 @@ tendermint-rpc = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } -lazy_static = { workspace = true, optional = true } arbitrary = { workspace = true, optional = true } quickcheck = { workspace = true, 
optional = true } @@ -47,12 +48,13 @@ quickcheck_async = { workspace = true } quickcheck_macros = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } +serial_test = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } toml = { workspace = true } # Enable arb on self for tests. -fendermint_testing_materializer = { path = ".", features = ["arb"] } +fendermint_materializer = { path = ".", features = ["arb"] } fendermint_testing = { path = "..", features = ["golden"] } [features] @@ -60,7 +62,6 @@ default = [] arb = [ "arbitrary", "quickcheck", - "lazy_static", "fvm_shared/arb", "fendermint_testing/arb", "fendermint_vm_genesis/arb", diff --git a/fendermint/testing/materializer/src/docker/container.rs b/fendermint/testing/materializer/src/docker/container.rs index dc13c2a50..6a0106834 100644 --- a/fendermint/testing/materializer/src/docker/container.rs +++ b/fendermint/testing/materializer/src/docker/container.rs @@ -2,31 +2,53 @@ // SPDX-License-Identifier: Apache-2.0, MIT use anyhow::{anyhow, Context}; +use futures::StreamExt; use std::collections::HashMap; use bollard::{ - container::{ListContainersOptions, RemoveContainerOptions, StopContainerOptions}, + container::{ListContainersOptions, LogsOptions}, + secret::{ContainerInspectResponse, ContainerStateStatusEnum}, service::ContainerSummary, + Docker, }; -use super::{DockerConstruct, DockerWithDropHandle}; +use super::{ + dropper::{DropCommand, DropHandle}, + DockerConstruct, +}; /// Time to wait before killing the container if it doesn't want to stop. 
const KILL_TIMEOUT_SECS: i64 = 5; pub struct DockerContainer { - pub dh: DockerWithDropHandle, - pub container: DockerConstruct, + docker: Docker, + dropper: DropHandle, + container: DockerConstruct, } impl DockerContainer { + pub fn new(docker: Docker, dropper: DropHandle, container: DockerConstruct) -> Self { + Self { + docker, + dropper, + container, + } + } + + pub fn hostname(&self) -> &str { + &self.container.name + } + /// Get a container by name, if it exists. - pub async fn get(dh: &DockerWithDropHandle, name: String) -> anyhow::Result> { + pub async fn get( + docker: Docker, + dropper: DropHandle, + name: String, + ) -> anyhow::Result> { let mut filters = HashMap::new(); filters.insert("name".to_string(), vec![name.clone()]); - let containers: Vec = dh - .docker + let containers: Vec = docker .list_containers(Some(ListContainersOptions { all: true, filters, @@ -44,7 +66,8 @@ impl DockerContainer { .ok_or_else(|| anyhow!("docker container {name} has no id"))?; Ok(Some(Self { - dh: dh.clone(), + docker, + dropper, container: DockerConstruct { id, name, @@ -54,47 +77,78 @@ impl DockerContainer { } } } + + /// Start the container, unless it's already running. + pub async fn start(&self) -> anyhow::Result<()> { + let inspect: ContainerInspectResponse = self + .docker + .inspect_container(&self.container.id, None) + .await + .with_context(|| { + format!( + "failed to inspect container: {} ({})", + self.container.name, self.container.id, + ) + })?; + + // Idempotency; we could be re-running the materializer after it failed somewhere along testnet creation. 
+ if let Some(ContainerStateStatusEnum::RUNNING) = inspect.state.and_then(|s| s.status) { + return Ok(()); + } + + eprintln!( + "STARTING CONTAINER: {} ({})", + self.container.name, self.container.id + ); + + self.docker + .start_container::<&str>(&self.container.id, None) + .await + .with_context(|| { + format!( + "failed to start container: {} ({})", + self.container.name, self.container.id + ) + })?; + + Ok(()) + } + + /// Simplistic way of collecting logs of containers used in the test, + /// mostly to debug build failures on CI. + pub async fn logs(&self) -> Vec { + let mut log_stream = self.docker.logs::<&str>( + &self.container.name, + Some(LogsOptions { + stdout: true, + stderr: true, + follow: false, + ..Default::default() + }), + ); + + let mut out = Vec::new(); + while let Some(Ok(log)) = log_stream.next().await { + out.push(log.to_string().trim().to_string()); + } + out + } } impl Drop for DockerContainer { fn drop(&mut self) { - if !self.container.external { - let container_name = self.container.name.clone(); - let docker = self.dh.docker.clone(); - self.dh.drop_handle.spawn(async move { - if let Err(e) = docker - .stop_container( - &container_name, - Some(StopContainerOptions { - t: KILL_TIMEOUT_SECS, - }), - ) - .await - { - tracing::error!( - error = e.to_string(), - container_name, - "failed to stop docker container" - ); - } - if let Err(e) = docker - .remove_container( - &container_name, - Some(RemoveContainerOptions { - force: true, - v: true, - link: true, - }), - ) - .await - { - tracing::error!( - error = e.to_string(), - container_name, - "failed to remove docker container" - ); - } - }); + if self.container.external { + return; + } + if self + .dropper + .send(DropCommand::DropContainer(self.container.name.clone())) + .is_err() + { + tracing::error!( + container_name = self.container.name, + "dropper no longer listening" + ); } } } diff --git a/fendermint/testing/materializer/src/docker/dropper.rs 
b/fendermint/testing/materializer/src/docker/dropper.rs new file mode 100644 index 000000000..279edce53 --- /dev/null +++ b/fendermint/testing/materializer/src/docker/dropper.rs @@ -0,0 +1,84 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use bollard::{ + container::{RemoveContainerOptions, StopContainerOptions}, + Docker, +}; + +/// Timeout before we kill the container if it doesn't want to stop. +const KILL_TIMEOUT_SECS: i64 = 5; + +/// Commands to destroy docker constructs when they go out of scope. +pub enum DropCommand { + DropNetwork(String), + DropContainer(String), +} + +pub type DropHandle = tokio::sync::mpsc::UnboundedSender; + +/// Start a background task to remove docker constructs. +/// +/// The loop will exit when all clones of the sender channel have been dropped. +pub fn start(docker: Docker) -> DropHandle { + let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel(); + + tokio::task::spawn(async move { + while let Some(cmd) = rx.recv().await { + match cmd { + DropCommand::DropNetwork(id) => { + eprintln!("dropping docker network {id}"); + if let Err(e) = docker.remove_network(&id).await { + eprintln!("failed to remove docker network: {e}"); + tracing::error!( + error = e.to_string(), + id, + "failed to remove docker network" + ); + } + } + DropCommand::DropContainer(id) => { + eprintln!("dropping docker container {id}"); + + if let Err(e) = docker + .stop_container( + &id, + Some(StopContainerOptions { + t: KILL_TIMEOUT_SECS, + }), + ) + .await + { + tracing::error!( + error = e.to_string(), + id, + "failed to stop docker container" + ); + } + + if let Err(e) = docker + .remove_container( + &id, + Some(RemoveContainerOptions { + force: true, + v: true, + ..Default::default() + }), + ) + .await + { + eprintln!("failed to remove container: {e}"); + + tracing::error!( + error = e.to_string(), + id, + "failed to remove docker container" + ); + } + } + } + } + }); + + tx +} diff --git 
a/fendermint/testing/materializer/src/docker/mod.rs b/fendermint/testing/materializer/src/docker/mod.rs index 97c8f4ea9..ae6cba86f 100644 --- a/fendermint/testing/materializer/src/docker/mod.rs +++ b/fendermint/testing/materializer/src/docker/mod.rs @@ -1,9 +1,14 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::Context; +use anyhow::{bail, Context}; use async_trait::async_trait; -use bollard::Docker; +use bollard::{ + container::{ListContainersOptions, RemoveContainerOptions}, + network::ListNetworksOptions, + secret::{ContainerSummary, Network}, + Docker, +}; use ethers::{ core::rand::{rngs::StdRng, SeedableRng}, types::H160, @@ -18,7 +23,7 @@ use fvm_shared::{bigint::Zero, econ::TokenAmount, version::NetworkVersion}; use ipc_api::subnet_id::SubnetID; use serde::{Deserialize, Serialize}; use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashMap}, path::{Path, PathBuf}, }; use tendermint_rpc::Url; @@ -34,9 +39,11 @@ use crate::{ }; mod container; +mod dropper; mod network; mod node; mod relayer; +mod runner; pub use network::DockerNetwork; pub use node::DockerNode; @@ -50,6 +57,16 @@ const DOCKER_ENTRY_FILE_NAME: &str = "docker-entry.sh"; const PORT_RANGE_START: u32 = 30000; const PORT_RANGE_SIZE: u32 = 100; +type Volumes = Vec<(PathBuf, &'static str)>; +type EnvVars = BTreeMap<&'static str, String>; + +#[macro_export] +macro_rules! env_vars { + ( $($key:literal => $value:expr),* $(,)? ) => { + BTreeMap::from([ $( ($key, $value.to_string()) ),* ]) + }; +} + pub struct DockerMaterials; impl Materials for DockerMaterials { @@ -64,6 +81,7 @@ impl Materials for DockerMaterials { } /// A thing constructed by docker. +#[derive(Debug, Clone)] pub struct DockerConstruct { /// Unique ID of the thing. pub id: String, @@ -74,31 +92,6 @@ pub struct DockerConstruct { pub external: bool, } -#[derive(Clone)] -pub struct DockerWithDropHandle { - /// Docker client. 
- pub docker: Docker, - /// Handle to a single threaded runtime to perform drop tasks. - pub drop_handle: tokio::runtime::Handle, -} - -impl DockerWithDropHandle { - /// Create with the handle of a given runtime. - pub fn from_runtime(docker: Docker, runtime: &tokio::runtime::Runtime) -> Self { - Self { - docker, - drop_handle: runtime.handle().clone(), - } - } - /// Create with the handle of the current runtime, for testing purposes. - pub fn from_current(docker: Docker) -> Self { - Self { - docker, - drop_handle: tokio::runtime::Handle::current(), - } - } -} - /// Allocated (inclusive) range we can use to expose containers' ports on the host. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct DockerPortRange { @@ -143,7 +136,7 @@ pub struct DockerMaterializer { dir: PathBuf, rng: StdRng, docker: bollard::Docker, - drop_runtime: tokio::runtime::Runtime, + dropper: dropper::DropHandle, state: DockerMaterializerState, } @@ -155,10 +148,7 @@ impl DockerMaterializer { Docker::connect_with_local_defaults().context("failed to connect to Docker")?; // Create a runtime for the execution of drop tasks. - let drop_runtime = tokio::runtime::Builder::new_multi_thread() - .worker_threads(1) - .build() - .context("failed to create tokio Runtime")?; + let dropper = dropper::start(docker.clone()); // Read in the state if it exists, otherwise create a default one. let state = import_json(dir.join(STATE_JSON_FILE_NAME)) @@ -169,7 +159,7 @@ impl DockerMaterializer { dir: dir.into(), rng: StdRng::seed_from_u64(seed), docker, - drop_runtime, + dropper, state, }; @@ -179,8 +169,70 @@ impl DockerMaterializer { Ok(m) } - fn docker_with_drop_handle(&self) -> DockerWithDropHandle { - DockerWithDropHandle::from_runtime(self.docker.clone(), &self.drop_runtime) + /// Remove all traces of a testnet. 
+ pub async fn remove(&mut self, testnet_name: &TestnetName) -> anyhow::Result<()> { + let testnet = testnet_name.path_string(); + + let mut filters = HashMap::new(); + filters.insert("label".to_string(), vec![format!("testnet={}", testnet)]); + + let containers: Vec = self + .docker + .list_containers(Some(ListContainersOptions { + all: true, + filters, + ..Default::default() + })) + .await + .context("failed to list docker containers")?; + + let ids = containers.into_iter().filter_map(|c| c.id); + + for id in ids { + eprintln!("removing docker container {id}"); + self.docker + .remove_container( + &id, + Some(RemoveContainerOptions { + force: true, + v: true, + ..Default::default() + }), + ) + .await + .with_context(|| format!("failed to remove container {id}"))?; + } + + let mut filters = HashMap::new(); + filters.insert("name".to_string(), vec![testnet]); + + let networks: Vec = self + .docker + .list_networks(Some(ListNetworksOptions { filters })) + .await + .context("failed to list networks")?; + + let ids = networks.into_iter().filter_map(|n| n.id); + + for id in ids { + eprintln!("removing docker network {id}"); + self.docker + .remove_network(&id) + .await + .context("failed to remove network")?; + } + + let dir = self.dir.join(testnet_name.path()); + if let Err(e) = std::fs::remove_dir_all(&dir) { + if !e.to_string().contains("No such file") { + bail!( + "failed to remove testnet directory {}: {e:?}", + dir.to_string_lossy() + ); + } + }; + + Ok(()) } /// Path to a directory based on a resource name. @@ -274,7 +326,12 @@ impl Materializer for DockerMaterializer { &mut self, testnet_name: &TestnetName, ) -> anyhow::Result<::Network> { - DockerNetwork::get_or_create(self.docker_with_drop_handle(), testnet_name.clone()).await + DockerNetwork::get_or_create( + self.docker.clone(), + self.dropper.clone(), + testnet_name.clone(), + ) + .await } /// Create a new key-value pair, or return an existing one. 
@@ -337,13 +394,13 @@ impl Materializer for DockerMaterializer { balances: BTreeMap<&'a DefaultAccount, Balance>, ) -> anyhow::Result { self.get_or_create_genesis(subnet_name, || { - let chain_name = subnet_name.path().to_string_lossy().to_string(); + let chain_name = subnet_name.path_string(); let chain_id = chainid::from_str_hashed(&chain_name)?; // TODO: Some of these hardcoded values can go into the manifest. let genesis = Genesis { chain_name, timestamp: Timestamp::current(), - network_version: NetworkVersion::MAX, + network_version: NetworkVersion::V21, base_fee: TokenAmount::zero(), power_scale: 3, validators: validators @@ -366,7 +423,8 @@ impl Materializer for DockerMaterializer { ipc: Some(IpcParams { gateway: GatewayParams { subnet_id: SubnetID::new_root(chain_id.into()), - bottom_up_check_period: 0, + // TODO: The gateway constructor doesn't allow 0 bottom-up-checkpoint-period even on the rootnet! + bottom_up_check_period: 1, majority_percentage: 67, active_validators_limit: 100, }, @@ -396,7 +454,8 @@ impl Materializer for DockerMaterializer { // and run three different containers. DockerNode::get_or_create( &self.dir, - self.docker_with_drop_handle(), + self.docker.clone(), + self.dropper.clone(), node_name, node_config, port_range, @@ -413,8 +472,8 @@ impl Materializer for DockerMaterializer { where 's: 'a, { - // Overwrite the env file which has seed addresses. - todo!("docker-compose up") + // Overwrite the env file which has seed addresses, then start the node (unless it's already running). 
+ node.start(seed_nodes).await } async fn create_subnet<'s, 'a>( diff --git a/fendermint/testing/materializer/src/docker/network.rs b/fendermint/testing/materializer/src/docker/network.rs index 40c5cd3ad..ca2e59863 100644 --- a/fendermint/testing/materializer/src/docker/network.rs +++ b/fendermint/testing/materializer/src/docker/network.rs @@ -7,14 +7,19 @@ use anyhow::{anyhow, Context}; use bollard::{ network::{CreateNetworkOptions, ListNetworksOptions}, service::{Network, NetworkCreateResponse}, + Docker, }; use crate::TestnetName; -use super::{DockerConstruct, DockerWithDropHandle}; +use super::{ + dropper::{DropCommand, DropHandle}, + DockerConstruct, +}; pub struct DockerNetwork { - dh: DockerWithDropHandle, + docker: Docker, + dropper: DropHandle, /// There is a single docker network created for the entire testnet. testnet_name: TestnetName, network: DockerConstruct, @@ -25,31 +30,30 @@ impl DockerNetwork { &self.testnet_name } - pub fn network(&self) -> &DockerConstruct { - &self.network + pub fn network_name(&self) -> &str { + &self.network.name } /// Check if an externally managed network already exists; /// if not, create a new docker network for the testnet. 
pub async fn get_or_create( - dh: DockerWithDropHandle, + docker: Docker, + dropper: DropHandle, testnet_name: TestnetName, ) -> anyhow::Result { - let network_name = testnet_name.path().to_string_lossy().to_string(); + let network_name = testnet_name.path_string(); let mut filters = HashMap::new(); filters.insert("name".to_string(), vec![network_name.clone()]); - let networks: Vec = dh - .docker + let networks: Vec = docker .list_networks(Some(ListNetworksOptions { filters })) .await .context("failed to list docker networks")?; let (id, external) = match networks.first() { None => { - let network: NetworkCreateResponse = dh - .docker + let network: NetworkCreateResponse = docker .create_network(CreateNetworkOptions { name: network_name.clone(), ..Default::default() @@ -75,7 +79,8 @@ impl DockerNetwork { }; Ok(Self { - dh, + docker, + dropper, testnet_name, network: DockerConstruct { id, @@ -88,18 +93,18 @@ impl DockerNetwork { impl Drop for DockerNetwork { fn drop(&mut self) { - if !self.network.external { - let network_name = self.network.name.clone(); - let docker = self.dh.docker.clone(); - self.dh.drop_handle.spawn(async move { - if let Err(e) = docker.remove_network(&network_name).await { - tracing::error!( - error = e.to_string(), - network_name, - "failed to remove docker network" - ); - } - }); + if self.network.external { + return; + } + if self + .dropper + .send(DropCommand::DropNetwork(self.network.name.clone())) + .is_err() + { + tracing::error!( + network_name = self.network.name, + "dropper no longer listening" + ); } } } @@ -110,20 +115,20 @@ mod tests { use std::time::Duration; use super::DockerNetwork; - use crate::{docker::DockerWithDropHandle, TestnetName}; + use crate::{docker::dropper, TestnetName}; #[tokio::test] async fn test_network() { let tn = TestnetName::new("test-network"); let docker = Docker::connect_with_local_defaults().expect("failed to connect to docker"); - let dh = DockerWithDropHandle::from_current(docker.clone()); + let 
dropper = dropper::start(docker.clone()); - let n1 = DockerNetwork::get_or_create(dh.clone(), tn.clone()) + let n1 = DockerNetwork::get_or_create(docker.clone(), dropper.clone(), tn.clone()) .await .expect("failed to create network"); - let n2 = DockerNetwork::get_or_create(dh.clone(), tn.clone()) + let n2 = DockerNetwork::get_or_create(docker.clone(), dropper.clone(), tn.clone()) .await .expect("failed to get network"); diff --git a/fendermint/testing/materializer/src/docker/node.rs b/fendermint/testing/materializer/src/docker/node.rs index 71df4e928..1863171cd 100644 --- a/fendermint/testing/materializer/src/docker/node.rs +++ b/fendermint/testing/materializer/src/docker/node.rs @@ -2,29 +2,27 @@ // SPDX-License-Identifier: Apache-2.0, MIT use std::{ - collections::{BTreeMap, HashMap}, + collections::BTreeMap, os::unix::fs::MetadataExt, path::{Path, PathBuf}, + str::FromStr, }; -use anyhow::{anyhow, Context}; -use bollard::{ - container::{Config, CreateContainerOptions, RemoveContainerOptions}, - service::{HostConfig, PortBinding}, - Docker, -}; +use anyhow::{anyhow, bail, Context}; +use bollard::Docker; use ethers::types::H160; use lazy_static::lazy_static; use super::{ - container::DockerContainer, DockerConstruct, DockerMaterials, DockerNetwork, DockerPortRange, - DockerWithDropHandle, + container::DockerContainer, dropper::DropHandle, runner::DockerRunner, DockerMaterials, + DockerPortRange, EnvVars, Volumes, }; use crate::{ docker::DOCKER_ENTRY_FILE_NAME, + env_vars, materializer::{NodeConfig, TargetConfig}, materials::export_file, - NodeName, ResourceHash, + HasCometBftApi, HasEthApi, NodeName, ResourceHash, }; // TODO: Add these to the materializer. @@ -42,6 +40,9 @@ const STATIC_ENV: &str = "static.env"; /// These go into a separate file just so it's easy to recreate them. 
const DYNAMIC_ENV: &str = "dynamic.env"; +const COMETBFT_NODE_ID: &str = "cometbft-node-id"; +const FENDERMINT_PEER_ID: &str = "fendermint-peer-id"; + const RESOLVER_P2P_PORT: u32 = 26655; const COMETBFT_P2P_PORT: u32 = 26656; const COMETBFT_RPC_PORT: u32 = 26657; @@ -54,15 +55,6 @@ lazy_static! { static ref DOCKER_ENTRY_PATH: String = format!("/opt/docker/{DOCKER_ENTRY_FILE_NAME}"); } -type EnvVars = BTreeMap<&'static str, String>; -type Volumes = Vec<(PathBuf, &'static str)>; - -macro_rules! env_vars { - ( $($key:literal => $value:expr),* $(,)? ) => { - BTreeMap::from([ $( ($key, $value.to_string()) ),* ]) - }; -} - /// A Node consists of multiple docker containers. pub struct DockerNode { /// Logical name of the node in the subnet hierarchy. @@ -79,7 +71,8 @@ pub struct DockerNode { impl DockerNode { pub async fn get_or_create<'a>( root: impl AsRef, - dh: DockerWithDropHandle, + docker: Docker, + dropper: DropHandle, node_name: &NodeName, node_config: NodeConfig<'a, DockerMaterials>, port_range: DockerPortRange, @@ -88,9 +81,12 @@ impl DockerNode { let cometbft_name = container_name(node_name, "cometbft"); let ethapi_name = container_name(node_name, "ethapi"); - let fendermint = DockerContainer::get(&dh, fendermint_name.clone()).await?; - let cometbft = DockerContainer::get(&dh, cometbft_name.clone()).await?; - let ethapi = DockerContainer::get(&dh, ethapi_name.clone()).await?; + let fendermint = + DockerContainer::get(docker.clone(), dropper.clone(), fendermint_name.clone()).await?; + let cometbft = + DockerContainer::get(docker.clone(), dropper.clone(), cometbft_name.clone()).await?; + let ethapi = + DockerContainer::get(docker.clone(), dropper.clone(), ethapi_name.clone()).await?; // Directory for the node's data volumes let node_dir = root.as_ref().join(node_name); @@ -99,6 +95,17 @@ impl DockerNode { // Get the current user ID to use with docker containers. 
let user = node_dir.metadata()?.uid(); + let make_runner = |image, volumes| { + DockerRunner::new( + docker.clone(), + dropper.clone(), + node_name.clone(), + user, + image, + volumes, + ) + }; + // Create a directory for keys let keys_dir = node_dir.join("keys"); if !keys_dir.exists() { @@ -109,79 +116,96 @@ impl DockerNode { let cometbft_dir = node_dir.join("cometbft"); if !cometbft_dir.exists() { std::fs::create_dir(&cometbft_dir)?; + } - // We'll need to run some cometbft and fendermint commands. - // NOTE: Currently the Fendermint CLI commands live in the - // `app` crate in a way that they can't be imported. We - // could move them to the `lib.rs` from `main.rs` and - // then we wouldn't need docker for some of these steps. - // However, at least this way they are tested. - - let cometbft_runner = DockerRunner::new( - &dh, - node_name, - COMETBFT_IMAGE, - user, - vec![(cometbft_dir.clone(), "/cometbft")], - ); - - let fendermint_runner = DockerRunner::new( - &dh, - node_name, - FENDERMINT_IMAGE, - user, - vec![ - (keys_dir.clone(), "/fendermint/keys"), - (cometbft_dir.clone(), "/cometbft"), - (node_config.genesis.path.clone(), "/fendermint/genesis.json"), - ], - ); + // Create a directory for fendermint + let fendermint_dir = node_dir.join("fendermint"); + if !fendermint_dir.exists() { + std::fs::create_dir(&fendermint_dir)?; + std::fs::create_dir(fendermint_dir.join("data"))?; + std::fs::create_dir(fendermint_dir.join("logs"))?; + std::fs::create_dir(fendermint_dir.join("snapshots"))?; + } + // We'll need to run some cometbft and fendermint commands. + // NOTE: Currently the Fendermint CLI commands live in the + // `app` crate in a way that they can't be imported. We + // could move them to the `lib.rs` from `main.rs` and + // then we wouldn't need docker for some of these steps. + // However, at least this way they are tested. 
+ + let cometbft_runner = + make_runner(COMETBFT_IMAGE, vec![(cometbft_dir.clone(), "/cometbft")]); + + let fendermint_runner = make_runner( + FENDERMINT_IMAGE, + vec![ + (keys_dir.clone(), "/fendermint/keys"), + (cometbft_dir.clone(), "/cometbft"), + (node_config.genesis.path.clone(), "/fendermint/genesis.json"), + ], + ); + + // Only run init once, just in case it would overwrite previous values. + if !cometbft_dir.join("config").exists() { // Init cometbft to establish the network key. cometbft_runner .run_cmd("init") .await .context("cannot init cometbft")?; + } - // Convert fendermint genesis to cometbft. - fendermint_runner + // Capture the cometbft node identity. + let cometbft_node_id = cometbft_runner + .run_cmd("show-node-id") + .await + .context("cannot show node ID")? + .into_iter() + .last() + .ok_or_else(|| anyhow!("empty cometbft node ID")) + .and_then(parse_cometbft_node_id)?; + + export_file(keys_dir.join(COMETBFT_NODE_ID), cometbft_node_id)?; + + // Convert fendermint genesis to cometbft. + fendermint_runner .run_cmd( - "genesis --genesis-file /fendermint/genesis.json \ - into-tendermint --out /cometbft/config/genesis.json", + "genesis --genesis-file /fendermint/genesis.json into-tendermint --out /cometbft/config/genesis.json", ) .await .context("failed to convert genesis")?; - // Convert validator private key to cometbft. - if let Some(v) = node_config.validator { - let validator_key_path = v.secret_key_path(); - std::fs::copy(validator_key_path, keys_dir.join("validator_key.sk")) - .context("failed to copy validator key")?; + // Convert validator private key to cometbft. 
+ if let Some(v) = node_config.validator { + let validator_key_path = v.secret_key_path(); + std::fs::copy(validator_key_path, keys_dir.join("validator_key.sk")) + .context("failed to copy validator key")?; - fendermint_runner + fendermint_runner .run_cmd( - "key into-tendermint --secret-key /fendermint/keys/validator_key.sk \ - --out cometbft/config/priv_validator_key.json", + "key into-tendermint --secret-key /fendermint/keys/validator_key.sk --out /cometbft/config/priv_validator_key.json", ) .await .context("failed to convert validator key")?; - } - - // Create a network key for the resolver. - fendermint_runner - .run_cmd("key gen --out-dir /fendermint/keys --name network_key") - .await - .context("failed to create network key")?; } - // Create a directory for fendermint - let fendermint_dir = node_dir.join("fendermint"); - if !fendermint_dir.exists() { - std::fs::create_dir(&fendermint_dir)?; - std::fs::create_dir(fendermint_dir.join("data"))?; - std::fs::create_dir(fendermint_dir.join("logs"))?; - std::fs::create_dir(fendermint_dir.join("snapshots"))?; - } + // Create a network key for the resolver. + fendermint_runner + .run_cmd("key gen --out-dir /fendermint/keys --name network_key") + .await + .context("failed to create network key")?; + + // Capture the fendermint node identity. + let fendermint_peer_id = fendermint_runner + .run_cmd("key show-peer-id --public-key /fendermint/keys/network_key.pk") + .await + .context("cannot show peer ID")? + .into_iter() + .last() + .ok_or_else(|| anyhow!("empty fendermint peer ID")) + .and_then(parse_fendermint_peer_id)?; + + export_file(keys_dir.join(FENDERMINT_PEER_ID), fendermint_peer_id)?; // If there is no static env var file, create one with all the common variables. 
let static_env = node_dir.join(STATIC_ENV); @@ -193,21 +217,25 @@ impl DockerNode { .ok_or_else(|| anyhow!("ipc config missing"))?; let resolver_host_port: u32 = port_range.from; + let network = match fvm_shared::address::current_network() { + fvm_shared::address::Network::Mainnet => "mainnet", + fvm_shared::address::Network::Testnet => "testnet", + }; let mut env: EnvVars = env_vars![ "LOG_LEVEL" => "info", "RUST_BACKTRACE" => 1, - "FM_NETWORK " => "testnet", + "FM_NETWORK" => network, "FM_DATA_DIR" => "/fendermint/data", "FM_LOG_DIR" => "/fendermint/logs", "FM_SNAPSHOTS_DIR" => "/fendermint/snapshots", "FM_CHAIN_NAME" => genesis.chain_name.clone(), "FM_IPC_SUBNET_ID" => ipc.gateway.subnet_id, "FM_RESOLVER__NETWORK__LOCAL_KEY" => "/fendermint/keys/network_key.sk", - "FM_RESOLVER__CONNECTION__LISTEN_ADDR" => format!("/ip4/0.0.0.0/tcp/${RESOLVER_P2P_PORT}"), - "FM_TENDERMINT_RPC_URL" => format!("http://${cometbft_name}:{COMETBFT_RPC_PORT}"), - "TENDERMINT_RPC_URL" => format!("http://${cometbft_name}:{COMETBFT_RPC_PORT}"), - "TENDERMINT_WS_URL" => format!("ws://${cometbft_name}:{COMETBFT_RPC_PORT}/websocket"), + "FM_RESOLVER__CONNECTION__LISTEN_ADDR" => format!("/ip4/0.0.0.0/tcp/{RESOLVER_P2P_PORT}"), + "FM_TENDERMINT_RPC_URL" => format!("http://{cometbft_name}:{COMETBFT_RPC_PORT}"), + "TENDERMINT_RPC_URL" => format!("http://{cometbft_name}:{COMETBFT_RPC_PORT}"), + "TENDERMINT_WS_URL" => format!("ws://{cometbft_name}:{COMETBFT_RPC_PORT}/websocket"), "FM_ABCI__LISTEN__PORT" => FENDERMINT_ABCI_PORT, "FM_ETH__LISTEN__PORT" => ETHAPI_RPC_PORT, ]; @@ -245,7 +273,7 @@ impl DockerNode { })?; env_vars![ "FM_IPC__TOPDOWN__CHAIN_HEAD_DELAY" => 1, - "FM_IPC__TOPDOWN__PARENT_HTTP_ENDPOINT" => format!("http://{}:{ETHAPI_RPC_PORT}", parent_ethapi.container.name), + "FM_IPC__TOPDOWN__PARENT_HTTP_ENDPOINT" => format!("http://{}:{ETHAPI_RPC_PORT}", parent_ethapi.hostname()), "FM_IPC__TOPDOWN__PARENT_REGISTRY" => registry, "FM_IPC__TOPDOWN__PARENT_GATEWAY" => gateway, 
"FM_IPC__TOPDOWN__EXPONENTIAL_BACK_OFF" => 5, @@ -274,8 +302,6 @@ impl DockerNode { let dynamic_env = node_dir.join(DYNAMIC_ENV); if !dynamic_env.exists() { // The values will be assigned when the node is started. - // --env FM_RESOLVER__DISCOVERY__STATIC_ADDRESSES=${RESOLVER_BOOTSTRAPS} - // --env CMT_P2P_SEEDS export_env(&dynamic_env, &Default::default())?; } @@ -294,21 +320,20 @@ impl DockerNode { // Wrap an entry point with the docker entry script. let entrypoint = |ep: &str| { - format!( - "{} '{ep}' {} {}", - *DOCKER_ENTRY_PATH, *STATIC_ENV_PATH, *DYNAMIC_ENV_PATH - ) + vec![ + DOCKER_ENTRY_PATH.to_string(), + ep.to_string(), + STATIC_ENV_PATH.to_string(), + DYNAMIC_ENV_PATH.to_string(), + ] }; // Create a fendermint container mounting: let fendermint = match fendermint { Some(c) => c, None => { - let creator = DockerRunner::new( - &dh, - node_name, + let creator = make_runner( FENDERMINT_IMAGE, - user, volumes(vec![ (keys_dir.clone(), "/fendermint/keys"), (fendermint_dir.join("data"), "/fendermint/data"), @@ -333,11 +358,8 @@ impl DockerNode { let cometbft = match cometbft { Some(c) => c, None => { - let creator = DockerRunner::new( - &dh, - node_name, + let creator = make_runner( COMETBFT_IMAGE, - user, volumes(vec![(cometbft_dir.clone(), "/cometbft")]), ); @@ -359,8 +381,7 @@ impl DockerNode { // Create a ethapi container let ethapi = match ethapi { None if node_config.ethapi => { - let creator = - DockerRunner::new(&dh, node_name, FENDERMINT_IMAGE, user, volumes(vec![])); + let creator = make_runner(FENDERMINT_IMAGE, volumes(vec![])); let c = creator .create( @@ -387,6 +408,82 @@ impl DockerNode { path: node_dir, }) } + + pub async fn start(&self, seed_nodes: &[&Self]) -> anyhow::Result<()> { + let cometbft_seeds = collect_seeds(seed_nodes, |n| { + let host = &n.cometbft.hostname(); + let id = n.cometbft_node_id()?; + Ok(format!("{id}@{host}:{COMETBFT_P2P_PORT}")) + })?; + + let resolver_seeds = collect_seeds(seed_nodes, |n| { + let host = 
&n.fendermint.hostname(); + let id = n.fendermint_peer_id()?; + Ok(format!("/dns/{host}/tcp/{RESOLVER_P2P_PORT}/p2p/{id}")) + })?; + + let env = env_vars! [ + "CMT_P2P_SEEDS" => cometbft_seeds, + "FM_RESOLVER__DISCOVERY__STATIC_ADDRESSES" => resolver_seeds, + ]; + + export_env(self.path.join(DYNAMIC_ENV), &env)?; + + // Start all three containers. + self.fendermint.start().await?; + self.cometbft.start().await?; + if let Some(ref ethapi) = self.ethapi { + ethapi.start().await?; + } + + Ok(()) + } + + /// Read the CometBFT node ID from the file we persisted during creation. + pub fn cometbft_node_id(&self) -> anyhow::Result { + read_file(self.path.join("keys").join(COMETBFT_NODE_ID)) + } + + /// Read the libp2p peer ID from the file we persisted during creation. + pub fn fendermint_peer_id(&self) -> anyhow::Result { + read_file(self.path.join("keys").join(FENDERMINT_PEER_ID)) + } + + pub async fn fendermint_logs(&self) -> Vec { + self.fendermint.logs().await + } + + pub async fn cometbft_logs(&self) -> Vec { + self.cometbft.logs().await + } + + pub async fn ethapi_logs(&self) -> Vec { + match self.ethapi { + None => Vec::new(), + Some(ref c) => c.logs().await, + } + } +} + +impl HasEthApi for DockerNode { + fn ethapi_http_endpoint(&self) -> Option { + self.ethapi.as_ref().map(|_| { + format!( + "http://127.0.0.1:{}", + self.port_range.ethapi_rpc_host_port() + ) + }) + } +} + +impl HasCometBftApi for DockerNode { + fn cometbft_http_endpoint(&self) -> tendermint_rpc::Url { + tendermint_rpc::Url::from_str(&format!( + "http://127.0.0.1:{}", + self.port_range.cometbft_rpc_host_port() + )) + .unwrap() + } } /// Create a container name from a node name and a logical container name, e.g. 
"cometbft" @@ -401,176 +498,108 @@ fn container_name(node_name: &NodeName, container: &str) -> String { .unwrap_or_default() .to_string_lossy() .to_string(); - let hash = ResourceHash::digest(node_name.path().to_string_lossy().to_string()); + let hash = ResourceHash::digest(node_name.path_string()); let hash = hash.to_string(); let hash = &hash.as_str()[..6]; format!("{node_id}-{container}-{}", hash) } -struct DockerRunner<'a> { - dh: &'a DockerWithDropHandle, - node_name: NodeName, - image: String, - user: u32, - volumes: Volumes, +/// Collect comma separated values from seeds nodes. +fn collect_seeds(seed_nodes: &[&DockerNode], f: F) -> anyhow::Result +where + F: Fn(&DockerNode) -> anyhow::Result, +{ + let ss = seed_nodes + .iter() + .map(|n| f(n)) + .collect::>>() + .context("failed to collect seeds")?; + + Ok(ss.join(",")) } -impl<'a> DockerRunner<'a> { - pub fn new( - dh: &'a DockerWithDropHandle, - node_name: &NodeName, - image: &str, - user: u32, - volumes: Volumes, - ) -> Self { - Self { - dh, - node_name: node_name.clone(), - image: image.to_string(), - user, - volumes, - } - } +fn export_env(file_path: impl AsRef, env: &EnvVars) -> anyhow::Result<()> { + let env = env + .iter() + .map(|(k, v)| format!("{k}={v}")) + .collect::>(); + + export_file(file_path, env.join("\n")) +} + +fn read_file(file_path: impl AsRef) -> anyhow::Result { + std::fs::read_to_string(&file_path) + .with_context(|| format!("failed to read {}", file_path.as_ref().to_string_lossy())) +} - fn docker(&self) -> &Docker { - &self.dh.docker +fn parse_cometbft_node_id(value: impl AsRef) -> anyhow::Result { + let value = value.as_ref().trim().to_string(); + if hex::decode(&value).is_err() { + bail!("failed to parse CometBFT node ID: {value}"); } + Ok(value) +} - // Tag containers with resource names. 
- fn labels(&self) -> HashMap { - [ - ("testnet", self.node_name.testnet().path()), - ("node", self.node_name.path()), - ] - .into_iter() - .map(|(n, p)| (n.to_string(), p.to_string_lossy().to_string())) - .collect() +/// libp2p peer ID is base58 encoded. +fn parse_fendermint_peer_id(value: impl AsRef) -> anyhow::Result { + let value = value.as_ref().trim().to_string(); + // We could match the regex + if value.len() != 53 { + bail!("failed to parse Fendermint peer ID: {value}"); } + Ok(value) +} - /// Run a short lived container. - pub async fn run_cmd(&self, cmd: &str) -> anyhow::Result<()> { - let config = Config { - image: Some(self.image.clone()), - user: Some(self.user.to_string()), - cmd: Some(vec![cmd.to_string()]), - attach_stderr: Some(true), - attach_stdout: Some(true), - labels: Some(self.labels()), - host_config: Some(HostConfig { - auto_remove: Some(true), - init: Some(true), - binds: Some( - self.volumes - .iter() - .map(|(h, c)| format!("{}:{c}", h.to_string_lossy())) - .collect(), - ), - ..Default::default() - }), - ..Default::default() - }; +#[cfg(test)] +mod tests { + use super::{DockerRunner, COMETBFT_IMAGE}; + use crate::{ + docker::{dropper, node::parse_cometbft_node_id}, + TestnetName, + }; + use bollard::Docker; - let id = self - .docker() - .create_container::<&str, _>(None, config) - .await - .context("failed to create container")? 
- .id; + fn make_runner() -> DockerRunner { + let nn = TestnetName::new("test-network").root().node("test-node"); + let docker = Docker::connect_with_local_defaults().expect("failed to connect to docker"); + let dropper = dropper::start(docker.clone()); + DockerRunner::new(docker, dropper, nn, 0, COMETBFT_IMAGE, Vec::new()) + } - self.docker() - .start_container::<&str>(&id, None) + #[tokio::test] + async fn test_docker_run_output() { + let runner = make_runner(); + // Based on my manual testing, this will initialise the config and then show the ID: + // `docker run --rm cometbft/cometbft:v0.37.x show-node-id` + let logs = runner + .run_cmd("show-node-id") .await - .context("failed to start container")?; + .expect("failed to show ID"); - // TODO: Output? + assert!(!logs.is_empty()); - self.docker() - .remove_container( - &id, - Some(RemoveContainerOptions { - force: true, - ..Default::default() - }), - ) - .await?; - - Ok(()) + assert!( + parse_cometbft_node_id(logs.last().unwrap()).is_ok(), + "last line is a node ID" + ); } - /// Create a container to be started later. - pub async fn create( - &self, - name: String, - network: &DockerNetwork, - // Host <-> Container port mappings - ports: Vec<(u32, u32)>, - entrypoint: String, - ) -> anyhow::Result { - let config = Config { - hostname: Some(name.clone()), - image: Some(self.image.clone()), - user: Some(self.user.to_string()), - entrypoint: Some(vec![entrypoint]), - cmd: None, - host_config: Some(HostConfig { - init: Some(true), - binds: Some( - self.volumes - .iter() - .map(|(h, c)| format!("{}:{c}", h.to_string_lossy())) - .collect(), - ), - port_bindings: Some( - ports - .into_iter() - .flat_map(|(h, c)| { - let binding = PortBinding { - host_ip: None, - host_port: Some(h.to_string()), - }; - // Emitting both TCP and UDP, just in case. 
- vec![ - (format!("{c}/tcp"), Some(vec![binding.clone()])), - (format!("{c}/udp"), Some(vec![binding])), - ] - }) - .collect(), - ), - network_mode: Some(network.network().name.clone()), - ..Default::default() - }), - ..Default::default() - }; + #[tokio::test] + async fn test_docker_run_error() { + let runner = make_runner(); - let id = self - .docker() - .create_container::( - Some(CreateContainerOptions { - name: name.clone(), - ..Default::default() - }), - config, - ) + let _err = runner + .run_cmd("show-peer-id") .await - .context("failed to create container")? - .id; - - Ok(DockerContainer { - dh: self.dh.clone(), - container: DockerConstruct { - id, - name, - external: false, - }, - }) + .expect_err("wrong command should fail"); } -} - -fn export_env(file_path: impl AsRef, env: &EnvVars) -> anyhow::Result<()> { - let env = env - .iter() - .map(|(k, v)| format!("{k}={v}")) - .collect::>(); - export_file(file_path, env.join("\n")) + #[test] + fn test_valid_cometbft_id() { + assert!( + parse_cometbft_node_id("eb9470dd3bfa7311f1de3f3d3d69a628531adcfe").is_ok(), + "sample ID is valid" + ); + assert!(parse_cometbft_node_id("I[2024-02-23|14:20:21.724] Generated genesis file module=main path=/cometbft/config/genesis.json").is_err(), "logs aren't valid"); + } } diff --git a/fendermint/testing/materializer/src/docker/runner.rs b/fendermint/testing/materializer/src/docker/runner.rs new file mode 100644 index 000000000..40052337d --- /dev/null +++ b/fendermint/testing/materializer/src/docker/runner.rs @@ -0,0 +1,241 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +use anyhow::{bail, Context}; +use bollard::{ + container::{ + AttachContainerOptions, AttachContainerResults, Config, CreateContainerOptions, + RemoveContainerOptions, + }, + network::ConnectNetworkOptions, + secret::{ContainerInspectResponse, HostConfig, PortBinding}, + Docker, +}; +use futures::StreamExt; + +use crate::NodeName; + +use 
super::{ + container::DockerContainer, dropper::DropHandle, DockerConstruct, DockerNetwork, Volumes, +}; + +pub struct DockerRunner { + docker: Docker, + dropper: DropHandle, + node_name: NodeName, + user: u32, + image: String, + volumes: Volumes, +} + +impl DockerRunner { + pub fn new( + docker: Docker, + dropper: DropHandle, + node_name: NodeName, + user: u32, + image: &str, + volumes: Volumes, + ) -> Self { + Self { + docker, + dropper, + node_name, + user, + image: image.to_string(), + volumes, + } + } + + // Tag containers with resource names. + fn labels(&self) -> HashMap { + [ + ("testnet", self.node_name.testnet().path()), + ("node", self.node_name.path()), + ] + .into_iter() + .map(|(n, p)| (n.to_string(), p.to_string_lossy().to_string())) + .collect() + } + + /// Run a short lived container. + pub async fn run_cmd(&self, cmd: &str) -> anyhow::Result> { + let cmdv = cmd.split(' ').map(|s| s.to_string()).collect(); + let config = Config { + image: Some(self.image.clone()), + user: Some(self.user.to_string()), + cmd: Some(cmdv), + attach_stderr: Some(true), + attach_stdout: Some(true), + tty: Some(true), + labels: Some(self.labels()), + host_config: Some(HostConfig { + // We'll remove it explicitly at the end after collecting the output. + auto_remove: Some(false), + init: Some(true), + binds: Some( + self.volumes + .iter() + .map(|(h, c)| format!("{}:{c}", h.to_string_lossy())) + .collect(), + ), + ..Default::default() + }), + ..Default::default() + }; + + let id = self + .docker + .create_container::<&str, _>(None, config) + .await + .context("failed to create container")? + .id; + + let AttachContainerResults { mut output, .. 
} = self + .docker + .attach_container::( + &id, + Some(AttachContainerOptions { + stdout: Some(true), + stderr: Some(true), + stream: Some(true), + ..Default::default() + }), + ) + .await + .context("failed to attach to container")?; + + self.docker + .start_container::<&str>(&id, None) + .await + .context("failed to start container")?; + + // Collect docker attach output + let mut out = Vec::new(); + while let Some(Ok(output)) = output.next().await { + out.push(output.to_string()); + } + + eprintln!("NODE: {}", self.node_name); + eprintln!("CMD: {cmd}"); + for o in out.iter() { + eprint!("OUT: {o}"); + } + eprintln!("---"); + + let inspect: ContainerInspectResponse = self + .docker + .inspect_container(&id, None) + .await + .context("failed to inspect container")?; + + self.docker + .remove_container( + &id, + Some(RemoveContainerOptions { + force: true, + ..Default::default() + }), + ) + .await?; + + if let Some(state) = inspect.state { + let exit_code = state.exit_code.unwrap_or_default(); + if exit_code != 0 { + bail!( + "container exited with code {exit_code}: {}", + state.error.unwrap_or_default() + ); + } + } + + Ok(out) + } + + /// Create a container to be started later. + pub async fn create( + &self, + name: String, + network: &DockerNetwork, + // Host <-> Container port mappings + ports: Vec<(u32, u32)>, + entrypoint: Vec, + ) -> anyhow::Result { + let config = Config { + hostname: Some(name.clone()), + image: Some(self.image.clone()), + user: Some(self.user.to_string()), + entrypoint: Some(entrypoint), + labels: Some(self.labels()), + cmd: None, + host_config: Some(HostConfig { + init: Some(true), + binds: Some( + self.volumes + .iter() + .map(|(h, c)| format!("{}:{c}", h.to_string_lossy())) + .collect(), + ), + port_bindings: Some( + ports + .into_into_iter_placeholder() + .flat_map(|(h, c)| { + let binding = PortBinding { + host_ip: None, + host_port: Some(h.to_string()), + }; + // Emitting both TCP and UDP, just in case. 
+ vec![ + (format!("{c}/tcp"), Some(vec![binding.clone()])), + (format!("{c}/udp"), Some(vec![binding])), + ] + }) + .collect(), + ), + ..Default::default() + }), + ..Default::default() + }; + + let id = self + .docker + .create_container::( + Some(CreateContainerOptions { + name: name.clone(), + ..Default::default() + }), + config, + ) + .await + .context("failed to create container")? + .id; + + eprintln!("NODE: {}", self.node_name); + eprintln!("CREATED CONTAINER: {} ({})", name, id); + eprintln!("---"); + + // host_config.network_mode should work as well. + self.docker + .connect_network( + network.network_name(), + ConnectNetworkOptions { + container: id.clone(), + ..Default::default() + }, + ) + .await + .context("failed to connect container to network")?; + + Ok(DockerContainer::new( + self.docker.clone(), + self.dropper.clone(), + DockerConstruct { + id, + name, + external: false, + }, + )) + } +} diff --git a/fendermint/testing/materializer/src/lib.rs b/fendermint/testing/materializer/src/lib.rs index f7bd85865..ccddc91ae 100644 --- a/fendermint/testing/materializer/src/lib.rs +++ b/fendermint/testing/materializer/src/lib.rs @@ -1,3 +1,4 @@ +use ethers::providers::{Http, Provider}; // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT use multihash::MultihashDigest; @@ -98,6 +99,10 @@ impl ResourceName { pub fn is_prefix_of(&self, other: &ResourceName) -> bool { other.0.starts_with(&self.0) } + + pub fn path_string(&self) -> String { + self.0.to_string_lossy().to_string() + } } impl From<&str> for ResourceName { @@ -120,13 +125,17 @@ impl Debug for ResourceName { macro_rules! 
resource_name { ($name:ident) => { - #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] + #[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub struct $name(ResourceName); impl $name { pub fn path(&self) -> &Path { &self.0 .0 } + + pub fn path_string(&self) -> String { + self.0.path_string() + } } impl AsRef for $name { @@ -151,6 +160,12 @@ macro_rules! resource_name { ) } } + + impl Debug for $name { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Display::fmt(&self, f) + } + } }; } @@ -296,6 +311,29 @@ impl ToString for ResourceHash { } } +pub trait HasEthApi { + /// URL of the HTTP endpoint, if it's enabled. + fn ethapi_http_endpoint(&self) -> Option; + + fn ethapi_http_provider(&self) -> anyhow::Result>> { + match self.ethapi_http_endpoint() { + Some(url) => Ok(Some(Provider::::try_from(url)?)), + None => Ok(None), + } + } +} + +pub trait HasCometBftApi { + /// URL of the HTTP endpoint. + fn cometbft_http_endpoint(&self) -> tendermint_rpc::Url; + + fn cometbft_http_provider(&self) -> anyhow::Result { + Ok(tendermint_rpc::HttpClient::new( + self.cometbft_http_endpoint(), + )?) + } +} + #[cfg(test)] mod tests { use std::path::PathBuf; @@ -352,4 +390,11 @@ mod tests { assert!(node.is_in_subnet(&sn)); assert_eq!(node.testnet(), tn, "testnet is the prefix"); } + + #[test] + fn test_resource_name_display() { + let tn = TestnetName::new("display-test"); + assert_eq!(format!("{tn}"), "Testnet('testnets/display-test')"); + assert_eq!(format!("{tn:?}"), "Testnet('testnets/display-test')"); + } } diff --git a/fendermint/testing/materializer/src/manifest.rs b/fendermint/testing/materializer/src/manifest.rs index b8a6e9005..e74863a31 100644 --- a/fendermint/testing/materializer/src/manifest.rs +++ b/fendermint/testing/materializer/src/manifest.rs @@ -39,6 +39,7 @@ pub struct Manifest { pub rootnet: Rootnet, /// Subnets created on the rootnet. 
+ #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] pub subnets: SubnetMap, } @@ -131,6 +132,7 @@ pub struct Subnet { /// Child subnets under this parent. /// /// The subnet ID exists so we can find the outcome of existing deployments in the log. + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] pub subnets: SubnetMap, } @@ -141,9 +143,11 @@ pub struct Node { /// Indicate whether to run the Ethereum API. pub ethapi: bool, /// The nodes from which CometBFT should bootstrap itself. + #[serde(default, skip_serializing_if = "Vec::is_empty")] pub seed_nodes: Vec, /// The parent node that the top-down syncer follows; /// or leave it empty if node is on the rootnet. + #[serde(default, skip_serializing_if = "Option::is_none")] pub parent_node: Option, } diff --git a/fendermint/testing/materializer/src/materials/defaults.rs b/fendermint/testing/materializer/src/materials/defaults.rs index 9b3142585..5f118b477 100644 --- a/fendermint/testing/materializer/src/materials/defaults.rs +++ b/fendermint/testing/materializer/src/materials/defaults.rs @@ -123,7 +123,7 @@ impl DefaultAccount { export(&acc.path, "secret", "hex", hex::encode(sk))?; export(&acc.path, "public", "b64", to_b64(pk.as_ref()))?; export(&acc.path, "public", "hex", hex::encode(pk))?; - export(&acc.path, "eth-addr", "", acc.eth_addr().to_string())?; + export(&acc.path, "eth-addr", "", format!("{:?}", acc.eth_addr()))?; export(&acc.path, "fvm-addr", "", acc.fvm_addr().to_string())?; } diff --git a/fendermint/testing/materializer/src/testnet.rs b/fendermint/testing/materializer/src/testnet.rs index da7e7ebab..30ae26c20 100644 --- a/fendermint/testing/materializer/src/testnet.rs +++ b/fendermint/testing/materializer/src/testnet.rs @@ -18,8 +18,7 @@ use crate::{ Materializer, NodeConfig, ParentConfig, SubmitConfig, SubnetConfig, TargetConfig, }, materials::Materials, - AccountId, NodeId, NodeName, RelayerName, ResourceHash, SubnetId, SubnetName, TestnetId, - TestnetName, + AccountId, NodeId, 
NodeName, RelayerName, ResourceHash, SubnetId, SubnetName, TestnetName, }; /// The `Testnet` parses a [Manifest] and is able to derive the steps @@ -48,20 +47,27 @@ pub struct Testnet { _phantom_materializer: PhantomData, } +impl Drop for Testnet { + fn drop(&mut self) { + // Make sure anything that can use a common network is dropped first. + drop(std::mem::take(&mut self.relayers)); + drop(std::mem::take(&mut self.nodes)); + } +} + impl Testnet where M: Materials, R: Materializer + Sync + Send, { - pub async fn new(m: &mut R, id: &TestnetId) -> anyhow::Result { - let name = TestnetName::new(id); + pub async fn new(m: &mut R, name: &TestnetName) -> anyhow::Result { let network = m - .create_network(&name) + .create_network(name) .await .context("failed to create the network")?; Ok(Self { - name, + name: name.clone(), network, externals: Default::default(), accounts: Default::default(), @@ -74,6 +80,10 @@ where }) } + pub fn name(&self) -> &TestnetName { + &self.name + } + pub fn root(&self) -> SubnetName { self.name.root() } @@ -82,8 +92,8 @@ where /// /// To validate a manifest, we can first create a testnet with a [Materializer] /// that only creates symbolic resources. - pub async fn setup(m: &mut R, id: &TestnetId, manifest: &Manifest) -> anyhow::Result { - let mut t = Self::new(m, id).await?; + pub async fn setup(m: &mut R, name: &TestnetName, manifest: &Manifest) -> anyhow::Result { + let mut t = Self::new(m, name).await?; let root_name = t.root(); // Create keys for accounts. @@ -163,6 +173,11 @@ where .collect() } + /// Iterate all the nodes in the testnet. + pub fn nodes(&self) -> impl Iterator { + self.nodes.iter() + } + /// Where can we send transactions and queries on a subnet. 
pub fn submit_config(&self, subnet_name: &SubnetName) -> anyhow::Result> { let deployment = self.deployment(subnet_name)?; @@ -232,13 +247,13 @@ where for (node_id, node) in node_ids.iter() { self.create_node(m, subnet_name, node_id, node) .await - .with_context(|| "failed to create node {node_id} in {subnet_name:?}")?; + .with_context(|| format!("failed to create node {node_id} in {subnet_name:?}"))?; } for (node_id, node) in node_ids.iter() { self.start_node(m, subnet_name, node_id, node) .await - .with_context(|| "failed to start node {node_id} in {subnet_name:?}")?; + .with_context(|| format!("failed to start node {node_id} in {subnet_name:?}"))?; } Ok(()) diff --git a/fendermint/testing/materializer/src/validation.rs b/fendermint/testing/materializer/src/validation.rs index 377a75e05..852a2ece7 100644 --- a/fendermint/testing/materializer/src/validation.rs +++ b/fendermint/testing/materializer/src/validation.rs @@ -18,8 +18,7 @@ use crate::{ materializer::{Materializer, NodeConfig, SubmitConfig, SubnetConfig}, materials::Materials, testnet::Testnet, - AccountName, NodeName, RelayerName, ResourceHash, ResourceName, SubnetName, TestnetId, - TestnetName, + AccountName, NodeName, RelayerName, ResourceHash, ResourceName, SubnetName, TestnetName, }; const DEFAULT_FAUCET_FIL: u64 = 100; @@ -28,11 +27,11 @@ const DEFAULT_FAUCET_FIL: u64 = 100; /// * we are not over allocating the balances /// * relayers have balances on the parent to submit transactions /// * subnet creators have balances on the parent to submit transactions -pub async fn validate_manifest(id: &TestnetId, manifest: &Manifest) -> anyhow::Result<()> { +pub async fn validate_manifest(name: &TestnetName, manifest: &Manifest) -> anyhow::Result<()> { let m = ValidatingMaterializer::default(); // Wrap with logging so that we can debug the tests easier. 
let mut m = LoggingMaterializer::new(m, "validation".to_string()); - let _ = Testnet::setup(&mut m, id, manifest).await?; + let _ = Testnet::setup(&mut m, name, manifest).await?; // We could check here that all subnets have enough validators for a quorum. Ok(()) } @@ -363,7 +362,7 @@ fn parent_name(subnet: &SubnetName) -> anyhow::Result { #[cfg(test)] mod tests { - use crate::{manifest::Manifest, validation::validate_manifest, TestnetId}; + use crate::{manifest::Manifest, validation::validate_manifest, TestnetId, TestnetName}; // Unfortunately doesn't seem to work with quickcheck_async // /// Run the tests with `RUST_LOG=info` to see the logs, for example: @@ -378,6 +377,7 @@ mod tests { /// Check that the random manifests we generate would pass validation. #[quickcheck_async::tokio] async fn prop_validation(id: TestnetId, manifest: Manifest) -> anyhow::Result<()> { - validate_manifest(&id, &manifest).await + let name = TestnetName::new(id); + validate_manifest(&name, &manifest).await } } diff --git a/fendermint/testing/materializer/tests/docker.rs b/fendermint/testing/materializer/tests/docker.rs new file mode 100644 index 000000000..0bbac4c9a --- /dev/null +++ b/fendermint/testing/materializer/tests/docker.rs @@ -0,0 +1,185 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! Utility methods and entry point for tests using the docker materializer. +//! +//! # Example +//! +//! 
`cargo test -p fendermint_materializer --test docker -- --nocapture` + +use std::{ + collections::BTreeSet, + env::current_dir, + path::PathBuf, + pin::Pin, + time::{Duration, Instant}, +}; + +use anyhow::{anyhow, Context}; +use ethers::providers::Middleware; +use fendermint_materializer::{ + docker::{DockerMaterializer, DockerMaterials}, + manifest::Manifest, + testnet::Testnet, + validation::validate_manifest, + HasCometBftApi, HasEthApi, TestnetName, +}; +use futures::Future; +use lazy_static::lazy_static; +use tendermint_rpc::Client; + +pub type DockerTestnet = Testnet; + +lazy_static! { + static ref CI_PROFILE: bool = std::env::var("PROFILE").unwrap_or_default() == "ci"; + static ref STARTUP_TIMEOUT: Duration = Duration::from_secs(60); + static ref TEARDOWN_TIMEOUT: Duration = Duration::from_secs(5); + static ref PRINT_LOGS_ON_ERROR: bool = *CI_PROFILE; +} + +/// Want to keep the testnet artifacts in the `tests/testnets` directory. +fn tests_dir() -> PathBuf { + let dir = current_dir().unwrap(); + debug_assert!( + dir.ends_with("materializer"), + "expected the current directory to be the crate" + ); + dir.join("tests") +} + +/// Directory where we keep the docker-materializer related data files. +fn test_data_dir() -> PathBuf { + tests_dir().join("docker-materializer-data") +} + +/// Parse a manifest from the `tests/manifests` directory. +fn read_manifest(file_name: &str) -> anyhow::Result { + let manifest = tests_dir().join("manifests").join(file_name); + let manifest = std::fs::read_to_string(&manifest).with_context(|| { + format!( + "failed to read manifest from {}", + manifest.to_string_lossy() + ) + })?; + let manifest = serde_yaml::from_str(&manifest).context("failed to parse manifest")?; + Ok(manifest) +} + +/// Parse a manifest file in the `manifests` directory, clean up any corresponding +/// testnet resources, then materialize a testnet and run some tests. 
+pub async fn with_testnet(manifest_file_name: &str, f: F) -> anyhow::Result<()> +where + // https://users.rust-lang.org/t/function-that-takes-a-closure-with-mutable-reference-that-returns-a-future/54324 + F: for<'a> FnOnce( + &Manifest, + &mut DockerMaterializer, + &'a mut DockerTestnet, + ) -> Pin> + 'a>>, +{ + let testnet_name = TestnetName::new( + PathBuf::from(manifest_file_name) + .file_stem() + .expect("filename missing") + .to_string_lossy() + .to_string(), + ); + + let manifest = read_manifest(manifest_file_name)?; + + // First make sure it's a sound manifest. + validate_manifest(&testnet_name, &manifest) + .await + .context("failed to validate manifest")?; + + let mut materializer = DockerMaterializer::new(&test_data_dir(), 0)?; + + // make sure we start with clean slate by removing any previous files + materializer + .remove(&testnet_name) + .await + .context("failed to remove testnet")?; + + let mut testnet = Testnet::setup(&mut materializer, &testnet_name, &manifest) + .await + .context("failed to set up testnet")?; + + let started = wait_for_startup(&testnet).await?; + + let res = if started { + f(&manifest, &mut materializer, &mut testnet).await + } else { + Err(anyhow!("the startup sequence timed out")) + }; + + // Print all logs on failure. + // Some might be available in logs in the files which are left behind, + // e.g. for `fendermint` we have logs, but maybe not for `cometbft`. + if res.is_err() && *PRINT_LOGS_ON_ERROR { + for (name, node) in testnet.nodes() { + let name = name.path_string(); + for log in node.fendermint_logs().await { + eprintln!("{name}/fendermint: {log}"); + } + for log in node.cometbft_logs().await { + eprintln!("{name}/cometbft: {log}"); + } + for log in node.ethapi_logs().await { + eprintln!("{name}/ethapi: {log}"); + } + } + } + + // Tear down the testnet. + drop(testnet); + + // Allow some time for containers to be dropped. 
+ // This only happens if the testnet setup succeeded, + // otherwise the system shuts down too quick, but + // at least we can inspect the containers. + // If they don't all get dropped, `docker system prune` helps. + tokio::time::sleep(*TEARDOWN_TIMEOUT).await; + + res +} + +/// Allow time for things to consolidate and APIs to start. +async fn wait_for_startup(testnet: &DockerTestnet) -> anyhow::Result { + let start = Instant::now(); + let mut started = BTreeSet::new(); + + 'startup: loop { + if start.elapsed() > *STARTUP_TIMEOUT { + return Ok(false); + } + tokio::time::sleep(Duration::from_secs(5)).await; + + for (name, dnode) in testnet.nodes() { + if started.contains(name) { + continue; + } + + let client = dnode.cometbft_http_provider()?; + + if let Err(e) = client.abci_info().await { + eprintln!("CometBFT on {name} still fails: {e}"); + continue 'startup; + } + + if let Some(client) = dnode.ethapi_http_provider()? { + if let Err(e) = client.get_chainid().await { + eprintln!("EthAPI on {name} still fails: {e}"); + continue 'startup; + } + } + + eprintln!("APIs on {name} started"); + started.insert(name.clone()); + } + + // All of them succeeded. + return Ok(true); + } +} + +// Run these tests serially because they share a common `materializer-state.json` file with the port mappings. +// Unfortunately the `#[serial]` macro can only be applied to module blocks, not this. +mod docker_tests; diff --git a/fendermint/testing/materializer/tests/docker_tests/mod.rs b/fendermint/testing/materializer/tests/docker_tests/mod.rs new file mode 100644 index 000000000..0c9b4a8b4 --- /dev/null +++ b/fendermint/testing/materializer/tests/docker_tests/mod.rs @@ -0,0 +1,8 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! These test modules are all imported by the top level `docker.rs` module, +//! so that they can be annotated with the `#[serial]` macro and run one by one, +//! sharing their materializer state. 
+ +/// Tests using the `root_only.yaml` manifest. +pub mod root_only; diff --git a/fendermint/testing/materializer/tests/docker_tests/root_only.rs b/fendermint/testing/materializer/tests/docker_tests/root_only.rs new file mode 100644 index 000000000..d74892a75 --- /dev/null +++ b/fendermint/testing/materializer/tests/docker_tests/root_only.rs @@ -0,0 +1,38 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use anyhow::{anyhow, bail}; +use ethers::{providers::Middleware, types::U64}; +use fendermint_materializer::HasEthApi; +use futures::FutureExt; + +use crate::with_testnet; + +const MANIFEST: &str = "root-only.yaml"; + +#[serial_test::serial] +#[tokio::test] +async fn test_full_node_sync() { + with_testnet(MANIFEST, |_materializer, _manifest, testnet| { + let test = async { + // Check that node2 is following node1. + let node2 = testnet.root().node("node-2"); + let dnode2 = testnet.node(&node2)?; + + let provider = dnode2 + .ethapi_http_provider()? + .ok_or_else(|| anyhow!("node-2 has ethapi enabled"))?; + + let bn = provider.get_block_number().await?; + + if bn <= U64::one() { + bail!("expected positive block number"); + } + + Ok(()) + }; + + test.boxed_local() + }) + .await + .unwrap() +} diff --git a/fendermint/testing/materializer/tests/golden.rs b/fendermint/testing/materializer/tests/golden.rs index 908067ac0..28aee25cc 100644 --- a/fendermint/testing/materializer/tests/golden.rs +++ b/fendermint/testing/materializer/tests/golden.rs @@ -2,22 +2,22 @@ // SPDX-License-Identifier: Apache-2.0, MIT mod json { + use fendermint_materializer::manifest::Manifest; use fendermint_testing::golden_json; - use fendermint_testing_materializer::manifest::Manifest; use quickcheck::Arbitrary; golden_json! 
{ "manifest/json", manifest, Manifest::arbitrary } } mod yaml { + use fendermint_materializer::manifest::Manifest; use fendermint_testing::golden_yaml; - use fendermint_testing_materializer::manifest::Manifest; use quickcheck::Arbitrary; golden_yaml! { "manifest/yaml", manifest, Manifest::arbitrary } } mod toml { + use fendermint_materializer::manifest::Manifest; use fendermint_testing::golden_toml; - use fendermint_testing_materializer::manifest::Manifest; use quickcheck::Arbitrary; golden_toml! { "manifest/toml", manifest, Manifest::arbitrary } } diff --git a/fendermint/testing/materializer/tests/manifests/root-only.yaml b/fendermint/testing/materializer/tests/manifests/root-only.yaml new file mode 100644 index 000000000..1828fbbe9 --- /dev/null +++ b/fendermint/testing/materializer/tests/manifests/root-only.yaml @@ -0,0 +1,25 @@ +accounts: + alice: {} + bob: {} + charlie: {} +rootnet: + type: New + validators: + alice: '100' + balances: + alice: '1000000000000000000' + bob: '2000000000000000000' + charlie: '3000000000000000000' + nodes: + node-1: + mode: + type: Validator + validator: alice + ethapi: false + + node-2: + mode: + type: Full + ethapi: true + seed_nodes: + - node-1 diff --git a/fendermint/testing/smoke-test/Makefile.toml b/fendermint/testing/smoke-test/Makefile.toml index b942a3bd0..5143e224c 100644 --- a/fendermint/testing/smoke-test/Makefile.toml +++ b/fendermint/testing/smoke-test/Makefile.toml @@ -35,10 +35,9 @@ dependencies = [ ] [tasks.simplecoin-example] -# Using --release in the hope that it can reuse artifacts compiled earlier for the docker build. 
script = """ cd ${CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}/fendermint -cargo run -p fendermint_rpc --release --example simplecoin -- \ +cargo run -p fendermint_rpc --example simplecoin -- \ --secret-key testing/smoke-test/test-data/keys/alice.sk \ ${VERBOSITY} """ @@ -47,7 +46,7 @@ cargo run -p fendermint_rpc --release --example simplecoin -- \ [tasks.ethers-example] script = """ cd ${CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}/fendermint -cargo run -p fendermint_eth_api --release --example ethers -- \ +cargo run -p fendermint_eth_api --example ethers -- \ --secret-key-from testing/smoke-test/test-data/keys/emily.sk \ --secret-key-to testing/smoke-test/test-data/keys/eric.sk """ @@ -55,7 +54,7 @@ cargo run -p fendermint_eth_api --release --example ethers -- \ [tasks.query-blockhash-example] script = """ cd ${CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}/fendermint -cargo run -p fendermint_eth_api --release --example query_blockhash -- \ +cargo run -p fendermint_eth_api --example query_blockhash -- \ --secret-key testing/smoke-test/test-data/keys/emily.sk \ ${VERBOSITY} """