feat: graceful shutdown #204

Open
wants to merge 4 commits into base: main
21 changes: 15 additions & 6 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -30,6 +30,7 @@ simple_logger = "2.1"
thiserror = "1.0.31"
tokio = { version = "1", features = ["full"] }
tokio-retry = "0.3"
tokio-util = { version = "0.7", features = ["rt"] }
uuid = { version = "1.1.0", features = ["v4"] }
jsonrpsee-core = "0.14.0"
mime = "0.3"
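The new `tokio-util` dependency provides the two primitives the rest of this PR builds on: `CancellationToken` for broadcasting a shutdown signal and `TaskTracker` (behind the `rt` feature enabled above) for waiting on spawned tasks. A minimal sketch of how they compose, independent of this codebase:

```rust
use std::time::Duration;
use tokio_util::sync::CancellationToken;
use tokio_util::task::TaskTracker;

#[tokio::main]
async fn main() {
    let token = CancellationToken::new();
    let tracker = TaskTracker::new();

    let child = token.clone();
    tracker.spawn(async move {
        tokio::select! {
            // shutdown requested: clean up and return
            _ = child.cancelled() => {}
            // stand-in for the task's normal work
            _ = tokio::time::sleep(Duration::from_secs(3600)) => {}
        }
    });

    // close() means "no more tasks will be spawned"; wait() then resolves
    // once every tracked task has finished.
    tracker.close();
    token.cancel();
    tracker.wait().await;
}
```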
53 changes: 38 additions & 15 deletions src/bins/rmb-relay.rs
@@ -11,7 +11,10 @@ use rmb::relay::{
limiter::{FixedWindowOptions, Limiters},
};
use rmb::twin::SubstrateTwinDB;
use tokio::sync::oneshot;
use std::process::ExitCode;
use tokio::signal;
use tokio_util::sync::CancellationToken;
use tokio_util::task::TaskTracker;

/// A peer requires only which relay to connect to, and
/// which identity (mnemonics)
Expand Down Expand Up @@ -93,7 +96,7 @@ fn set_limits() -> Result<()> {
Ok(())
}

async fn app(args: Args, tx: oneshot::Sender<()>) -> Result<()> {
async fn app(args: Args, cloned_token: CancellationToken, tracker: &TaskTracker) -> Result<()> {
if args.workers == 0 {
anyhow::bail!("number of workers cannot be zero");
}
Expand Down Expand Up @@ -171,16 +174,18 @@ async fn app(args: Args, tx: oneshot::Sender<()>) -> Result<()> {
.await
.unwrap();

let relay_cancellation_token = cloned_token.clone();
let mut l = events::Listener::new(args.substrate, redis_cache).await?;
tokio::spawn(async move {
tracker.spawn(async move {
let max_retries = 9; // max backoff is 2^9 = 512 seconds (~8.5 minutes)
let mut attempt = 0;
let mut backoff = Duration::from_secs(1);
let mut got_hit = false;

loop {
let listener_cancellation_token = cloned_token.clone();
match l
.listen(&mut got_hit)
.listen(&mut got_hit, listener_cancellation_token)
.await
.context("failed to listen to chain events")
{
@@ -195,7 +200,8 @@ attempt += 1;
attempt += 1;
if attempt > max_retries {
log::error!("Listener failed after {} attempts: {:?}", attempt - 1, e);
let _ = tx.send(());
let max_attempt_token = cloned_token.clone();
max_attempt_token.cancel();
break;
}
log::warn!(
@@ -211,28 +217,45 @@ async fn app(args: Args, tx: oneshot::Sender<()>) -> Result<()> {
}
});

r.start(&args.listen).await.unwrap();
tracker.close();
r.start(&args.listen, relay_cancellation_token)
.await
.unwrap();

Ok(())
}

#[tokio::main]
async fn main() {
async fn main() -> ExitCode {
let args = Args::parse();
let (tx, rx) = oneshot::channel();
let app_handle = tokio::spawn(async move {
if let Err(e) = app(args, tx).await {
let token = CancellationToken::new();
let cloned_token = token.clone();
let tracker = TaskTracker::new();
let cloned_tracker = tracker.clone();
let app_handle = tracker.spawn(async move {
if let Err(e) = app(args, cloned_token, &cloned_tracker).await {
eprintln!("{:#}", e);
std::process::exit(1);
return ExitCode::FAILURE;
}
ExitCode::SUCCESS
});
tracker.close();

tokio::select! {
_ = app_handle => {
status = app_handle => {
log::info!("Application is closing successfully.");
token.cancel();
tracker.wait().await;
match status {
Ok(code) => code,
Err(_) => ExitCode::FAILURE
}
}
_ = rx => {
log::error!("Listener shutdown signal received. Exiting application.");
std::process::exit(1);
_ = signal::ctrl_c() => {
log::info!("Ctrl-C received. Exiting application.");
token.cancel();
tracker.wait().await;
ExitCode::SUCCESS
}
}
}
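A note on the signature change of `main`: returning `std::process::ExitCode` instead of calling `std::process::exit(1)` is what allows the shutdown above to be graceful, since `process::exit` terminates immediately without running destructors or letting tracked tasks finish. A minimal illustration (the `run` function is a placeholder, not code from this repository):

```rust
use std::process::ExitCode;

// Placeholder for the real application logic.
fn run() -> bool {
    true
}

fn main() -> ExitCode {
    // std::process::exit(1) would stop the process here and skip all Drop
    // implementations; returning an ExitCode lets everything unwind first.
    if run() {
        ExitCode::SUCCESS
    } else {
        ExitCode::FAILURE
    }
}
```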
34 changes: 27 additions & 7 deletions src/events/mod.rs
@@ -5,6 +5,8 @@ use anyhow::Result;
use futures::StreamExt;
use log;
use subxt::{OnlineClient, PolkadotConfig};
use tokio::select;
use tokio_util::sync::CancellationToken;

#[derive(Clone)]
pub struct Listener<C>
@@ -60,23 +62,41 @@ where
anyhow::bail!("failed to connect to substrate using the provided urls")
}

pub async fn listen(&mut self, got_hit: &mut bool) -> Result<()> {
pub async fn listen(
&mut self,
got_hit: &mut bool,
listener_cancellation_token: CancellationToken,
) -> Result<()> {
loop {
// always flush in case some blocks were finalized before reconnecting
if let Err(err) = self.cache.flush().await {
log::error!("failed to flush redis cache {}", err);
tokio::time::sleep(Duration::from_millis(500)).await;
continue;
}
if let Err(err) = self.handle_events().await {
log::error!("error listening to events {}", err);
if let Some(subxt::Error::Rpc(_)) = err.downcast_ref::<subxt::Error>() {
self.api = Self::connect(&mut self.substrate_urls).await?;
select! {
_ = listener_cancellation_token.cancelled() => {
log::info!("shutting down listener gracefully");
if let Err(err) = self.cache.flush().await {
log::error!("failed to flush redis cache {}", err);
} else {
log::info!("successfully flushed redis cache");
}
break;
},
result = self.handle_events() => {
if let Err(err) = result {
log::error!("error listening to events {}", err);
if let Some(subxt::Error::Rpc(_)) = err.downcast_ref::<subxt::Error>() {
self.api = Self::connect(&mut self.substrate_urls).await?;
}
} else {
*got_hit = true
}
}
} else {
*got_hit = true
}
}
Ok(())
}

async fn handle_events(&self) -> Result<()> {
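The reworked `listen` loop follows a common cancellation-aware pattern: race one unit of work against the token, run cleanup when the cancelled branch wins, and accept that the in-flight work future is simply dropped at its next await point. A simplified sketch with placeholder names (`do_one_unit` stands in for `handle_events`):

```rust
use anyhow::Result;
use tokio::select;
use tokio_util::sync::CancellationToken;

// Placeholder for one iteration of real work, e.g. handle_events().
async fn do_one_unit() -> Result<()> {
    Ok(())
}

async fn worker_loop(token: CancellationToken) -> Result<()> {
    loop {
        select! {
            _ = token.cancelled() => {
                // final cleanup goes here (the listener flushes its redis cache)
                break;
            }
            result = do_one_unit() => {
                if let Err(err) = result {
                    log::error!("unit of work failed: {}", err);
                }
            }
        }
    }
    Ok(())
}
```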
55 changes: 37 additions & 18 deletions src/relay/federation/mod.rs
@@ -10,6 +10,8 @@ use bb8_redis::{
RedisConnectionManager,
};
use prometheus::{IntCounterVec, Opts, Registry};
use tokio::select;
use tokio_util::{sync::CancellationToken, task::TaskTracker};
use workers::WorkerPool;

mod router;
@@ -114,16 +116,20 @@
}

/// start the federation router
pub fn start(self) -> Federator {
pub fn start(
self,
federator_cancellation_token: CancellationToken,
tracker: &TaskTracker,
) -> Federator {
let federator = Federator {
pool: self.pool.clone(),
};

tokio::spawn(self.run());
tracker.spawn(self.run(federator_cancellation_token));
federator
}

async fn run(self) {
async fn run(self, federator_cancellation_token: CancellationToken) {
let mut workers = self.workers;

loop {
@@ -135,20 +141,31 @@
}
};
let worker_handler = workers.get().await;
let (_, msg): (String, Vec<u8>) = match cmd("BRPOP")
.arg(FEDERATION_QUEUE)
.arg(0.0)
.query_async(&mut *con)
.await
{
Ok(msg) => msg,
Err(err) => {
log::error!("could not get message from redis {}", err);
continue;
}
};
if let Err(err) = worker_handler.send(msg) {
log::error!("failed to send job to worker: {}", err);
select! {
_ = federator_cancellation_token.cancelled() => {
log::info!("shutting down fedartor gracefully");
//workers.close().await;
log::info!("shutting down fedartor gracefully end");
break;
},
result = async {
cmd("BRPOP")
.arg(FEDERATION_QUEUE)
.arg(0.0)
.query_async(&mut *con).await

} => {
let (_, msg): (String, Vec<u8>) = match result {
Ok(msg) => msg,
Err(err) => {
log::error!("could not get message from redis {}", err);
continue;
}
};
if let Err(err) = worker_handler.send(msg) {
log::error!("failed to send job to worker: {}", err);
}
},
}
}
}
@@ -204,7 +221,9 @@ mod test {
.build(sink, db, ranker)
.unwrap();

let federator = federation.start();
let token = CancellationToken::new();
let tracker = TaskTracker::new();
let federator = federation.start(token, &tracker);

let mock = server.mock(|when, then| {
when.method(POST).path("/");