diff --git a/teos-common/src/test_utils.rs b/teos-common/src/test_utils.rs
index 9eaed7a3..0b5a95b2 100644
--- a/teos-common/src/test_utils.rs
+++ b/teos-common/src/test_utils.rs
@@ -5,10 +5,9 @@ use rand::distributions::Standard;
 use rand::prelude::Distribution;
 use rand::Rng;
 
-use bitcoin::consensus;
 use bitcoin::hashes::Hash;
 use bitcoin::secp256k1::SecretKey;
-use bitcoin::Txid;
+use bitcoin::{consensus, Script, Transaction, TxOut, Txid};
 
 use crate::appointment::{Appointment, Locator};
 use crate::cryptography;
@@ -32,6 +31,12 @@ pub fn get_random_user_id() -> UserId {
     UserId(pk)
 }
 
+pub fn get_random_locator() -> Locator {
+    let mut rng = rand::thread_rng();
+
+    Locator::from_slice(&rng.gen::<[u8; 16]>()).unwrap()
+}
+
 pub fn generate_random_appointment(dispute_txid: Option<&Txid>) -> Appointment {
     let dispute_txid = match dispute_txid {
         Some(l) => *l,
@@ -42,7 +47,15 @@ pub fn generate_random_appointment(dispute_txid: Option<&Txid>) -> Appointment {
     };
 
     let tx_bytes = Vec::from_hex(TX_HEX).unwrap();
-    let penalty_tx = consensus::deserialize(&tx_bytes).unwrap();
+    let mut penalty_tx: Transaction = consensus::deserialize(&tx_bytes).unwrap();
+
+    // Append a random-sized OP_RETURN to make each transaction random in size.
+    penalty_tx.output.push(TxOut {
+        value: 0,
+        script_pubkey: Script::new_op_return(&cryptography::get_random_bytes(
+            get_random_int::<usize>() % 81,
+        )),
+    });
 
     let mut raw_locator: [u8; 16] = cryptography::get_random_bytes(16).try_into().unwrap();
     raw_locator.copy_from_slice(&dispute_txid[..16]);
diff --git a/teos/src/api/http.rs b/teos/src/api/http.rs
index ca9c2c99..a041ee34 100644
--- a/teos/src/api/http.rs
+++ b/teos/src/api/http.rs
@@ -660,8 +660,11 @@ mod tests_methods {
     };
 
     use super::*;
-    use crate::extended_appointment::UUID;
-    use crate::test_utils::{generate_dummy_appointment, ApiConfig, DURATION, SLOTS};
+    use crate::responder::{ConfirmationStatus, TransactionTracker};
+    use crate::test_utils::{
+        generate_dummy_appointment, get_random_tx, ApiConfig, DURATION, SLOTS,
+    };
+    use crate::watcher::Breach;
 
     use teos_common::test_utils::get_random_user_id;
     use teos_common::{cryptography, UserId};
@@ -830,14 +833,20 @@ mod tests_methods {
             .await
             .unwrap();
 
-        // Add the appointment to the Responder so it counts as triggered
-        let appointment = generate_dummy_appointment(None).inner;
-        let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+        // Add the appointment to the Responder as a tracker so it counts as triggered
+        let dispute_tx = get_random_tx();
+        let tracker = TransactionTracker::new(
+            Breach::new(dispute_tx.clone(), get_random_tx()),
+            UserId(user_pk),
+            ConfirmationStatus::ConfirmedIn(100),
+        );
         internal_api
             .get_watcher()
-            .add_random_tracker_to_responder(UUID::new(appointment.locator, UserId(user_pk)));
+            .add_dummy_tracker_to_responder(&tracker);
 
         // Try to add it via the http API
+        let appointment = generate_dummy_appointment(Some(&dispute_tx.txid())).inner;
+        let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
         assert_eq!(
             check_api_error(
                 Endpoint::AddAppointment,
diff --git a/teos/src/api/internal.rs b/teos/src/api/internal.rs
index c6fa355d..fc2085d8 100644
--- a/teos/src/api/internal.rs
+++ b/teos/src/api/internal.rs
@@ -2,6 +2,7 @@ use std::sync::{Arc, Condvar, Mutex};
 use tonic::{Code, Request, Response, Status};
 use triggered::Trigger;
 
+use crate::extended_appointment::UUID;
 use crate::protos as msgs;
 use crate::protos::private_tower_services_server::PrivateTowerServices;
 use crate::protos::public_tower_services_server::PublicTowerServices;
@@ -386,10 +387,14 @@ impl PrivateTowerServices for Arc<InternalAPI> {
         })?;
 
         match self.watcher.get_user_info(user_id) {
-            Some(info) => Ok(Response::new(msgs::GetUserResponse {
+            Some((info, locators)) => Ok(Response::new(msgs::GetUserResponse {
                 available_slots: info.available_slots,
                 subscription_expiry: info.subscription_expiry,
-                appointments: info.appointments.keys().map(|uuid| uuid.to_vec()).collect(),
+                // TODO: Should make it return locators and make `get_appointments` queryable using the (user_id, locator) pair for consistency.
+                appointments: locators
+                    .into_iter()
+                    .map(|locator| UUID::new(locator, user_id).to_vec())
+                    .collect(),
             })),
             None => Err(Status::new(Code::NotFound, "User not found")),
         }
@@ -429,11 +434,10 @@ mod tests_private_api {
     use bitcoin::hashes::Hash;
     use bitcoin::Txid;
 
-    use crate::extended_appointment::UUID;
     use crate::responder::{ConfirmationStatus, TransactionTracker};
     use crate::test_utils::{
-        create_api, generate_dummy_appointment, generate_uuid, get_random_tx, DURATION, SLOTS,
-        START_HEIGHT,
+        create_api, generate_dummy_appointment, generate_dummy_appointment_with_user,
+        get_random_tx, DURATION, SLOTS, START_HEIGHT,
     };
     use crate::watcher::Breach;
@@ -486,9 +490,7 @@ mod tests_private_api {
         let (internal_api, _s) = create_api().await;
 
         // Add data to the Responder so we can retrieve it later on
-        internal_api
-            .watcher
-            .add_random_tracker_to_responder(generate_uuid());
+        internal_api.watcher.add_random_tracker_to_responder();
 
         let response = internal_api
             .get_all_appointments(Request::new(()))
@@ -588,7 +590,7 @@ mod tests_private_api {
             );
             internal_api
                 .watcher
-                .add_dummy_tracker_to_responder(generate_uuid(), &tracker);
+                .add_dummy_tracker_to_responder(&tracker);
         }
 
         let locator = Locator::new(dispute_tx.txid());
@@ -655,9 +657,7 @@ mod tests_private_api {
 
         // And the Responder
         for _ in 0..3 {
-            internal_api
-                .watcher
-                .add_random_tracker_to_responder(generate_uuid());
+            internal_api.watcher.add_random_tracker_to_responder();
         }
 
         let response = internal_api
@@ -730,12 +730,11 @@ mod tests_private_api {
         assert!(response.appointments.is_empty());
 
         // Add an appointment and check back
-        let appointment = generate_dummy_appointment(None).inner;
-        let uuid = UUID::new(appointment.locator, user_id);
-        let user_signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+        let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+        let user_signature = cryptography::sign(&appointment.inner.to_vec(), &user_sk).unwrap();
         internal_api
             .watcher
-            .add_appointment(appointment.clone(), user_signature)
+            .add_appointment(appointment.inner, user_signature)
             .unwrap();
 
         let response = internal_api
@@ -786,10 +785,12 @@ mod tests_public_api {
     use super::*;
 
-    use crate::extended_appointment::UUID;
+    use crate::responder::{ConfirmationStatus, TransactionTracker};
     use crate::test_utils::{
-        create_api, create_api_with_config, generate_dummy_appointment, ApiConfig, DURATION, SLOTS,
+        create_api, create_api_with_config, generate_dummy_appointment, get_random_tx, ApiConfig,
+        DURATION, SLOTS,
     };
+    use crate::watcher::Breach;
 
     use teos_common::cryptography::{self, get_random_keypair};
 
     #[tokio::test]
@@ -900,12 +901,12 @@ mod tests_public_api {
         internal_api.watcher.register(UserId(user_pk)).unwrap();
 
         let appointment = generate_dummy_appointment(None).inner;
-        let user_signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+        let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
 
         let response = internal_api
             .add_appointment(Request::new(common_msgs::AddAppointmentRequest {
                 appointment: Some(appointment.clone().into()),
-                signature: user_signature.clone(),
+                signature,
             }))
             .await
             .unwrap()
@@ -925,12 +926,12 @@ mod tests_public_api {
         let (user_sk, _) = get_random_keypair();
 
         let appointment = generate_dummy_appointment(None).inner;
-        let user_signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+        let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
 
         match internal_api
             .add_appointment(Request::new(common_msgs::AddAppointmentRequest {
                 appointment: Some(appointment.clone().into()),
-                signature: user_signature.clone(),
+                signature,
             }))
             .await
         {
@@ -954,12 +955,12 @@ mod tests_public_api {
         internal_api.watcher.register(UserId(user_pk)).unwrap();
 
         let appointment = generate_dummy_appointment(None).inner;
-        let user_signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+        let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
 
         match internal_api
             .add_appointment(Request::new(common_msgs::AddAppointmentRequest {
                 appointment: Some(appointment.clone().into()),
-                signature: user_signature.clone(),
+                signature,
             }))
             .await
         {
@@ -983,12 +984,12 @@ mod tests_public_api {
         internal_api.watcher.register(UserId(user_pk)).unwrap();
 
         let appointment = generate_dummy_appointment(None).inner;
-        let user_signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+        let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
 
         match internal_api
             .add_appointment(Request::new(common_msgs::AddAppointmentRequest {
                 appointment: Some(appointment.clone().into()),
-                signature: user_signature.clone(),
+                signature,
             }))
             .await
         {
@@ -1008,16 +1009,24 @@ mod tests_public_api {
         let user_id = UserId(user_pk);
         internal_api.watcher.register(user_id).unwrap();
 
-        let appointment = generate_dummy_appointment(None).inner;
-        let user_signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+        // Add a tracker to the responder to simulate it being triggered.
+        let dispute_tx = get_random_tx();
+        let tracker = TransactionTracker::new(
+            Breach::new(dispute_tx.clone(), get_random_tx()),
+            user_id,
+            ConfirmationStatus::ConfirmedIn(100),
+        );
         internal_api
-            .watcher
-            .add_random_tracker_to_responder(UUID::new(appointment.locator, user_id));
+            .get_watcher()
+            .add_dummy_tracker_to_responder(&tracker);
 
+        // Try to add it again using the API.
+        let appointment = generate_dummy_appointment(Some(&dispute_tx.txid())).inner;
+        let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
         match internal_api
             .add_appointment(Request::new(common_msgs::AddAppointmentRequest {
-                appointment: Some(appointment.clone().into()),
-                signature: user_signature.clone(),
+                appointment: Some(appointment.into()),
+                signature,
             }))
             .await
         {
@@ -1038,12 +1047,12 @@ mod tests_public_api {
         let (user_sk, _) = get_random_keypair();
 
         let appointment = generate_dummy_appointment(None).inner;
-        let user_signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+        let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
 
         match internal_api
             .add_appointment(Request::new(common_msgs::AddAppointmentRequest {
                 appointment: Some(appointment.clone().into()),
-                signature: user_signature.clone(),
+                signature,
             }))
             .await
         {
diff --git a/teos/src/dbm.rs b/teos/src/dbm.rs
index 4a53abda..ca6dabff 100644
--- a/teos/src/dbm.rs
+++ b/teos/src/dbm.rs
@@ -1,7 +1,7 @@
 //! Logic related to the tower database manager (DBM), component in charge of persisting data on disk.
 //!
 
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
 use std::iter::FromIterator;
 use std::path::PathBuf;
 use std::str::FromStr;
@@ -14,16 +14,15 @@ use bitcoin::hashes::Hash;
 use bitcoin::secp256k1::SecretKey;
 use bitcoin::BlockHash;
 
-use teos_common::appointment::{compute_appointment_slots, Appointment, Locator};
-use teos_common::constants::ENCRYPTED_BLOB_MAX_SIZE;
+use teos_common::appointment::{Appointment, Locator};
 use teos_common::dbm::{DatabaseConnection, DatabaseManager, Error};
 use teos_common::UserId;
 
 use crate::extended_appointment::{ExtendedAppointment, UUID};
 use crate::gatekeeper::UserInfo;
-use crate::responder::{ConfirmationStatus, TransactionTracker};
+use crate::responder::{ConfirmationStatus, PenaltySummary, TransactionTracker};
 
-const TABLES: [&str; 5] = [
+const TABLES: [&str; 6] = [
     "CREATE TABLE IF NOT EXISTS users (
     user_id INT PRIMARY KEY,
     available_slots INT NOT NULL,
@@ -59,6 +58,9 @@ const TABLES: [&str; 5] = [
     "CREATE TABLE IF NOT EXISTS keys (
     id INTEGER PRIMARY KEY AUTOINCREMENT,
     key INT NOT NULL
+)",
+    "CREATE INDEX IF NOT EXISTS locators_index ON appointments (
+    locator
 )",
 ];
 
@@ -139,27 +141,21 @@ impl DBM {
         }
     }
 
-    /// Loads the associated appointments ([Appointment]) of a given user ([UserInfo]).
-    pub(crate) fn load_user_appointments(&self, user_id: UserId) -> HashMap<UUID, u32> {
+    /// Loads the associated locators ([Locator]) of a given user ([UserId]).
+    pub(crate) fn load_user_locators(&self, user_id: UserId) -> Vec<Locator> {
         let mut stmt = self
             .connection
-            .prepare("SELECT UUID, encrypted_blob FROM appointments WHERE user_id=(?)")
+            .prepare("SELECT locator FROM appointments WHERE user_id=(?)")
             .unwrap();
-        let mut rows = stmt.query([user_id.to_vec()]).unwrap();
-
-        let mut appointments = HashMap::new();
-        while let Ok(Some(inner_row)) = rows.next() {
-            let raw_uuid: Vec<u8> = inner_row.get(0).unwrap();
-            let uuid = UUID::from_slice(&raw_uuid[0..20]).unwrap();
-            let e_blob: Vec<u8> = inner_row.get(1).unwrap();
-            appointments.insert(
-                uuid,
-                compute_appointment_slots(e_blob.len(), ENCRYPTED_BLOB_MAX_SIZE),
-            );
-        }
-
-        appointments
+        stmt.query_map([user_id.to_vec()], |row| {
+            let raw_locator: Vec<u8> = row.get(0).unwrap();
+            let locator = Locator::from_slice(&raw_locator).unwrap();
+            Ok(locator)
+        })
+        .unwrap()
+        .map(|res| res.unwrap())
+        .collect()
     }
 
     /// Loads all users from the database.
@@ -178,22 +174,14 @@ impl DBM {
             let start = row.get(2).unwrap();
             let expiry = row.get(3).unwrap();
 
-            users.insert(
-                user_id,
-                UserInfo::with_appointments(
-                    slots,
-                    start,
-                    expiry,
-                    self.load_user_appointments(user_id),
-                ),
-            );
+            users.insert(user_id, UserInfo::new(slots, start, expiry));
         }
 
         users
     }
 
     /// Removes some users from the database in batch.
-    pub(crate) fn batch_remove_users(&mut self, users: &HashSet<UserId>) -> usize {
+    pub(crate) fn batch_remove_users(&mut self, users: &Vec<UserId>) -> usize {
         let limit = self.connection.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER) as usize;
         let tx = self.connection.transaction().unwrap();
         let iter = users
@@ -219,6 +207,24 @@ impl DBM {
         (users.len() as f64 / limit as f64).ceil() as usize
     }
 
+    /// Get the number of stored appointments.
+    pub(crate) fn get_appointments_count(&self) -> usize {
+        let mut stmt = self
+            .connection
+            .prepare("SELECT COUNT(*) FROM appointments as a LEFT JOIN trackers as t ON a.UUID=t.UUID WHERE t.UUID IS NULL")
+            .unwrap();
+        stmt.query_row([], |row| row.get(0)).unwrap()
+    }
+
+    /// Get the number of stored trackers.
+    pub(crate) fn get_trackers_count(&self) -> usize {
+        let mut stmt = self
+            .connection
+            .prepare("SELECT COUNT(*) FROM trackers")
+            .unwrap();
+        stmt.query_row([], |row| row.get(0)).unwrap()
+    }
+
     /// Stores an [Appointment] into the database.
     pub(crate) fn store_appointment(
         &self,
@@ -250,7 +256,11 @@ impl DBM {
     }
 
     /// Updates an existing [Appointment] in the database.
-    pub(crate) fn update_appointment(&self, uuid: UUID, appointment: &ExtendedAppointment) {
+    pub(crate) fn update_appointment(
+        &self,
+        uuid: UUID,
+        appointment: &ExtendedAppointment,
+    ) -> Result<(), Error> {
         // DISCUSS: Check what fields we'd like to make updatable. e_blob and signature are the obvious, to_self_delay and start_block may not be necessary (or even risky)
         let query = "UPDATE appointments SET encrypted_blob=(?1), to_self_delay=(?2), user_signature=(?3), start_block=(?4) WHERE UUID=(?5)";
@@ -266,9 +276,11 @@ impl DBM {
         ) {
             Ok(_) => {
                 log::debug!("Appointment successfully updated: {uuid}");
+                Ok(())
             }
-            Err(_) => {
-                log::error!("Appointment not found, data cannot be updated: {uuid}");
+            Err(e) => {
+                log::error!("Appointment not found, data cannot be updated: {uuid}. Error: {e:?}");
+                Err(e)
             }
         }
     }
@@ -305,6 +317,15 @@ impl DBM {
             .ok()
     }
 
+    /// Check if an appointment with `uuid` exists.
+    pub(crate) fn appointment_exists(&self, uuid: UUID) -> bool {
+        self.connection
+            .prepare("SELECT UUID FROM appointments WHERE UUID=(?)")
+            .unwrap()
+            .exists([uuid.to_vec()])
+            .unwrap()
+    }
+
     /// Loads appointments from the database. If a locator is given, this method loads only the appointments
     /// matching this locator. If no locator is given, all the appointments in the database would be returned.
     pub(crate) fn load_appointments(
@@ -352,6 +373,32 @@ impl DBM {
         appointments
     }
 
+    /// Gets the length of an appointment (the length of `appointment.encrypted_blob`).
+    pub(crate) fn get_appointment_length(&self, uuid: UUID) -> Option<usize> {
+        let mut stmt = self
+            .connection
+            .prepare("SELECT length(encrypted_blob) FROM appointments WHERE UUID=(?)")
+            .unwrap();
+
+        stmt.query_row([uuid.to_vec()], |row| row.get(0)).ok()
+    }
+
+    /// Gets the [`UserId`] of the owner of the appointment along with the appointment
+    /// length (same as [DBM::get_appointment_length]) for `uuid`.
+    pub(crate) fn get_appointment_user_and_length(&self, uuid: UUID) -> Option<(UserId, usize)> {
+        let mut stmt = self
+            .connection
+            .prepare("SELECT user_id, length(encrypted_blob) FROM appointments WHERE UUID=(?)")
+            .unwrap();
+
+        stmt.query_row([uuid.to_vec()], |row| {
+            let raw_userid: Vec<u8> = row.get(0).unwrap();
+            let length = row.get(1).unwrap();
+            Ok((UserId::from_slice(&raw_userid).unwrap(), length))
+        })
+        .ok()
+    }
+
     /// Removes an [Appointment] from the database.
     pub(crate) fn remove_appointment(&self, uuid: UUID) {
         let query = "DELETE FROM appointments WHERE UUID=(?)";
@@ -365,11 +412,12 @@ impl DBM {
         }
     }
 
-    /// Removes some appointments from the database in batch and updates the associated users giving back
-    /// the freed appointment slots
+    /// Removes some appointments from the database in batch and updates the associated users
+    /// (giving back freed appointment slots) in one transaction so that the deletion and the
+    /// update are atomic.
     pub(crate) fn batch_remove_appointments(
         &mut self,
-        appointments: &HashSet<UUID>,
+        appointments: &Vec<UUID>,
         updated_users: &HashMap<UserId, UserInfo>,
     ) -> usize {
         let limit = self.connection.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER) as usize;
@@ -405,18 +453,49 @@ impl DBM {
         (appointments.len() as f64 / limit as f64).ceil() as usize
     }
 
-    /// Loads the locator associated to a given UUID
-    pub(crate) fn load_locator(&self, uuid: UUID) -> Option<Locator> {
+    /// Loads the [`UUID`]s of appointments triggered by `locator`.
+    pub(crate) fn load_uuids(&self, locator: Locator) -> Vec<UUID> {
         let mut stmt = self
             .connection
-            .prepare("SELECT locator FROM appointments WHERE UUID=(?)")
+            .prepare("SELECT UUID from appointments WHERE locator=(?)")
             .unwrap();
 
-        stmt.query_row([uuid.to_vec()], |row| {
-            let raw_locator: Vec<u8> = row.get(0).unwrap();
-            Ok(Locator::from_slice(&raw_locator).unwrap())
+        stmt.query_map([locator.to_vec()], |row| {
+            let raw_uuid: Vec<u8> = row.get(0).unwrap();
+            let uuid = UUID::from_slice(&raw_uuid).unwrap();
+            Ok(uuid)
         })
-        .ok()
+        .unwrap()
+        .map(|uuid_res| uuid_res.unwrap())
+        .collect()
+    }
+
+    /// Filters the given set of [`Locator`]s by including only the ones which trigger any of our stored appointments.
+    pub(crate) fn batch_check_locators_exist(&self, locators: Vec<&Locator>) -> Vec<Locator> {
+        let mut registered_locators = Vec::new();
+        let locators: Vec<Vec<u8>> = locators.iter().map(|l| l.to_vec()).collect();
+        let limit = self.connection.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER) as usize;
+
+        for chunk in locators.chunks(limit) {
+            let query = "SELECT locator FROM appointments WHERE locator IN ".to_owned();
+            let placeholders = format!("(?{})", (", ?").repeat(chunk.len() - 1));
+
+            let mut stmt = self
+                .connection
+                .prepare(&format!("{query}{placeholders}"))
+                .unwrap();
+            let known_locators = stmt
+                .query_map(params_from_iter(chunk), |row| {
+                    let raw_locator: Vec<u8> = row.get(0).unwrap();
+                    let locator = Locator::from_slice(&raw_locator).unwrap();
+                    Ok(locator)
+                })
+                .unwrap()
+                .map(|locator_res| locator_res.unwrap());
+            registered_locators.extend(known_locators);
+        }
+
+        registered_locators
    }
 
     /// Stores a [TransactionTracker] into the database.
@@ -450,6 +529,29 @@ impl DBM {
         }
     }
 
+    /// Updates the tracker status in the database.
+    ///
+    /// The only updatable fields are `height` and `confirmed`.
+    pub(crate) fn update_tracker_status(
+        &self,
+        uuid: UUID,
+        status: &ConfirmationStatus,
+    ) -> Result<(), Error> {
+        let (height, confirmed) = status.to_db_data().ok_or(Error::MissingField)?;
+
+        let query = "UPDATE trackers SET height=(?1), confirmed=(?2) WHERE UUID=(?3)";
+        match self.update_data(query, params![height, confirmed, uuid.to_vec(),]) {
+            Ok(x) => {
+                log::debug!("Tracker successfully updated: {uuid}");
+                Ok(x)
+            }
+            Err(e) => {
+                log::error!("Couldn't update tracker: {uuid}. Error: {e:?}");
+                Err(e)
+            }
+        }
+    }
+
     /// Loads a [TransactionTracker] from the database.
     pub(crate) fn load_tracker(&self, uuid: UUID) -> Option<TransactionTracker> {
         let key = uuid.to_vec();
@@ -481,6 +583,15 @@ impl DBM {
             .ok()
     }
 
+    /// Check if a tracker with `uuid` exists.
+    pub(crate) fn tracker_exists(&self, uuid: UUID) -> bool {
+        self.connection
+            .prepare("SELECT UUID FROM trackers WHERE UUID=(?)")
+            .unwrap()
+            .exists([uuid.to_vec()])
+            .unwrap()
+    }
+
     /// Loads trackers from the database. If a locator is given, this method loads only the trackers
     /// matching this locator. If no locator is given, all the trackers in the database would be returned.
     pub(crate) fn load_trackers(
@@ -530,6 +641,66 @@ impl DBM {
         trackers
     }
 
+    /// Loads trackers with the given confirmation status.
+    ///
+    /// Note that for [`ConfirmationStatus::InMempoolSince(height)`] variant, this pulls trackers
+    /// with `h <= height` and not just `h = height`.
+    pub(crate) fn load_trackers_with_confirmation_status(
+        &self,
+        status: ConfirmationStatus,
+    ) -> Result<Vec<UUID>, Error> {
+        let (height, confirmed) = status.to_db_data().ok_or(Error::MissingField)?;
+        let sql = format!(
+            "SELECT UUID FROM trackers WHERE confirmed=(?1) AND height{}(?2)",
+            if confirmed { "=" } else { "<=" }
+        );
+        let mut stmt = self.connection.prepare(&sql).unwrap();
+
+        Ok(stmt
+            .query_map(params![confirmed, height], |row| {
+                let raw_uuid: Vec<u8> = row.get(0).unwrap();
+                let uuid = UUID::from_slice(&raw_uuid).unwrap();
+                Ok(uuid)
+            })
+            .unwrap()
+            .map(|uuid_res| uuid_res.unwrap())
+            .collect())
+    }
+
+    /// Loads the transaction IDs of all the penalties and their status from the database.
+    pub(crate) fn load_penalties_summaries(&self) -> HashMap<UUID, PenaltySummary> {
+        let mut summaries = HashMap::new();
+
+        let mut stmt = self
+            .connection
+            .prepare(
+                "SELECT t.UUID, t.penalty_tx, t.height, t.confirmed
+                FROM trackers as t INNER JOIN appointments as a ON t.UUID=a.UUID",
+            )
+            .unwrap();
+        let mut rows = stmt.query([]).unwrap();
+
+        while let Ok(Some(row)) = rows.next() {
+            let raw_uuid: Vec<u8> = row.get(0).unwrap();
+            let raw_penalty_tx: Vec<u8> = row.get(1).unwrap();
+            let height: u32 = row.get(2).unwrap();
+            let confirmed: bool = row.get(3).unwrap();
+
+            // DISCUSS: Should we store the txids to avoid pulling raw txs and deserializing then hashing them.
+            let penalty_txid = consensus::deserialize::<Transaction>(&raw_penalty_tx)
+                .unwrap()
+                .txid();
+            summaries.insert(
+                UUID::from_slice(&raw_uuid).unwrap(),
+                PenaltySummary::new(
+                    penalty_txid,
+                    ConfirmationStatus::from_db_data(height, confirmed),
+                ),
+            );
+        }
+        summaries
+    }
+
     /// Stores the last known block into the database.
     pub(crate) fn store_last_known_block(&self, block_hash: &BlockHash) -> Result<(), Error> {
         let query = "INSERT OR REPLACE INTO last_known_block (id, block_hash) VALUES (0, ?)";
@@ -581,11 +752,13 @@ impl DBM {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use std::collections::HashSet;
     use std::iter::FromIterator;
 
     use teos_common::cryptography::{get_random_bytes, get_random_keypair};
-    use teos_common::test_utils::get_random_user_id;
+    use teos_common::test_utils::{get_random_locator, get_random_user_id};
 
+    use crate::rpc_errors;
     use crate::test_utils::{
         generate_dummy_appointment, generate_dummy_appointment_with_user, generate_uuid,
         get_random_tracker, get_random_tx, AVAILABLE_SLOTS, SUBSCRIPTION_EXPIRY,
@@ -607,20 +780,15 @@ mod tests {
             let mut stmt = self
                 .connection
                 .prepare(
-                    "SELECT user_id, available_slots, subscription_start, subscription_expiry
+                    "SELECT available_slots, subscription_start, subscription_expiry
                     FROM users WHERE user_id=(?)",
                 )
                 .unwrap();
 
             stmt.query_row([&key], |row| {
-                let slots = row.get(1).unwrap();
-                let start = row.get(2).unwrap();
-                let expiry = row.get(3).unwrap();
-                Ok(UserInfo::with_appointments(
-                    slots,
-                    start,
-                    expiry,
-                    self.load_user_appointments(user_id),
-                ))
+                let slots = row.get(0).unwrap();
+                let start = row.get(1).unwrap();
+                let expiry = row.get(2).unwrap();
+                Ok(UserInfo::new(slots, start, expiry))
             })
             .ok()
         }
@@ -651,27 +819,6 @@ mod tests {
         ));
     }
 
-    #[test]
-    fn test_store_load_user_with_appointments() {
-        let dbm = DBM::in_memory().unwrap();
-
-        let user_id = get_random_user_id();
-        let mut user = UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY);
-
-        dbm.store_user(user_id, &user).unwrap();
-
-        // Add some appointments to the user
-        for _ in 0..10 {
-            let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
-            dbm.store_appointment(uuid, &appointment).unwrap();
-            user.appointments.insert(uuid, 1);
-        }
-
-        // Check both loading the whole user info or only the associated appointments
-        assert_eq!(dbm.load_user(user_id).unwrap(), user);
-        assert_eq!(dbm.load_user_appointments(user_id), user.appointments);
-    }
-
     #[test]
     fn test_load_nonexistent_user() {
         let dbm = DBM::in_memory().unwrap();
@@ -695,6 +842,30 @@ mod tests {
         assert_eq!(dbm.load_user(user_id).unwrap(), user);
     }
 
+    #[test]
+    fn test_load_user_locators() {
+        let dbm = DBM::in_memory().unwrap();
+
+        let user_id = get_random_user_id();
+        let user = UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY);
+        dbm.store_user(user_id, &user).unwrap();
+
+        let mut locators = HashSet::new();
+
+        // Add some appointments to the user
+        for _ in 0..10 {
+            let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+            dbm.store_appointment(uuid, &appointment).unwrap();
+            locators.insert(appointment.locator());
+        }
+
+        assert_eq!(dbm.load_user(user_id).unwrap(), user);
+        assert_eq!(
+            HashSet::from_iter(dbm.load_user_locators(user_id)),
+            locators
+        );
+    }
+
     #[test]
     fn test_load_all_users() {
         let dbm = DBM::in_memory().unwrap();
@@ -707,19 +878,8 @@ mod tests {
                 SUBSCRIPTION_START + i,
                 SUBSCRIPTION_EXPIRY + i,
             );
-            users.insert(user_id, user.clone());
+            users.insert(user_id, user);
             dbm.store_user(user_id, &user).unwrap();
-
-            // Add appointments to some of the users
-            if i % 2 == 0 {
-                let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
-                dbm.store_appointment(uuid, &appointment).unwrap();
-                users
-                    .get_mut(&user_id)
-                    .unwrap()
-                    .appointments
-                    .insert(uuid, 1);
-            }
         }
 
         assert_eq!(dbm.load_all_users(), users);
@@ -735,7 +895,7 @@ mod tests {
         dbm.connection
             .set_limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER, limit);
 
-        let mut to_be_deleted = HashSet::new();
+        let mut to_be_deleted = Vec::new();
         let mut rest = HashSet::new();
         for i in 1..100 {
             let user_id = get_random_user_id();
@@ -743,7 +903,7 @@ mod tests {
             dbm.store_user(user_id, &user).unwrap();
 
             if i % 2 == 0 {
-                to_be_deleted.insert(user_id);
+                to_be_deleted.push(user_id);
             } else {
                 rest.insert(user_id);
             }
@@ -775,7 +935,7 @@ mod tests {
             Ok { .. }
         ));
 
-        dbm.batch_remove_users(&HashSet::from_iter(vec![appointment.user_id]));
+        dbm.batch_remove_users(&vec![appointment.user_id]);
 
         assert!(dbm.load_user(appointment.user_id).is_none());
         assert!(dbm.load_appointment(uuid).is_none());
@@ -787,7 +947,7 @@ mod tests {
         ));
         assert!(matches!(dbm.store_tracker(uuid, &tracker), Ok { .. }));
 
-        dbm.batch_remove_users(&HashSet::from_iter(vec![appointment.user_id]));
+        dbm.batch_remove_users(&vec![appointment.user_id]);
         assert!(dbm.load_user(appointment.user_id).is_none());
         assert!(dbm.load_appointment(uuid).is_none());
         assert!(dbm.load_tracker(uuid).is_none());
@@ -802,6 +962,37 @@ mod tests {
         dbm.batch_remove_users(&users);
     }
 
+    #[test]
+    fn test_get_appointments_trackers_count() {
+        let dbm = DBM::in_memory().unwrap();
+        let n_users = 100;
+        let n_app_per_user = 4;
+        let n_trk_per_user = 6;
+
+        for _ in 0..n_users {
+            let user_id = get_random_user_id();
+            let user = UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY);
+            dbm.store_user(user_id, &user).unwrap();
+
+            // These are un-triggered appointments.
+            for _ in 0..n_app_per_user {
+                let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+                dbm.store_appointment(uuid, &appointment).unwrap();
+            }
+
+            // And these are triggered ones (trackers).
+            for _ in 0..n_trk_per_user {
+                let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+                dbm.store_appointment(uuid, &appointment).unwrap();
+                let tracker = get_random_tracker(user_id, ConfirmationStatus::ConfirmedIn(42));
+                dbm.store_tracker(uuid, &tracker).unwrap();
+            }
+        }
+
+        assert_eq!(dbm.get_appointments_count(), n_users * n_app_per_user);
+        assert_eq!(dbm.get_trackers_count(), n_users * n_trk_per_user);
+    }
+
     #[test]
     fn test_store_load_appointment() {
         let dbm = DBM::in_memory().unwrap();
@@ -848,6 +1039,22 @@ mod tests {
         assert!(dbm.load_appointment(uuid).is_none());
     }
 
+    #[test]
+    fn test_appointment_exists() {
+        let dbm = DBM::in_memory().unwrap();
+
+        let user_id = get_random_user_id();
+        let user = UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY);
+        let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+
+        assert!(!dbm.appointment_exists(uuid));
+
+        dbm.store_user(user_id, &user).unwrap();
+        dbm.store_appointment(uuid, &appointment).unwrap();
+
+        assert!(dbm.appointment_exists(uuid));
+    }
+
     #[test]
     fn test_update_appointment() {
         let dbm = DBM::in_memory().unwrap();
@@ -871,7 +1078,8 @@ mod tests {
         another_modified_appointment.user_id = get_random_user_id();
 
         // Check how only the modifiable fields have been updated
-        dbm.update_appointment(uuid, &another_modified_appointment);
+        dbm.update_appointment(uuid, &another_modified_appointment)
+            .unwrap();
         assert_eq!(dbm.load_appointment(uuid).unwrap(), modified_appointment);
         assert_ne!(
             dbm.load_appointment(uuid).unwrap(),
@@ -970,6 +1178,44 @@ mod tests {
         assert_eq!(dbm.load_appointments(Some(locator)), appointments);
     }
 
+    #[test]
+    fn test_get_appointment_length() {
+        let dbm = DBM::in_memory().unwrap();
+
+        let user_id = get_random_user_id();
+        let user = UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY);
+        let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+
+        dbm.store_user(user_id, &user).unwrap();
+        dbm.store_appointment(uuid, &appointment).unwrap();
+
+        assert_eq!(
+            dbm.get_appointment_length(uuid).unwrap(),
+            appointment.inner.encrypted_blob.len()
+        );
+        assert!(dbm.get_appointment_length(generate_uuid()).is_none());
+    }
+
+    #[test]
+    fn test_get_appointment_user_and_length() {
+        let dbm = DBM::in_memory().unwrap();
+
+        let user_id = get_random_user_id();
+        let user = UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY);
+        let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+
+        dbm.store_user(user_id, &user).unwrap();
+        dbm.store_appointment(uuid, &appointment).unwrap();
+
+        assert_eq!(
+            dbm.get_appointment_user_and_length(uuid).unwrap(),
+            (user_id, appointment.encrypted_blob().len())
+        );
+        assert!(dbm
+            .get_appointment_user_and_length(generate_uuid())
+            .is_none());
+    }
+
     #[test]
     fn test_batch_remove_appointments() {
         let mut dbm = DBM::in_memory().unwrap();
@@ -990,13 +1236,13 @@ mod tests {
         let mut rest = HashSet::new();
         for i in 1..6 {
-            let mut to_be_deleted = HashSet::new();
+            let mut to_be_deleted = Vec::new();
             for j in 0..limit * 2 * i {
                 let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
                 dbm.store_appointment(uuid, &appointment).unwrap();
 
                 if j % 2 == 0 {
-                    to_be_deleted.insert(uuid);
+                    to_be_deleted.push(uuid);
                 } else {
                     rest.insert(uuid);
                 }
@@ -1005,7 +1251,7 @@ mod tests {
 
             // When the appointments are deleted, the user will get back slots based on the deleted data.
             // Here we can just make a number up to make sure it matches.
             user.available_slots = i as u32;
-            let updated_users = HashMap::from_iter([(user_id, user.clone())]);
+            let updated_users = HashMap::from_iter([(user_id, user)]);
 
             // Check that the db transaction had i queries on it
             assert_eq!(
@@ -1041,8 +1287,8 @@ mod tests {
         ));
 
         dbm.batch_remove_appointments(
-            &HashSet::from_iter(vec![uuid]),
-            &HashMap::from_iter([(appointment.user_id, info.clone())]),
+            &vec![uuid],
+            &HashMap::from_iter([(appointment.user_id, info)]),
         );
 
         assert!(dbm.load_appointment(uuid).is_none());
@@ -1054,7 +1300,7 @@ mod tests {
         assert!(matches!(dbm.store_tracker(uuid, &tracker), Ok { .. }));
 
         dbm.batch_remove_appointments(
-            &HashSet::from_iter(vec![uuid]),
+            &vec![uuid],
             &HashMap::from_iter([(appointment.user_id, info)]),
         );
         assert!(dbm.load_appointment(uuid).is_none());
@@ -1069,32 +1315,83 @@ mod tests {
         // Test it does not fail even if the user does not exist (it will log though)
         dbm.batch_remove_appointments(&appointments, &HashMap::new());
     }
+
     #[test]
-    fn test_load_locator() {
+    fn test_load_uuids() {
         let dbm = DBM::in_memory().unwrap();
 
-        // In order to add an appointment we need the associated user to be present
-        let user_id = get_random_user_id();
         let user = UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY);
-        dbm.store_user(user_id, &user).unwrap();
+        let dispute_tx = get_random_tx();
+        let dispute_txid = dispute_tx.txid();
+        let mut uuids = HashSet::new();
 
-        let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+        // Add ten appointments triggered by the same locator.
+        for _ in 0..10 {
+            let user_id = get_random_user_id();
+            dbm.store_user(user_id, &user).unwrap();
 
-        assert!(matches!(
-            dbm.store_appointment(uuid, &appointment),
-            Ok { .. }
-        ));
+            let (uuid, appointment) =
+                generate_dummy_appointment_with_user(user_id, Some(&dispute_txid));
+            dbm.store_appointment(uuid, &appointment).unwrap();
+
+            uuids.insert(uuid);
+        }
 
-        // We should be able to load the locator now the appointment exists
-        assert_eq!(dbm.load_locator(uuid).unwrap(), appointment.locator());
+        // Add ten more appointments triggered by different locators.
+        for _ in 0..10 {
+            let user_id = get_random_user_id();
+            dbm.store_user(user_id, &user).unwrap();
+
+            let dispute_txid = get_random_tx().txid();
+            let (uuid, appointment) =
+                generate_dummy_appointment_with_user(user_id, Some(&dispute_txid));
+            dbm.store_appointment(uuid, &appointment).unwrap();
+        }
+
+        assert_eq!(
+            HashSet::from_iter(dbm.load_uuids(Locator::new(dispute_txid))),
+            uuids
+        );
     }
 
     #[test]
-    fn test_load_nonexistent_locator() {
+    fn test_batch_check_locators_exist() {
         let dbm = DBM::in_memory().unwrap();
+        // Generate `n_app` appointments which we will store in the DB.
+        let n_app = 100;
+        let appointments: Vec<_> = (0..n_app)
+            .map(|_| generate_dummy_appointment(None))
+            .collect();
+
+        // Register all the users beforehand.
+        for user_id in appointments.iter().map(|a| a.user_id) {
+            let user = UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY);
+            dbm.store_user(user_id, &user).unwrap();
+        }
 
-        let (uuid, _) = generate_dummy_appointment_with_user(get_random_user_id(), None);
-        assert!(dbm.load_locator(uuid).is_none());
+        // Store all the `n_app` appointments.
+        for appointment in appointments.iter() {
+            dbm.store_appointment(appointment.uuid(), appointment)
+                .unwrap();
+        }
+
+        // Select `n_app / 5` locators as if they appeared in a new block.
+        let known_locators: HashSet<_> = appointments
+            .iter()
+            .take(n_app / 5)
+            .map(|a| a.locator())
+            .collect();
+        // And extra `n_app / 5` unknown locators.
+        let unknown_locators: HashSet<_> = (0..n_app / 5).map(|_| get_random_locator()).collect();
+        let all_locators = known_locators
+            .iter()
+            .chain(unknown_locators.iter())
+            .collect();
+
+        assert_eq!(
+            HashSet::from_iter(dbm.batch_check_locators_exist(all_locators)),
+            known_locators
+        );
     }
 
     #[test]
@@ -1154,6 +1451,38 @@ mod tests {
         ));
     }
 
+    #[test]
+    fn test_update_tracker_status() {
+        let dbm = DBM::in_memory().unwrap();
+
+        let user_id = get_random_user_id();
+        let user = UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY);
+        dbm.store_user(user_id, &user).unwrap();
+
+        let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+        dbm.store_appointment(uuid, &appointment).unwrap();
+
+        let tracker = get_random_tracker(user_id, ConfirmationStatus::InMempoolSince(42));
+        dbm.store_tracker(uuid, &tracker).unwrap();
+
+        // Update the status and check if it's actually updated.
+        dbm.update_tracker_status(uuid, &ConfirmationStatus::ConfirmedIn(100))
+            .unwrap();
+        assert_eq!(
+            dbm.load_tracker(uuid).unwrap().status,
+            ConfirmationStatus::ConfirmedIn(100)
+        );
+
+        // Rejected status doesn't have a persistent DB representation.
+        assert!(matches!(
+            dbm.update_tracker_status(
+                uuid,
+                &ConfirmationStatus::Rejected(rpc_errors::RPC_VERIFY_REJECTED)
+            ),
+            Err(Error::MissingField)
+        ));
+    }
+
     #[test]
     fn test_load_nonexistent_tracker() {
         let dbm = DBM::in_memory().unwrap();
@@ -1224,6 +1553,166 @@ mod tests {
         assert_eq!(dbm.load_trackers(Some(locator)), trackers);
     }
 
+    #[test]
+    fn test_load_trackers_with_confirmation_status_in_mempool() {
+        let dbm = DBM::in_memory().unwrap();
+        let n_trackers = 100;
+        let mut tracker_statuses = HashMap::new();
+
+        // Store a bunch of trackers.
+        for i in 0..n_trackers {
+            let user_id = get_random_user_id();
+            let user = UserInfo::new(
+                AVAILABLE_SLOTS + i,
+                SUBSCRIPTION_START + i,
+                SUBSCRIPTION_EXPIRY + i,
+            );
+            dbm.store_user(user_id, &user).unwrap();
+
+            let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+            dbm.store_appointment(uuid, &appointment).unwrap();
+
+            // Some trackers confirmed and some aren't.
+            let status = if i % 2 == 0 {
+                ConfirmationStatus::InMempoolSince(i)
+            } else {
+                ConfirmationStatus::ConfirmedIn(i)
+            };
+
+            let tracker = get_random_tracker(user_id, status);
+            dbm.store_tracker(uuid, &tracker).unwrap();
+            tracker_statuses.insert(uuid, status);
+        }
+
+        for i in 0..n_trackers + 10 {
+            let in_mempool_since_i: HashSet<UUID> = tracker_statuses
+                .iter()
+                .filter_map(|(&uuid, &status)| {
+                    if let ConfirmationStatus::InMempoolSince(x) = status {
+                        // If a tracker was in mempool since x, then it's also in mempool since x + 1, x + 2, etc...
+                        return (x <= i).then_some(uuid);
+                    }
+                    None
+                })
+                .collect();
+            assert_eq!(
+                HashSet::from_iter(
+                    dbm.load_trackers_with_confirmation_status(ConfirmationStatus::InMempoolSince(
+                        i
+                    ))
+                    .unwrap()
+                ),
+                in_mempool_since_i,
+            );
+        }
+    }
+
+    #[test]
+    fn test_load_trackers_with_confirmation_status_confirmed() {
+        let dbm = DBM::in_memory().unwrap();
+        let n_blocks = 100;
+        let n_trackers = 30;
+        let mut tracker_statuses = HashMap::new();
+
+        // Loop over a bunch of blocks.
+        for i in 0..n_blocks {
+            // Store a bunch of trackers in each block.
+            for j in 0..n_trackers {
+                let user_id = get_random_user_id();
+                let user = UserInfo::new(
+                    AVAILABLE_SLOTS + i,
+                    SUBSCRIPTION_START + i,
+                    SUBSCRIPTION_EXPIRY + i,
+                );
+                dbm.store_user(user_id, &user).unwrap();
+
+                let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+                dbm.store_appointment(uuid, &appointment).unwrap();
+
+                // Some trackers confirmed and some aren't.
+                let status = if j % 2 == 0 {
+                    ConfirmationStatus::InMempoolSince(i)
+                } else {
+                    ConfirmationStatus::ConfirmedIn(i)
+                };
+
+                let tracker = get_random_tracker(user_id, status);
+                dbm.store_tracker(uuid, &tracker).unwrap();
+                tracker_statuses.insert(uuid, status);
+            }
+        }
+
+        for i in 0..n_blocks + 10 {
+            let confirmed_in_i: HashSet<UUID> = tracker_statuses
+                .iter()
+                .filter_map(|(&uuid, &status)| {
+                    if let ConfirmationStatus::ConfirmedIn(x) = status {
+                        return (x == i).then_some(uuid);
+                    }
+                    None
+                })
+                .collect();
+            assert_eq!(
+                HashSet::from_iter(
+                    dbm.load_trackers_with_confirmation_status(ConfirmationStatus::ConfirmedIn(i))
+                        .unwrap()
+                ),
+                confirmed_in_i,
+            );
+        }
+    }
+
+    #[test]
+    fn test_load_trackers_with_confirmation_status_bad_status() {
+        let dbm = DBM::in_memory().unwrap();
+
+        assert!(matches!(
+            dbm.load_trackers_with_confirmation_status(ConfirmationStatus::Rejected(
+                rpc_errors::RPC_VERIFY_REJECTED
+            )),
+            Err(Error::MissingField)
+        ));
+
+        assert!(matches!(
+            dbm.load_trackers_with_confirmation_status(ConfirmationStatus::IrrevocablyResolved),
+            Err(Error::MissingField)
+        ));
+    }
+
+    #[test]
+    fn test_load_penalties_summaries() {
+        let dbm = DBM::in_memory().unwrap();
+        let n_trackers = 100;
+        let mut penalties_summaries = HashMap::new();
+
+        for i in 0..n_trackers {
+            let user_id = get_random_user_id();
+            let user = UserInfo::new(
+                AVAILABLE_SLOTS + i,
+                SUBSCRIPTION_START + i,
+                SUBSCRIPTION_EXPIRY + i,
+            );
+            dbm.store_user(user_id, &user).unwrap();
+
+            let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+            dbm.store_appointment(uuid, &appointment).unwrap();
+
+            let status = if i % 2 == 0 {
+                ConfirmationStatus::InMempoolSince(i)
+            } else {
+                ConfirmationStatus::ConfirmedIn(i)
+            };
+
+            let tracker = get_random_tracker(user_id, status);
+            dbm.store_tracker(uuid, &tracker).unwrap();
+
+            penalties_summaries
+                .insert(uuid, PenaltySummary::new(tracker.penalty_tx.txid(), status));
+        }
+
+        assert_eq!(dbm.load_penalties_summaries(), penalties_summaries);
+    }
+
     #[test]
     fn test_store_load_last_known_block() {
         let dbm = DBM::in_memory().unwrap();
diff --git a/teos/src/extended_appointment.rs b/teos/src/extended_appointment.rs
index 88920de0..e7ba8f45 100644
--- a/teos/src/extended_appointment.rs
+++ b/teos/src/extended_appointment.rs
@@ -46,8 +46,6 @@ impl std::fmt::Display for UUID {
 /// An extended version of the appointment held by the tower.
 ///
 /// The [Appointment] is extended in terms of data, that is, it provides further information only relevant to the tower.
-/// Notice [ExtendedAppointment]s are not kept in memory but persisted on disk. The [Watcher](crate::watcher::Watcher)
-/// keeps [AppointmentSummary] instead.
 #[derive(Debug, Eq, PartialEq, Clone)]
 pub(crate) struct ExtendedAppointment {
     /// The underlying appointment extended by [ExtendedAppointment].
     pub inner: Appointment,
@@ -60,18 +58,6 @@ pub(crate) struct ExtendedAppointment {
     pub start_block: u32,
 }
 
-/// A summary of an appointment.
-///
-/// Contains the minimal amount of data the [Watcher](crate::watcher::Watcher) needs to keep in memory in order to
-/// watch for breaches.
-#[derive(Debug, Eq, PartialEq, Clone)]
-pub(crate) struct AppointmentSummary {
-    /// The [Appointment] locator.
-    pub locator: Locator,
-    /// The user this [Appointment] belongs to.
-    pub user_id: UserId,
-}
-
 impl ExtendedAppointment {
     /// Create a new [ExtendedAppointment].
     pub fn new(
@@ -103,12 +89,8 @@ impl ExtendedAppointment {
         self.inner.to_self_delay
     }
 
-    /// Computes the summary of the [ExtendedAppointment].
-    pub fn get_summary(&self) -> AppointmentSummary {
-        AppointmentSummary {
-            locator: self.locator(),
-            user_id: self.user_id,
-        }
+    pub fn uuid(&self) -> UUID {
+        UUID::new(self.inner.locator, self.user_id)
     }
 }
 
@@ -116,22 +98,14 @@ impl ExtendedAppointment {
 mod tests {
     use super::*;
 
-    use teos_common::appointment::Appointment;
-    use teos_common::cryptography::get_random_bytes;
-    use teos_common::test_utils::get_random_user_id;
+    use crate::test_utils::generate_uuid;
 
     #[test]
-    fn test_get_summary() {
-        let locator = Locator::from_slice(&get_random_bytes(16)).unwrap();
-        let user_id = get_random_user_id();
-        let signature = String::new();
-
-        let a = Appointment::new(locator, get_random_bytes(32), 42);
-        let e = ExtendedAppointment::new(a, user_id, signature, 21);
-
-        let s = e.get_summary();
-
-        assert_eq!(e.locator(), s.locator);
-        assert_eq!(e.user_id, s.user_id);
+    fn test_uuid_ser_deser() {
+        let original_uuid = generate_uuid();
+        assert_eq!(
+            UUID::from_slice(&original_uuid.to_vec()).unwrap(),
+            original_uuid
+        );
     }
 }
diff --git a/teos/src/gatekeeper.rs b/teos/src/gatekeeper.rs
index 31069916..e5d490e3 100644
--- a/teos/src/gatekeeper.rs
+++ b/teos/src/gatekeeper.rs
@@ -1,13 +1,11 @@
 //! Logic related to the Gatekeeper, the component in charge of managing access to the tower resources.
 
-use std::collections::{HashMap, HashSet};
-use std::iter::FromIterator;
+use lightning::chain;
+use std::collections::HashMap;
 use std::sync::atomic::{AtomicU32, Ordering};
 use std::sync::{Arc, Mutex};
 
-use lightning::chain;
-
-use teos_common::appointment::compute_appointment_slots;
+use teos_common::appointment::{compute_appointment_slots, Locator};
 use teos_common::constants::ENCRYPTED_BLOB_MAX_SIZE;
 use teos_common::cryptography;
 use teos_common::receipts::RegistrationReceipt;
@@ -17,7 +15,7 @@ use crate::dbm::DBM;
 use crate::extended_appointment::{ExtendedAppointment, UUID};
 
 /// Data regarding a user subscription with the tower.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub(crate) struct UserInfo {
     /// Number of appointment slots available for a given user.
     pub(crate) available_slots: u32,
@@ -25,8 +23,6 @@ pub(crate) struct UserInfo {
     pub(crate) subscription_start: u32,
     /// Block height where the user subscription expires.
     pub(crate) subscription_expiry: u32,
-    /// Map of appointment ids and the how many slots they take from the subscription.
-    pub(crate) appointments: HashMap<UUID, u32>,
 }
 
 impl UserInfo {
@@ -36,22 +32,6 @@ impl UserInfo {
             available_slots,
             subscription_start,
             subscription_expiry,
-            appointments: HashMap::new(),
-        }
-    }
-
-    /// Creates a new [UserInfo] instance with some associated appointments.
-    pub fn with_appointments(
-        available_slots: u32,
-        subscription_start: u32,
-        subscription_expiry: u32,
-        appointments: HashMap<UUID, u32>,
-    ) -> Self {
-        UserInfo {
-            available_slots,
-            subscription_start,
-            subscription_expiry,
-            appointments,
         }
     }
 }
@@ -134,8 +114,9 @@ impl Gatekeeper {
     }
 
     /// Gets the data held by the tower about a given user.
-    pub(crate) fn get_user_info(&self, user_id: UserId) -> Option<UserInfo> {
-        self.registered_users.lock().unwrap().get(&user_id).cloned()
+    pub(crate) fn get_user_info(&self, user_id: UserId) -> Option<(UserInfo, Vec<Locator>)> {
+        let info = self.registered_users.lock().unwrap().get(&user_id).cloned();
+        info.map(|info| (info, self.dbm.lock().unwrap().load_user_locators(user_id)))
     }
 
     /// Authenticates a user.
@@ -219,7 +200,13 @@ impl Gatekeeper {
         // For updates, the difference between the existing appointment size and the update is computed.
         let mut registered_users = self.registered_users.lock().unwrap();
         let user_info = registered_users.get_mut(&user_id).unwrap();
-        let used_slots = user_info.appointments.get(&uuid).map_or(0, |x| *x);
+        let used_blob_size = self
+            .dbm
+            .lock()
+            .unwrap()
+            .get_appointment_length(uuid)
+            .unwrap_or(0);
+        let used_slots = compute_appointment_slots(used_blob_size, ENCRYPTED_BLOB_MAX_SIZE);
         let required_slots =
             compute_appointment_slots(appointment.encrypted_blob().len(), ENCRYPTED_BLOB_MAX_SIZE);
 
@@ -228,7 +215,6 @@ impl Gatekeeper {
         if diff <= user_info.available_slots as i64 {
             // Filling / freeing slots depending on whether this is an update or not, and if it is bigger or smaller
             // than the old appointment
-            user_info.appointments.insert(uuid, required_slots);
             user_info.available_slots = (user_info.available_slots as i64 - diff) as u32;
 
             self.dbm.lock().unwrap().update_user(user_id, user_info);
@@ -258,56 +244,50 @@ impl Gatekeeper {
 
     /// Gets a map of outdated users. Outdated users are those whose subscription has expired and the renewal grace period
     /// has already passed ([expiry_delta](Self::expiry_delta)).
-    pub(crate) fn get_outdated_users(&self, block_height: u32) -> HashMap<UserId, HashSet<UUID>> {
-        let registered_users = self.registered_users.lock().unwrap().clone();
-        registered_users
-            .into_iter()
-            .filter(|(_, info)| block_height == info.subscription_expiry + self.expiry_delta)
-            .map(|(id, info)| (id, info.appointments.keys().cloned().collect()))
-            .collect()
-    }
-
-    /// Gets a set of outdated user ids.
-    pub(crate) fn get_outdated_user_ids(&self, block_height: u32) -> HashSet<UserId> {
-        self.get_outdated_users(block_height)
-            .keys()
-            .cloned()
+    pub(crate) fn get_outdated_users(&self, block_height: u32) -> Vec<UserId> {
+        self.registered_users
+            .lock()
+            .unwrap()
+            .iter()
+            // NOTE: Ideally there won't be a user with `block_height > subscription_expiry + expiry_delta`, but
+            // this might happen if we skip a couple of block connections due to a force update.
+            .filter(|(_, info)| block_height >= info.subscription_expiry + self.expiry_delta)
+            .map(|(user_id, _)| *user_id)
             .collect()
     }
 
-    /// Get a map of outdated appointments (from any user).
-    pub(crate) fn get_outdated_appointments(&self, block_height: u32) -> HashSet<UUID> {
-        HashSet::from_iter(
-            self.get_outdated_users(block_height)
-                .into_values()
-                .flatten(),
-        )
-    }
-
-    /// Deletes a collection of appointments from the users' subscriptions (from memory only)
-    /// and updates the available_slots count for the given user.
+    /// Deletes these appointments from the database and updates the user's information.
     ///
-    /// Notice appointments are only de-linked from users, but not actually removed. This is because the [Gatekeeper]
-    /// does not actually hold any [ExtendedAppointment](crate::extended_appointment::ExtendedAppointment) data,
-    /// just references to them.
-    pub(crate) fn delete_appointments_from_memory(
-        &self,
-        appointments: &HashMap<UUID, UserId>,
-    ) -> HashMap<UserId, UserInfo> {
-        let mut updated_users = HashMap::new();
-        let mut registered_users = self.registered_users.lock().unwrap();
+    /// If `refund` is set, the appointments' owners will get their slots refunded back.
+    ///
+    /// DISCUSS: When `refund` is `false` we don't give back the slots to the user for the deleted appointments.
+    /// This is to discourage misbehavior (sending bad appointments, either non-decryptable or rejected by the network).
+    pub(crate) fn delete_appointments(&self, appointments: Vec<UUID>, refund: bool) {
+        let mut dbm = self.dbm.lock().unwrap();
 
-        for (uuid, user_id) in appointments {
-            // Remove the appointment from the appointment list and update the available slots
-            if let Some(user_info) = registered_users.get_mut(user_id) {
-                if let Some(x) = user_info.appointments.remove(uuid) {
-                    user_info.available_slots += x;
-                }
-                updated_users.insert(*user_id, user_info.clone());
-            };
-        }
+        let updated_users = if refund {
+            let mut updated_users = HashMap::new();
+            let mut registered_users = self.registered_users.lock().unwrap();
+            // Give back the consumed slots to each user.
+            for uuid in appointments.iter() {
+                let (user_id, blob_size) = dbm.get_appointment_user_and_length(*uuid).unwrap();
+                registered_users.get_mut(&user_id).unwrap().available_slots +=
+                    compute_appointment_slots(blob_size, ENCRYPTED_BLOB_MAX_SIZE);
+                updated_users.insert(user_id, registered_users[&user_id]);
+            }
+            updated_users
+        } else {
+            // No updated users.
+            HashMap::new()
+        };
 
-        updated_users
+        // An optimization for the case when only one appointment is being deleted without refunding.
+        // This avoids creating a DB transaction for a single query.
+        if appointments.len() == 1 && updated_users.is_empty() {
+            dbm.remove_appointment(appointments[0])
+        } else {
+            dbm.batch_remove_appointments(&appointments, &updated_users);
+        }
     }
 }
 
@@ -324,12 +304,17 @@ impl chain::Listen for Gatekeeper {
         log::info!("New block received: {}", header.block_hash());
 
         // Expired user deletion is delayed. Users are deleted when their subscription is outdated, not expired.
-        let outdated_users = self.get_outdated_user_ids(height);
+        let outdated_users = self.get_outdated_users(height);
         if !outdated_users.is_empty() {
-            self.registered_users
-                .lock()
-                .unwrap()
-                .retain(|id, _| !outdated_users.contains(id));
+            // Remove the outdated users from memory first.
+            {
+                let mut registered_users = self.registered_users.lock().unwrap();
+                // Removing each outdated user in a loop is more efficient than retaining non-outdated users
+                // because retaining would loop over all the available users, which are always more than the outdated ones.
+                for outdated_user in outdated_users.iter() {
+                    registered_users.remove(outdated_user);
+                }
+            }
 
             self.dbm.lock().unwrap().batch_remove_users(&outdated_users);
         }
@@ -351,13 +336,13 @@ mod tests {
     use super::*;
 
-    use crate::test_utils::{
-        generate_dummy_appointment, generate_dummy_appointment_with_user, generate_uuid, Blockchain,
-    };
+    use crate::test_utils::{generate_dummy_appointment_with_user, get_random_tracker, Blockchain};
 
     use lightning::chain::Listen;
 
     use teos_common::cryptography::{get_random_bytes, get_random_keypair};
     use teos_common::test_utils::get_random_user_id;
 
+    use crate::responder::ConfirmationStatus;
+
     const SLOTS: u32 = 21;
     const DURATION: u32 = 500;
     const EXPIRY_DELTA: u32 = 42;
@@ -380,21 +365,11 @@ mod tests {
             &self.registered_users
         }
 
-        pub(crate) fn add_outdated_user(
-            &self,
-            user_id: UserId,
-            outdates_at: u32,
-            appointments: Option<Vec<UUID>>,
-        ) {
+        pub(crate) fn add_outdated_user(&self, user_id: UserId, outdates_at: u32) {
            self.add_update_user(user_id).unwrap();
 
            let mut registered_users = self.registered_users.lock().unwrap();
            let user = registered_users.get_mut(&user_id).unwrap();
            user.subscription_expiry = outdates_at - self.expiry_delta;
-
-            if let Some(uuids) = appointments {
-                for uuid in uuids.iter() {
-                    user.appointments.insert(*uuid, 1);
-                }
-            }
         }
     }
 
@@ -567,25 +542,32 @@ mod tests {
         let available_slots = gatekeeper
             .add_update_appointment(user_id, uuid, &appointment)
             .unwrap();
+        // Simulate the watcher adding the appointment in the database.
+        gatekeeper
+            .dbm
+            .lock()
+            .unwrap()
+            .store_appointment(uuid, &appointment)
+            .unwrap();
 
-        assert!(gatekeeper.registered_users.lock().unwrap()[&user_id]
-            .appointments
-            .contains_key(&uuid));
+        let (_, user_locators) = gatekeeper.get_user_info(user_id).unwrap();
+        assert!(user_locators.contains(&appointment.locator()));
         assert_eq!(slots_before, available_slots + 1);
 
-        // Slots should have been updated in the database too. Notice the appointment won't be there yet
-        // given the Watcher is responsible for adding it, and it will do so after calling this method
+        // Slots should have been updated in the database too.
         let mut loaded_user = gatekeeper.dbm.lock().unwrap().load_user(user_id).unwrap();
         assert_eq!(loaded_user.available_slots, available_slots);
 
-        // Adding the exact same appointment should leave the slots count unchanged
+        // Adding the exact same appointment should leave the slots count unchanged.
+        // We don't really need to update the appointment in the DB since it's the very same appointment.
         let mut updated_slot_count = gatekeeper
             .add_update_appointment(user_id, uuid, &appointment)
             .unwrap();
-        assert!(gatekeeper.registered_users.lock().unwrap()[&user_id]
-            .appointments
-            .contains_key(&uuid));
+
+        let (_, user_locators) = gatekeeper.get_user_info(user_id).unwrap();
+        assert!(user_locators.contains(&appointment.locator()));
         assert_eq!(updated_slot_count, available_slots);
+
         loaded_user = gatekeeper.dbm.lock().unwrap().load_user(user_id).unwrap();
         assert_eq!(loaded_user.available_slots, updated_slot_count);
 
@@ -595,10 +577,18 @@ mod tests {
         updated_slot_count = gatekeeper
             .add_update_appointment(user_id, uuid, &bigger_appointment)
             .unwrap();
+        // Simulate the watcher updating the appointment in the database.
+        gatekeeper
+            .dbm
+            .lock()
+            .unwrap()
+            .update_appointment(uuid, &bigger_appointment)
+            .unwrap();
+
+        let (_, user_locators) = gatekeeper.get_user_info(user_id).unwrap();
+        assert!(user_locators.contains(&appointment.locator()));
         assert_eq!(updated_slot_count, available_slots - 1);
+
         loaded_user = gatekeeper.dbm.lock().unwrap().load_user(user_id).unwrap();
         assert_eq!(loaded_user.available_slots, updated_slot_count);
 
@@ -606,26 +596,43 @@ mod tests {
         updated_slot_count = gatekeeper
             .add_update_appointment(user_id, uuid, &appointment)
             .unwrap();
-        assert!(gatekeeper.registered_users.lock().unwrap()[&user_id]
-            .appointments
-            .contains_key(&uuid));
+        // Simulate the watcher updating the appointment in the database.
+        gatekeeper
+            .dbm
+            .lock()
+            .unwrap()
+            .update_appointment(uuid, &appointment)
+            .unwrap();
+
+        let (_, user_locators) = gatekeeper.get_user_info(user_id).unwrap();
+        assert!(user_locators.contains(&appointment.locator()));
         assert_eq!(updated_slot_count, available_slots);
+
         loaded_user = gatekeeper.dbm.lock().unwrap().load_user(user_id).unwrap();
         assert_eq!(loaded_user.available_slots, updated_slot_count);
 
         // Adding an appointment with a different uuid should not count as an update
-        let new_uuid = generate_uuid();
+        let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
         updated_slot_count = gatekeeper
-            .add_update_appointment(user_id, new_uuid, &appointment)
+            .add_update_appointment(user_id, uuid, &appointment)
             .unwrap();
+        // Simulate the watcher adding the appointment in the database.
+        gatekeeper
+            .dbm
+            .lock()
+            .unwrap()
+            .store_appointment(uuid, &appointment)
+            .unwrap();
-        assert!(gatekeeper.registered_users.lock().unwrap()[&user_id]
-            .appointments
-            .contains_key(&new_uuid));
+
+        let (_, user_locators) = gatekeeper.get_user_info(user_id).unwrap();
+        assert!(user_locators.contains(&appointment.locator()));
         assert_eq!(updated_slot_count, available_slots - 1);
+
         loaded_user = gatekeeper.dbm.lock().unwrap().load_user(user_id).unwrap();
         assert_eq!(loaded_user.available_slots, updated_slot_count);
 
-        // Finally, trying to add an appointment when the user has no enough slots should fail
+        // Finally, trying to add an appointment when the user does not have enough slots should fail
+        let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
         gatekeeper
             .registered_users
             .lock()
             .unwrap()
             .get_mut(&user_id)
             .unwrap()
             .available_slots = 0;
         assert!(matches!(
-            gatekeeper.add_update_appointment(user_id, generate_uuid(), &appointment),
+            gatekeeper.add_update_appointment(user_id, uuid, &appointment),
             Err(NotEnoughSlots)
         ));
+
         // The entry in the database should remain unchanged in this case
         loaded_user = gatekeeper.dbm.lock().unwrap().load_user(user_id).unwrap();
         assert_eq!(loaded_user.available_slots, updated_slot_count);
@@ -682,144 +690,175 @@ mod tests {
 
         // Initially, there are not outdated users, so querying any block height should return an empty map
         for i in 0..start_height {
-            assert_eq!(gatekeeper.get_outdated_users(i).len(), 0);
+            assert_eq!(gatekeeper.get_outdated_users(i), vec![]);
         }
 
         // Adding a user whose subscription is outdated should return an entry
         let user_id = get_random_user_id();
         gatekeeper.add_update_user(user_id).unwrap();
 
-        // Add also an appointment so we can check the returned data
-        let appointment = generate_dummy_appointment(None);
-        let uuid = generate_uuid();
-        gatekeeper
-            .add_update_appointment(user_id, uuid, &appointment)
-            .unwrap();
-
         // Check that data is not yet outdated
assert_eq!(gatekeeper.get_outdated_users(start_height), vec![]); // Add an outdated user and check again - gatekeeper.add_outdated_user(user_id, start_height, None); - let outdated_users = gatekeeper.get_outdated_users(start_height); - assert_eq!(outdated_users.len(), 1); - assert_eq!(outdated_users[&user_id], HashSet::from_iter([uuid])); + gatekeeper.add_outdated_user(user_id, start_height); + assert_eq!(gatekeeper.get_outdated_users(start_height), vec![user_id]); } #[test] - fn test_get_outdated_appointments() { - let start_height = START_HEIGHT as u32 + EXPIRY_DELTA; - let gatekeeper = init_gatekeeper(&Blockchain::default().with_height(start_height as usize)); - - // get_outdated_appointments returns a list of appointments that were outdated at a given block height, indistinguishably of their user. - - // If there are no outdated users, there cannot be outdated appointments - for i in 0..start_height { - assert_eq!(gatekeeper.get_outdated_appointments(i).len(), 0); + fn test_delete_appointments_without_refund() { + let gatekeeper = init_gatekeeper(&Blockchain::default().with_height(START_HEIGHT)); + let n_users = 100; + let n_apps = 10; + let mut uuids_to_delete = Vec::new(); + let mut rest = Vec::new(); + let mut trackers = Vec::new(); + let mut users_info = HashMap::new(); + + for _ in 0..n_users { + let user_id = get_random_user_id(); + gatekeeper.add_update_user(user_id).unwrap(); + for i in 0..n_apps { + let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); + gatekeeper + .add_update_appointment(user_id, uuid, &appointment) + .unwrap(); + // Add the appointment to the database. This is normally done by the Watcher. + gatekeeper + .dbm + .lock() + .unwrap() + .store_appointment(uuid, &appointment) + .unwrap(); + if i % 2 == 0 { + uuids_to_delete.push(uuid); + } else { + rest.push(uuid); + } + // Also trigger some of these appointments as trackers. + if i % 5 == 0 { + gatekeeper + .dbm + .lock() + .unwrap() + .store_tracker( + uuid, + &get_random_tracker(user_id, ConfirmationStatus::ConfirmedIn(42)), + ) + .unwrap(); + trackers.push(uuid); + } + } + users_info.insert(user_id, gatekeeper.get_user_info(user_id).unwrap().0); } - // Adding data about different users and appointments should return a flattened list of appointments - let user1_id = get_random_user_id(); - let user2_id = get_random_user_id(); - let uuid1 = generate_uuid(); - let uuid2 = generate_uuid(); + // Delete these appointments without refunding their owners. + gatekeeper.delete_appointments(uuids_to_delete.clone(), false); - // Manually set the user expiry for the test - for (user_id, uuid) in [(user1_id, uuid1), (user2_id, uuid2)] { - gatekeeper.add_outdated_user(user_id, start_height, Some(Vec::from_iter([uuid]))); + for uuid in uuids_to_delete.clone() { + assert!(!gatekeeper.dbm.lock().unwrap().appointment_exists(uuid)); + } + for uuid in rest { + assert!(gatekeeper.dbm.lock().unwrap().appointment_exists(uuid)); + } + for uuid in trackers { + if uuids_to_delete.contains(&uuid) { + // The tracker should be deleted as well. 
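+ // (Removing an appointment drops its tracker in cascade, since trackers reference appointments in the DB.)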
+ assert!(!gatekeeper.dbm.lock().unwrap().tracker_exists(uuid)); + } else { + assert!(gatekeeper.dbm.lock().unwrap().tracker_exists(uuid)); + } } - let outdated_appointments = gatekeeper.get_outdated_appointments(start_height); - assert_eq!(outdated_appointments.len(), 2); - assert!(outdated_appointments.contains(&uuid1)); - assert!(outdated_appointments.contains(&uuid2)); + for (user_id, user_info_before_deletion) in users_info { + // Since `refund` was false, the users' slots should not have changed after deleting appointments. + let (user_info_after_deletion, _) = gatekeeper.get_user_info(user_id).unwrap(); + assert_eq!(user_info_after_deletion, user_info_before_deletion); + } } #[test] - fn test_delete_appointments_from_memory() { + fn test_delete_appointments_with_refund() { let gatekeeper = init_gatekeeper(&Blockchain::default().with_height(START_HEIGHT)); - - // delete_appointments will remove a list of appointments from the Gatekeeper (as long as they exist) - let mut all_appointments = HashMap::new(); - let mut to_be_deleted = HashMap::new(); - let mut rest = HashMap::new(); - for i in 1..11 { + let n_users = 100; + let n_apps = 10; + let mut uuids_to_delete = Vec::new(); + let mut rest = Vec::new(); + let mut trackers = Vec::new(); + let mut users_remaining_slots = HashMap::new(); + + for _ in 0..n_users { let user_id = get_random_user_id(); - let uuid = generate_uuid(); - all_appointments.insert(uuid, user_id); - - if i % 2 == 0 { - to_be_deleted.insert(uuid, user_id); - } else { - rest.insert(uuid, user_id); + gatekeeper.add_update_user(user_id).unwrap(); + let mut user_remaining_slots = + gatekeeper.get_user_info(user_id).unwrap().0.available_slots; + for i in 0..n_apps { + let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); + gatekeeper + .add_update_appointment(user_id, uuid, &appointment) + .unwrap(); + // Add the appointment to the database. This is normally done by the Watcher. + gatekeeper + .dbm + .lock() + .unwrap() + .store_appointment(uuid, &appointment) + .unwrap(); + if i % 2 == 0 { + // We don't reduce the remaining slots for the appointments which are + // going to be deleted since we will refund their owners. + uuids_to_delete.push(uuid); + } else { + rest.push(uuid); + user_remaining_slots -= compute_appointment_slots( + appointment.encrypted_blob().len(), + ENCRYPTED_BLOB_MAX_SIZE, + ); + } + // Also trigger some of these appointments as trackers. + if i % 5 == 0 { + gatekeeper + .dbm + .lock() + .unwrap() + .store_tracker( + uuid, + &get_random_tracker(user_id, ConfirmationStatus::ConfirmedIn(42)), + ) + .unwrap(); + trackers.push(uuid); + } } + users_remaining_slots.insert(user_id, user_remaining_slots); } - // Calling the method with unknown data should work but do nothing - assert!(gatekeeper.registered_users.lock().unwrap().is_empty()); - assert!(gatekeeper - .delete_appointments_from_memory(&all_appointments) - .is_empty()); + // Delete these appointments and refund their slots back to their owners.
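+ // (With `refund = true`, the slots consumed by each deleted appointment are added back to its owner's available slots, both in memory and in the DB.)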
+ gatekeeper.delete_appointments(uuids_to_delete.clone(), true); - // If there's matching data in the gatekeeper it should be deleted - for (uuid, user_id) in to_be_deleted.iter() { - gatekeeper.add_update_user(*user_id).unwrap(); - gatekeeper - .add_update_appointment(*user_id, *uuid, &generate_dummy_appointment(None)) - .unwrap(); + for uuid in uuids_to_delete.clone() { + assert!(!gatekeeper.dbm.lock().unwrap().appointment_exists(uuid)); } - - // Check before deleting - assert_eq!(gatekeeper.registered_users.lock().unwrap().len(), 5); - for (uuid, user_id) in to_be_deleted.iter() { - assert!(gatekeeper.registered_users.lock().unwrap()[user_id] - .appointments - .contains_key(uuid)); - - // The slot count should be decreased now too (both in memory and in the database) - assert_ne!( - gatekeeper.registered_users.lock().unwrap()[user_id].available_slots, - gatekeeper.subscription_slots - ); - assert_ne!( - gatekeeper - .dbm - .lock() - .unwrap() - .load_user(*user_id) - .unwrap() - .available_slots, - gatekeeper.subscription_slots - ); + for uuid in rest { + assert!(gatekeeper.dbm.lock().unwrap().appointment_exists(uuid)); } - for (_, user_id) in rest.iter() { - assert!(!gatekeeper - .registered_users - .lock() - .unwrap() - .contains_key(user_id)); + for uuid in trackers { + if uuids_to_delete.contains(&uuid) { + // The tracker should be deleted as well. + assert!(!gatekeeper.dbm.lock().unwrap().tracker_exists(uuid)); + } else { + assert!(gatekeeper.dbm.lock().unwrap().tracker_exists(uuid)); + } } - // And after - gatekeeper.delete_appointments_from_memory(&all_appointments); - for (uuid, user_id) in to_be_deleted.iter() { - assert!(!gatekeeper.registered_users.lock().unwrap()[user_id] - .appointments - .contains_key(uuid)); - - // The slot count is back to default + for (user_id, correct_remaining_slots) in users_remaining_slots { + let remaining_slots_from_db = + gatekeeper.get_user_info(user_id).unwrap().0.available_slots; + assert_eq!(remaining_slots_from_db, correct_remaining_slots); assert_eq!( - gatekeeper.registered_users.lock().unwrap()[user_id].available_slots, - gatekeeper.subscription_slots + gatekeeper.registered_users.lock().unwrap()[&user_id].available_slots, + correct_remaining_slots ); } - for (_, user_id) in rest.iter() { - assert!(!gatekeeper - .registered_users - .lock() - .unwrap() - .contains_key(user_id)); - } } #[test] @@ -835,7 +874,7 @@ mod tests { let user3_id = get_random_user_id(); for user_id in &[user1_id, user2_id, user3_id] { - gatekeeper.add_outdated_user(*user_id, chain.tip().height + 1, None) + gatekeeper.add_outdated_user(*user_id, chain.tip().height + 1) } // Connect a new block. 
Outdated users are deleted diff --git a/teos/src/main.rs b/teos/src/main.rs index ed3e8b1d..bdd30e06 100644 --- a/teos/src/main.rs +++ b/teos/src/main.rs @@ -48,6 +48,7 @@ where { let mut last_n_blocks = Vec::with_capacity(n); for _ in 0..n { + log::debug!("Fetching block #{}", last_known_block.height); let block = poller.fetch_block(&last_known_block).await?; last_known_block = poller.look_up_previous_header(&last_known_block).await?; last_n_blocks.push(block); @@ -257,16 +258,6 @@ async fn main() { any => any, }; - let mut poller = ChainPoller::new(&mut derefed, Network::from_str(btc_network).unwrap()); - let last_n_blocks = get_last_n_blocks(&mut poller, tip, IRREVOCABLY_RESOLVED as usize) - .await.unwrap_or_else(|e| { - // I'm pretty sure this can only happen if we are pulling blocks from the target to the prune height, and by the time we get to - // the end at least one has been pruned. - log::error!("Couldn't load the latest {IRREVOCABLY_RESOLVED} blocks. Please try again (Error: {})", e.into_inner()); - std::process::exit(1); - } - ); - // Build components let gatekeeper = Arc::new(Gatekeeper::new( tip.height, @@ -276,23 +267,35 @@ async fn main() { dbm.clone(), )); - let carrier = Carrier::new(rpc, bitcoind_reachable.clone(), tip.height); - let responder = Arc::new(Responder::new( - &last_n_blocks, - tip.height, - carrier, - gatekeeper.clone(), - dbm.clone(), - )); - let watcher = Arc::new(Watcher::new( - gatekeeper.clone(), - responder.clone(), - &last_n_blocks[0..6], - tip.height, - tower_sk, - TowerId(tower_pk), - dbm.clone(), - )); + let mut poller = ChainPoller::new(&mut derefed, Network::from_str(btc_network).unwrap()); + let (responder, watcher) = { + let last_n_blocks = get_last_n_blocks(&mut poller, tip, IRREVOCABLY_RESOLVED as usize) + .await.unwrap_or_else(|e| { + // I'm pretty sure this can only happen if we are pulling blocks from the target to the prune height, and by the time we get to + // the end at least one has been pruned. + log::error!("Couldn't load the latest {IRREVOCABLY_RESOLVED} blocks. Please try again (Error: {})", e.into_inner()); + std::process::exit(1); + } + ); + + let responder = Arc::new(Responder::new( + &last_n_blocks, + tip.height, + Carrier::new(rpc, bitcoind_reachable.clone(), tip.height), + gatekeeper.clone(), + dbm.clone(), + )); + let watcher = Arc::new(Watcher::new( + gatekeeper.clone(), + responder.clone(), + &last_n_blocks[0..6], + tip.height, + tower_sk, + TowerId(tower_pk), + dbm.clone(), + )); + (responder, watcher) + }; if watcher.is_fresh() & responder.is_fresh() & gatekeeper.is_fresh() { log::info!("Fresh bootstrap"); @@ -307,8 +310,8 @@ async fn main() { let shutdown_signal_tor = shutdown_signal_rpc_api.clone(); // The ordering here actually matters. Listeners are called by order, and we want the gatekeeper to be called - // last, so both the Watcher and the Responder can query the necessary data from it during data deletion. - let listener = &(watcher.clone(), &(responder, gatekeeper)); + // first so it updates the users' states and both the Watcher and the Responder operate only on registered users. + let listener = &(gatekeeper, &(watcher.clone(), responder)); let cache = &mut UnboundedCache::new(); let spv_client = SpvClient::new(tip, poller, cache, listener); let mut chain_monitor = ChainMonitor::new( diff --git a/teos/src/responder.rs b/teos/src/responder.rs index 9ffebda0..b7412cca 100644 --- a/teos/src/responder.rs +++ b/teos/src/responder.rs @@ -1,7 +1,6 @@ //! 
Logic related to the Responder, the components in charge of making sure breaches get properly punished. -use std::collections::{HashMap, HashSet}; -use std::iter::FromIterator; +use std::collections::HashSet; use std::sync::{Arc, Mutex}; use bitcoin::{consensus, BlockHash}; @@ -16,7 +15,7 @@ use teos_common::UserId; use crate::carrier::Carrier; use crate::dbm::DBM; use crate::extended_appointment::UUID; -use crate::gatekeeper::{Gatekeeper, UserInfo}; +use crate::gatekeeper::Gatekeeper; use crate::tx_index::TxIndex; use crate::watcher::Breach; @@ -30,14 +29,6 @@ pub enum ConfirmationStatus { InMempoolSince(u32), IrrevocablyResolved, Rejected(i32), - ReorgedOut, -} - -/// Reason why the tracker is deleted. Used for logging purposes. -enum DeletionReason { - Outdated, - Rejected, - Completed, } impl ConfirmationStatus { @@ -72,17 +63,6 @@ impl ConfirmationStatus { } } -/// Minimal data required in memory to keep track of transaction trackers. -#[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct TrackerSummary { - /// Identifier of the user who arranged the appointment. - user_id: UserId, - /// Transaction id the [Responder] is keeping track of. - penalty_txid: Txid, - /// The confirmation status of a given tracker. - status: ConfirmationStatus, -} - /// Structure to keep track of triggered appointments. /// /// It is analogous to [ExtendedAppointment](crate::extended_appointment::ExtendedAppointment) for the [`Watcher`](crate::watcher::Watcher). @@ -108,15 +88,6 @@ impl TransactionTracker { user_id, } } - - /// Computes the [TrackerSummary] of the [TransactionTracker]. - pub fn get_summary(&self) -> TrackerSummary { - TrackerSummary { - user_id: self.user_id, - penalty_txid: self.penalty_tx.txid(), - status: self.status, - } - } } impl From for common_msgs::Tracker { @@ -129,6 +100,22 @@ impl From for common_msgs::Tracker { } } +/// A struct that packages the summary of a tracker's penalty transaction. +#[derive(Debug, PartialEq)] +pub(crate) struct PenaltySummary { + pub penalty_txid: Txid, + pub status: ConfirmationStatus, +} + +impl PenaltySummary { + pub fn new(penalty_txid: Txid, status: ConfirmationStatus) -> Self { + PenaltySummary { + penalty_txid, + status, + } + } +} + /// Component in charge of keeping track of triggered appointments. /// /// The [Responder] receives data from the [Watcher](crate::watcher::Watcher) in form of a [Breach]. @@ -136,11 +123,6 @@ impl From for common_msgs::Tracker { /// The [Transaction] is then monitored to make sure it makes it to a block and it gets [irrevocably resolved](https://github.com/lightning/bolts/blob/master/05-onchain.md#general-nomenclature). #[derive(Debug)] pub struct Responder { - /// A map holding a summary of every tracker ([TransactionTracker]) hold by the [Responder], identified by [UUID]. - /// The identifiers match those used by the [Watcher](crate::watcher::Watcher). - trackers: Mutex>, - /// A map between [Txid]s and [UUID]s. - tx_tracker_map: Mutex>>, /// A local, pruned, [TxIndex] used to avoid the need of `txindex=1`. tx_index: Mutex>, /// A [Carrier] instance. Data is sent to the `bitcoind` through it. @@ -149,6 +131,8 @@ pub struct Responder { gatekeeper: Arc, /// A [DBM] (database manager) instance. Used to persist tracker data into disk. dbm: Arc>, + /// A list of all the reorged trackers that might need to be republished after reorg resolution. 
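+ /// (Populated on block disconnections and drained by `handle_reorged_txs` on the next block connection.)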
+ reorged_trackers: Mutex>, } impl Responder { @@ -160,37 +144,28 @@ gatekeeper: Arc, dbm: Arc>, ) -> Self { - let mut trackers = HashMap::new(); - let mut tx_tracker_map: HashMap> = HashMap::new(); - - for (uuid, tracker) in dbm.lock().unwrap().load_trackers(None) { - trackers.insert(uuid, tracker.get_summary()); - - if let Some(map) = tx_tracker_map.get_mut(&tracker.penalty_tx.txid()) { - map.insert(uuid); - } else { - tx_tracker_map.insert(tracker.penalty_tx.txid(), HashSet::from_iter(vec![uuid])); - } - } - Responder { carrier: Mutex::new(carrier), - trackers: Mutex::new(trackers), - tx_tracker_map: Mutex::new(tx_tracker_map), tx_index: Mutex::new(TxIndex::new(last_n_blocs, last_known_block_height)), dbm, gatekeeper, + reorged_trackers: Mutex::new(HashSet::new()), } } /// Returns whether the [Responder] has been created from scratch (fresh) or from backed-up data. pub fn is_fresh(&self) -> bool { - self.trackers.lock().unwrap().is_empty() + self.get_trackers_count() == 0 } - /// Gets the total number of trackers in the responder. + /// Gets the total number of trackers in the [Responder]. pub(crate) fn get_trackers_count(&self) -> usize { - self.trackers.lock().unwrap().len() + self.dbm.lock().unwrap().get_trackers_count() + } + + /// Checks whether the [Responder] has gone through a reorg and some transactions need to be resent. + fn coming_from_reorg(&self) -> bool { + !self.reorged_trackers.lock().unwrap().is_empty() } /// Data entry point for the [Responder]. Handles a [Breach] provided by the [Watcher](crate::watcher::Watcher). @@ -203,21 +178,15 @@ breach: Breach, user_id: UserId, ) -> ConfirmationStatus { - // Do not add already added trackers. This can only happen if handle_breach is called twice with the same data, which can only happen - // if Watcher::block_connected is interrupted during execution and called back during bootstrap. - if let Some(tracker) = self.trackers.lock().unwrap().get(&uuid) { - return tracker.status; - } - let mut carrier = self.carrier.lock().unwrap(); let tx_index = self.tx_index.lock().unwrap(); // Check whether the transaction is in mempool or part of our internal txindex. Send it to our node otherwise. - let status = if carrier.in_mempool(&breach.penalty_tx.txid()) { + let status = if let Some(block_hash) = tx_index.get(&breach.penalty_tx.txid()) { + ConfirmationStatus::ConfirmedIn(tx_index.get_height(block_hash).unwrap() as u32) + } else if carrier.in_mempool(&breach.penalty_tx.txid()) { // If it's in mempool we assume it was just included ConfirmationStatus::InMempoolSince(carrier.block_height()) - } else if let Some(block_hash) = tx_index.get(&breach.penalty_tx.txid()) { - ConfirmationStatus::ConfirmedIn(tx_index.get_height(block_hash).unwrap() as u32) } else { carrier.send_transaction(&breach.penalty_tx) }; @@ -244,271 +213,177 @@ user_id: UserId, status: ConfirmationStatus, ) { - let tracker = TransactionTracker::new(breach, user_id, status); - - self.trackers + if self + .dbm .lock() .unwrap() - .insert(uuid, tracker.get_summary()); - - let mut tx_tracker_map = self.tx_tracker_map.lock().unwrap(); - if let Some(map) = tx_tracker_map.get_mut(&tracker.penalty_tx.txid()) { - map.insert(uuid); + .store_tracker(uuid, &TransactionTracker::new(breach, user_id, status)) + .is_ok() + { + log::info!("New tracker added (uuid={uuid})"); } else { - tx_tracker_map.insert(tracker.penalty_tx.txid(), HashSet::from_iter(vec![uuid])); + log::error!( + "Failed to store tracker in database (uuid={uuid}).
It might already be stored." + ); } - - self.dbm - .lock() - .unwrap() - .store_tracker(uuid, &tracker) - .unwrap(); - log::info!("New tracker added (uuid={uuid})"); } /// Checks whether a given tracker can be found in the [Responder]. pub(crate) fn has_tracker(&self, uuid: UUID) -> bool { - // has_tracker should return true as long as the given tracker is hold by the Responder. - // If the tracker is partially kept, the function will log and the return will be false. - // This may point out that some partial data deletion is happening, which must be fixed. - self.trackers - .lock() - .unwrap() - .get(&uuid) - .map_or(false, |tracker| { - self.tx_tracker_map - .lock() - .unwrap() - .get(&tracker.penalty_txid) - .map_or( - { - log::debug!( - "Partially found Tracker. Some data may have not been properly deleted" - ); - false - }, - |_| true, - ) - }) - } - - /// Gets a tracker from the [Responder] if found. [None] otherwise. - /// - /// The [TransactionTracker] is queried to the [DBM]. - pub(crate) fn get_tracker(&self, uuid: UUID) -> Option { - if self.trackers.lock().unwrap().contains_key(&uuid) { - self.dbm.lock().unwrap().load_tracker(uuid) - } else { - None - } + self.dbm.lock().unwrap().tracker_exists(uuid) } /// Checks the confirmation count for the [TransactionTracker]s. /// /// For unconfirmed transactions, it checks whether they have been confirmed or keep missing confirmations. /// For confirmed transactions, nothing is done until they are completed (confirmation count reaches [IRREVOCABLY_RESOLVED](constants::IRREVOCABLY_RESOLVED)) - /// Returns the set of completed trackers. - fn check_confirmations(&self, txids: &[Txid], current_height: u32) -> HashSet { - let mut completed_trackers = HashSet::new(); + /// Returns the set of completed trackers or [None] if none were completed. + fn check_confirmations(&self, txids: HashSet, current_height: u32) -> Option> { + let mut completed_trackers = Vec::new(); + let mut reorged_trackers = self.reorged_trackers.lock().unwrap(); + let dbm = self.dbm.lock().unwrap(); - for (uuid, tracker) in self.trackers.lock().unwrap().iter_mut() { - if let ConfirmationStatus::ConfirmedIn(h) = tracker.status { + for (uuid, penalty_summary) in dbm.load_penalties_summaries() { + if txids.contains(&penalty_summary.penalty_txid) { + // First confirmation was received + dbm.update_tracker_status(uuid, &ConfirmationStatus::ConfirmedIn(current_height)) + .unwrap(); + // Remove that uuid from reorged trackers if it was confirmed. + reorged_trackers.remove(&uuid); + // TODO: We won't need this check when we persist the correct tracker status + // in the DB after migrations are supported. + } else if reorged_trackers.contains(&uuid) { + // Don't consider reorged trackers since their DB status is wrong.
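+ // (Their persisted heights point at blocks from the abandoned chain, so counting confirmations from them would be off.)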
+ continue; + } else if let ConfirmationStatus::ConfirmedIn(h) = penalty_summary.status { let confirmations = current_height - h; if confirmations == constants::IRREVOCABLY_RESOLVED { // Tracker is deep enough in the chain, it can be deleted - completed_trackers.insert(*uuid); + completed_trackers.push(uuid); } else { log::info!("{uuid} received a confirmation (count={confirmations})"); } - } else if txids.contains(&tracker.penalty_txid) { - // First confirmation was received - tracker.status = ConfirmationStatus::ConfirmedIn(current_height); - } else if let ConfirmationStatus::InMempoolSince(h) = tracker.status { + } else if let ConfirmationStatus::InMempoolSince(h) = penalty_summary.status { // Log all transactions that have missed confirmations log::info!( "Transaction missed a confirmation: {} (missed conf count: {})", - tracker.penalty_txid, + penalty_summary.penalty_txid, current_height - h ); } } - completed_trackers - } - - /// Gets a map of transactions that need to be rebroadcast. A [Transaction] is flagged to be rebroadcast - /// if its missed confirmation count has reached the threshold ([CONFIRMATIONS_BEFORE_RETRY]) or if they have been - /// reorged out of the chain. If the transaction has been reorged out, the commitment transaction is also returned. - /// - /// Given the [Responder] only keeps around the minimal data to track transactions, the [TransactionTracker]s - /// are queried to the [DBM]. - fn get_txs_to_rebroadcast( - &self, - height: u32, - ) -> HashMap)> { - let dbm = self.dbm.lock().unwrap(); - let mut tx_to_rebroadcast = HashMap::new(); - let mut tracker: TransactionTracker; - - for (uuid, t) in self.trackers.lock().unwrap().iter() { - if let ConfirmationStatus::InMempoolSince(h) = t.status { - if (height - h) as u8 >= CONFIRMATIONS_BEFORE_RETRY { - tracker = dbm.load_tracker(*uuid).unwrap(); - tx_to_rebroadcast.insert(*uuid, (tracker.penalty_tx, None)); - } - } else if let ConfirmationStatus::ReorgedOut = t.status { - tracker = dbm.load_tracker(*uuid).unwrap(); - tx_to_rebroadcast.insert(*uuid, (tracker.penalty_tx, Some(tracker.dispute_tx))); - } - } - - tx_to_rebroadcast - } - - /// Gets a collection of trackers that have been outdated. An outdated tracker is a [TransactionTracker] - /// from a user who's subscription has been outdated (and therefore will be removed from the tower). - /// - /// Trackers are only returned as long as they have not been confirmed, otherwise we'll keep watching for then anyway. - fn get_outdated_trackers(&self, block_height: u32) -> HashSet { - let mut outdated_trackers = HashSet::new(); - let trackers = self.trackers.lock().unwrap(); - for uuid in self - .gatekeeper - .get_outdated_appointments(block_height) - .intersection(&trackers.keys().cloned().collect()) - { - if let ConfirmationStatus::InMempoolSince(_) = trackers[uuid].status { - outdated_trackers.insert(*uuid); - } - } - - outdated_trackers + (!completed_trackers.is_empty()).then_some(completed_trackers) } - /// Rebroadcasts a list of penalty transactions that have missed too many confirmations (or that have been reorged out). + /// Handles the reorged out trackers when we start connecting to the stronger chain. /// - /// This covers both the case where a transaction is not getting confirmations (most likely due to low fess, and needs to be bumped), - /// and the case where the transaction has been reorged out of the chain. 
For the former, there's no much to be done at the moment (until anchors), - /// for the latter, we need to rebroadcast the penalty (and potentially the commitment if that has also been reorged). + /// This is called in the first block connection after a bunch of block disconnections. + /// It tries to publish the dispute and penalty transactions of reorged trackers to the blockchain. /// - /// Given how the confirmation status and reorgs work with a bitcoind backend, we will be rebroadcasting this during the first new connected block - /// after a reorg, but bitcoind will already be at the new tip. If the transaction is accepted, we won't do anything else until passed the new tip, - /// otherwise, we could potentially try to rebroadcast again while processing the upcoming reorged blocks (if the tx hits [CONFIRMATIONS_BEFORE_RETRY]). - /// - /// Returns a tuple with two maps, one containing the trackers that where successfully rebroadcast and another one containing the ones that were rejected. - fn rebroadcast( - &self, - txs: HashMap)>, - ) -> (HashMap, HashSet) { - let mut accepted = HashMap::new(); - let mut rejected = HashSet::new(); - - let mut trackers = self.trackers.lock().unwrap(); + /// Returns a vector of rejected trackers during rebroadcast if any were rejected, [None] otherwise. + fn handle_reorged_txs(&self, height: u32) -> Option> { + // NOTE: We are draining the reorged trackers set, meaning that we won't try sending these disputes again. + let reorged_trackers: Vec = self.reorged_trackers.lock().unwrap().drain().collect(); let mut carrier = self.carrier.lock().unwrap(); - let tx_index = self.tx_index.lock().unwrap(); + let dbm = self.dbm.lock().unwrap(); - for (uuid, (penalty_tx, dispute_tx)) in txs.into_iter() { - let status = if let Some(dispute_tx) = dispute_tx { - // The tracker was reorged out, and the dispute may potentially not be in the chain (or mempool) anymore. - if tx_index.contains_key(&dispute_tx.txid()) - | carrier.in_mempool(&dispute_tx.txid()) - { - // Dispute tx is on chain (or mempool), so we only need to care about the penalty - carrier.send_transaction(&penalty_tx) - } else { - // Dispute tx has also been reorged out, meaning that both transactions need to be broadcast. - // DISCUSS: For lightning transactions, if the dispute has been reorged the penalty cannot make it to the network. - // If we keep this general, the dispute can simply be a trigger and the penalty doesn't necessarily have to spend from it. - // We'll keel it lightning specific, at least for now. - let status = carrier.send_transaction(&dispute_tx); - if let ConfirmationStatus::Rejected(e) = status { - log::error!( - "Reorged dispute transaction rejected during rebroadcast: {} (reason: {e})", - dispute_tx.txid() + let mut rejected = Vec::new(); + // Republish all the dispute transactions of the reorged trackers. + for uuid in reorged_trackers { + let tracker = dbm.load_tracker(uuid).unwrap(); + let dispute_txid = tracker.dispute_tx.txid(); + // Try to publish the dispute transaction. + let should_publish_penalty = match carrier.send_transaction(&tracker.dispute_tx) { + ConfirmationStatus::InMempoolSince(_) => { + log::info!( + "Reorged dispute tx (txid={}) is in the mempool now", + dispute_txid ); - status - } else { - // The dispute was accepted, so we can rebroadcast the penalty. - carrier.send_transaction(&penalty_tx) - } + true } - } else { - // The tracker has simply reached CONFIRMATIONS_BEFORE_RETRY missed confirmations. 
- log::warn!( - "Penalty transaction has missed many confirmations: {}", - penalty_tx.txid() - ); - carrier.send_transaction(&penalty_tx) + // NOTE: We aren't fully synced with the bitcoind backend so can't check if the dispute tx is in our txindex. + ConfirmationStatus::IrrevocablyResolved => { + log::info!( + "Reorged dispute tx (txid={}) is already on the strong chain", + dispute_txid + ); + true + } + ConfirmationStatus::Rejected(e) => { + log::error!( + "Reorged dispute tx (txid={}) rejected during rebroadcast (reason: {e:?})", + dispute_txid + ); + false + } + x => unreachable!( + "`Carrier::send_transaction` shouldn't return this variant: {:?}", + x + ), }; - if let ConfirmationStatus::Rejected(_) = status { - rejected.insert(uuid); + if should_publish_penalty { + // Try to rebroadcast the penalty tx. + if let ConfirmationStatus::Rejected(_) = + carrier.send_transaction(&tracker.penalty_tx) + { + rejected.push(uuid) + } else { + // The penalty might actually be confirmed (ConfirmationStatus::IrrevocablyResolved) since bitcoind + // is fully synced with the stronger chain already, but we won't know which block it was confirmed in. + // We should see the tracker appear in the blockchain in the next couple of connected blocks. + dbm.update_tracker_status(uuid, &ConfirmationStatus::InMempoolSince(height)) + .unwrap() + } } else { - // Update the tracker if it gets accepted. This will also update the height (since when we are counting the tracker - // to have been in mempool), so it resets the wait period instead of trying to rebroadcast every block. - // DISCUSS: We may want to find another approach in the future for the InMempoool transactions. - trackers.get_mut(&uuid).unwrap().status = status; - accepted.insert(uuid, status); + rejected.push(uuid) } } - (accepted, rejected) + (!rejected.is_empty()).then_some(rejected) } - // DISCUSS: Check comment regarding callbacks in watcher.rs - - /// Deletes trackers from memory. + /// Rebroadcasts a list of penalty transactions that have missed too many confirmations. /// - /// Logs a different message depending on whether the trackers have been outdated or completed. - fn delete_trackers_from_memory(&self, uuids: &HashSet, reason: DeletionReason) { - let mut trackers = self.trackers.lock().unwrap(); - let mut tx_tracker_map = self.tx_tracker_map.lock().unwrap(); - for uuid in uuids.iter() { - match reason { - DeletionReason::Completed => log::info!("Appointment completed. Penalty transaction was irrevocably confirmed: {uuid}"), - DeletionReason::Outdated => log::info!("Appointment couldn't be completed. Expiry reached but penalty didn't make it to the chain: {uuid}"), - DeletionReason::Rejected => log::info!("Appointment couldn't be completed. Either the dispute or the penalty txs where rejected during rebroadcast: {uuid}"), - } - - match trackers.remove(uuid) { - Some(tracker) => { - let trackers = tx_tracker_map.get_mut(&tracker.penalty_txid).unwrap(); - - if trackers.len() == 1 { - tx_tracker_map.remove(&tracker.penalty_txid); - - log::info!( - "No more trackers for penalty transaction: {}", - tracker.penalty_txid - ); - } else { - trackers.remove(uuid); - } - } - None => { - // This should never happen. Logging just in case so we can fix it if so - log::error!("Completed tracker not found when cleaning: {uuid}"); - } + /// This covers the case where a transaction is not getting confirmations (most likely due to low + /// fees and needs to be bumped, but there is not much we can do until anchors).
+ /// + /// Returns a vector of rejected trackers during rebroadcast if any were rejected, [None] otherwise. + fn rebroadcast_stale_txs(&self, height: u32) -> Option> { + let dbm = self.dbm.lock().unwrap(); + let mut carrier = self.carrier.lock().unwrap(); + let mut rejected = Vec::new(); + + // Retry sending trackers which have been in the mempool for more than `CONFIRMATIONS_BEFORE_RETRY` blocks. + let stale_confirmation_status = + ConfirmationStatus::InMempoolSince(height - CONFIRMATIONS_BEFORE_RETRY as u32); + // NOTE: Ideally this will only pull UUIDs which have been in the mempool for exactly `CONFIRMATIONS_BEFORE_RETRY` blocks, but + // might also return ones which have been there for a longer period. This can only happen if the tower missed + // a couple of block connections due to a force update. + for uuid in dbm + .load_trackers_with_confirmation_status(stale_confirmation_status) + .unwrap() + { + let tracker = dbm.load_tracker(uuid).unwrap(); + log::warn!( + "Penalty transaction has missed many confirmations: {}", + tracker.penalty_tx.txid() + ); + // Rebroadcast the penalty transaction. + let status = carrier.send_transaction(&tracker.penalty_tx); + if let ConfirmationStatus::Rejected(_) = status { + rejected.push(uuid); + } else { + // DISCUSS: What if the tower was down for some time and was later force updated while this penalty got on-chain? + // Sending it will yield `ConfirmationStatus::IrrevocablyResolved` which would panic here. + // We might want to replace the `ConfirmationStatus::IrrevocablyResolved` variant with + // `ConfirmationStatus::ConfirmedIn(height - IRREVOCABLY_RESOLVED)`. + dbm.update_tracker_status(uuid, &status).unwrap(); } } - } - /// Deletes trackers from memory and the database. - /// - /// Removes all data related to the appointment from the database in cascade.
- fn delete_trackers( - &self, - uuids: &HashSet, - updated_users: &HashMap, - reason: DeletionReason, - ) { - if !uuids.is_empty() { - self.delete_trackers_from_memory(uuids, reason); - self.dbm - .lock() - .unwrap() - .batch_remove_appointments(uuids, updated_users); - } + (!rejected.is_empty()).then_some(rejected) } } @@ -539,69 +414,55 @@ impl chain::Listen for Responder { .collect(); self.tx_index.lock().unwrap().update(*header, &txs); - if !self.trackers.lock().unwrap().is_empty() { - // Complete those appointments that are due at this height - let completed_trackers = self.check_confirmations( - &txdata.iter().map(|(_, tx)| tx.txid()).collect::>(), - height, - ); - let trackers_to_delete_gk = completed_trackers - .iter() - .map(|uuid| (*uuid, self.trackers.lock().unwrap()[uuid].user_id)) - .collect(); - self.delete_trackers( - &completed_trackers, - &self - .gatekeeper - .delete_appointments_from_memory(&trackers_to_delete_gk), - DeletionReason::Completed, - ); - - // Also delete trackers from outdated users (from memory only, the db deletion is handled by the Gatekeeper) - self.delete_trackers_from_memory( - &self.get_outdated_trackers(height), - DeletionReason::Outdated, - ); + // Delete trackers completed at this height + if let Some(trackers) = self.check_confirmations(txs.keys().cloned().collect(), height) { + self.gatekeeper.delete_appointments(trackers, true); + } - // Rebroadcast those transactions that need to - let (_, rejected_trackers) = self.rebroadcast(self.get_txs_to_rebroadcast(height)); - // Delete trackers rejected during rebroadcast - let trackers_to_delete_gk = rejected_trackers - .iter() - .map(|uuid| (*uuid, self.trackers.lock().unwrap()[uuid].user_id)) - .collect(); - self.delete_trackers( - &rejected_trackers, - &self - .gatekeeper - .delete_appointments_from_memory(&trackers_to_delete_gk), - DeletionReason::Rejected, - ); + let mut trackers_to_delete = Vec::new(); + // We might be connecting a new block after a disconnection (reorg). + // We will need to update those trackers that have been reorged. + if self.coming_from_reorg() { + // Handle reorged transactions. This clears `self.reorged_trackers`. + if let Some(trackers) = self.handle_reorged_txs(height) { + trackers_to_delete.extend(trackers); + } + } - // Remove all receipts created in this block - self.carrier.lock().unwrap().clear_receipts(); + // Rebroadcast those transactions that need to + if let Some(trackers) = self.rebroadcast_stale_txs(height) { + trackers_to_delete.extend(trackers); + } - if self.trackers.lock().unwrap().is_empty() { - log::info!("No more pending trackers"); - } + if !trackers_to_delete.is_empty() { + self.gatekeeper + .delete_appointments(trackers_to_delete, false); } + + // Remove all receipts created in this block + self.carrier.lock().unwrap().clear_receipts(); } /// Handles reorgs in the [Responder]. fn block_disconnected(&self, header: &BlockHeader, height: u32) { log::warn!("Block disconnected: {}", header.block_hash()); + // Update the carrier and our tx_index. self.carrier.lock().unwrap().update_height(height); self.tx_index .lock() .unwrap() .remove_disconnected_block(&header.block_hash()); - - for tracker in self.trackers.lock().unwrap().values_mut() { - // The transaction has been unconfirmed. Flag it as reorged out so we can rebroadcast it. - if tracker.status == ConfirmationStatus::ConfirmedIn(height) { - tracker.status = ConfirmationStatus::ReorgedOut; - } - } + // And store the reorged transactions to be retried later. 
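+ // (Every tracker recorded as confirmed at exactly the disconnected height gets buffered in `reorged_trackers` until the next block connection.)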
+ // TODO: Not only confirmed trackers need to be marked as reorged, but also trackers that haven't confirmed yet but whose + // dispute did confirm in the reorged block. We can pull the dispute txids of non-confirmed penalties and get their + // confirmation block from our tx_index. + self.reorged_trackers.lock().unwrap().extend( + self.dbm + .lock() + .unwrap() + .load_trackers_with_confirmation_status(ConfirmationStatus::ConfirmedIn(height)) + .unwrap(), + ); } } @@ -609,74 +470,82 @@ mod tests { use super::*; use lightning::chain::Listen; + use teos_common::appointment::Locator; + use std::collections::HashMap; + use std::iter::FromIterator; use std::sync::{Arc, Mutex}; use crate::dbm::DBM; - use crate::gatekeeper::UserInfo; use crate::rpc_errors; use crate::test_utils::{ - create_carrier, generate_dummy_appointment_with_user, generate_uuid, get_last_n_blocks, - get_random_breach, get_random_tracker, get_random_tx, store_appointment_and_fks_to_db, - BitcoindStopper, Blockchain, MockedServerQuery, AVAILABLE_SLOTS, DURATION, EXPIRY_DELTA, - SLOTS, START_HEIGHT, SUBSCRIPTION_EXPIRY, SUBSCRIPTION_START, + create_carrier, generate_dummy_appointment, generate_dummy_appointment_with_user, + generate_uuid, get_last_n_blocks, get_random_breach, get_random_tracker, get_random_tx, + store_appointment_and_its_user, BitcoindStopper, Blockchain, MockedServerQuery, DURATION, + EXPIRY_DELTA, SLOTS, START_HEIGHT, }; use teos_common::constants::IRREVOCABLY_RESOLVED; use teos_common::test_utils::get_random_user_id; + impl TransactionTracker { + pub fn locator(&self) -> Locator { + Locator::new(self.dispute_tx.txid()) + } + + pub fn uuid(&self) -> UUID { + UUID::new(self.locator(), self.user_id) + } + } + impl PartialEq for Responder { fn eq(&self, other: &Self) -> bool { - *self.trackers.lock().unwrap() == *other.trackers.lock().unwrap() - && *self.tx_tracker_map.lock().unwrap() == *other.tx_tracker_map.lock().unwrap() + // Same in-memory data. + *self.reorged_trackers.lock().unwrap() == *other.reorged_trackers.lock().unwrap() && + *self.tx_index.lock().unwrap() == *other.tx_index.lock().unwrap() && + // && Same DB data.
+ self.get_trackers() == other.get_trackers() } } impl Eq for Responder {} impl Responder { - pub(crate) fn get_trackers(&self) -> &Mutex> { - &self.trackers + pub(crate) fn get_trackers(&self) -> HashMap { + self.dbm.lock().unwrap().load_trackers(None) } pub(crate) fn get_carrier(&self) -> &Mutex { &self.carrier } - pub(crate) fn add_random_tracker( - &self, - uuid: UUID, - status: ConfirmationStatus, - ) -> TransactionTracker { + pub(crate) fn add_random_tracker(&self, status: ConfirmationStatus) -> TransactionTracker { let user_id = get_random_user_id(); let tracker = get_random_tracker(user_id, status); - self.add_dummy_tracker(uuid, &tracker); + self.add_dummy_tracker(&tracker); tracker } - pub(crate) fn add_dummy_tracker(&self, uuid: UUID, tracker: &TransactionTracker) { - // Add data to memory - self.trackers - .lock() - .unwrap() - .insert(uuid, tracker.get_summary()); - self.tx_tracker_map - .lock() - .unwrap() - .insert(tracker.penalty_tx.txid(), HashSet::from_iter([uuid])); - - // Add data to the db + pub(crate) fn add_dummy_tracker(&self, tracker: &TransactionTracker) { let (_, appointment) = generate_dummy_appointment_with_user( tracker.user_id, Some(&tracker.dispute_tx.txid()), ); - store_appointment_and_fks_to_db(&self.dbm.lock().unwrap(), uuid, &appointment); + store_appointment_and_its_user(&self.dbm.lock().unwrap(), &appointment); self.dbm .lock() .unwrap() - .store_tracker(uuid, tracker) + .store_tracker(appointment.uuid(), tracker) .unwrap(); } + + fn store_dummy_appointment_to_db(&self) -> (UserId, UUID) { + let appointment = generate_dummy_appointment(None); + let (uuid, user_id) = (appointment.uuid(), appointment.user_id); + // Store the appointment and the user to the DB. + store_appointment_and_its_user(&self.dbm.lock().unwrap(), &appointment); + (user_id, uuid) + } } async fn create_responder( @@ -724,23 +593,17 @@ mod tests { #[test] fn test_confirmation_status_from_db_data() { // These are pretty simple tests. The db can only store trackers with a confirmation status - // that's either ConfirmedIn or InMempoolSince (Rejected and Reorged are never passed to store). + // that's either ConfirmedIn or InMempoolSince (Rejected and IrrevocablyResolved are never passed to store). let h = 21; - let statuses = [true, false]; - for status in statuses { - if status { - assert_eq!( - ConfirmationStatus::from_db_data(h, status), - ConfirmationStatus::ConfirmedIn(h) - ); - } else { - assert_eq!( - ConfirmationStatus::from_db_data(h, status), - ConfirmationStatus::InMempoolSince(h) - ); - } - } + assert_eq!( + ConfirmationStatus::from_db_data(h, true), + ConfirmationStatus::ConfirmedIn(h) + ); + assert_eq!( + ConfirmationStatus::from_db_data(h, false), + ConfirmationStatus::InMempoolSince(h) + ); } #[test] @@ -758,7 +621,7 @@ mod tests { Some((h, false)) ); assert_eq!(ConfirmationStatus::Rejected(0).to_db_data(), None); - assert_eq!(ConfirmationStatus::ReorgedOut.to_db_data(), None); + assert_eq!(ConfirmationStatus::IrrevocablyResolved.to_db_data(), None); } #[tokio::test] @@ -774,11 +637,7 @@ mod tests { // If we add some trackers to the system and create a new Responder reusing the same db // (as if simulating a bootstrap from existing data), the data should be properly loaded. 
for i in 0..10 { - // Add the necessary FKs in the database - let user_id = get_random_user_id(); - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - store_appointment_and_fks_to_db(&responder.dbm.lock().unwrap(), uuid, &appointment); - + let (user_id, uuid) = responder.store_dummy_appointment_to_db(); let breach = get_random_breach(); let s = if i % 2 == 0 { ConfirmationStatus::InMempoolSince(i) @@ -800,46 +659,31 @@ mod tests { let start_height = START_HEIGHT as u32; let (responder, _s) = init_responder(MockedServerQuery::Regular).await; - let user_id = get_random_user_id(); - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - store_appointment_and_fks_to_db(&responder.dbm.lock().unwrap(), uuid, &appointment); - + let (user_id, uuid) = responder.store_dummy_appointment_to_db(); let breach = get_random_breach(); - let penalty_txid = breach.penalty_tx.txid(); assert_eq!( responder.handle_breach(uuid, breach, user_id), ConfirmationStatus::InMempoolSince(start_height) ); - assert!(responder.trackers.lock().unwrap().contains_key(&uuid)); + let tracker = responder.dbm.lock().unwrap().load_tracker(uuid).unwrap(); assert_eq!( - responder.trackers.lock().unwrap()[&uuid].status, + tracker.status, ConfirmationStatus::InMempoolSince(start_height) ); - assert!(responder - .tx_tracker_map - .lock() - .unwrap() - .contains_key(&penalty_txid)); // Breaches won't be overwritten once passed to the Responder. If the same UUID is // passed twice, the receipt corresponding to the first breach will be handed back. let another_breach = get_random_breach(); assert_eq!( - responder.handle_breach(uuid, another_breach.clone(), user_id), + responder.handle_breach(uuid, another_breach, user_id), ConfirmationStatus::InMempoolSince(start_height) ); - - assert!(responder.trackers.lock().unwrap().contains_key(&uuid)); + // Getting the tracker should return the old one. 
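+ // (The second insert fails at the DB level and is only logged, so the original tracker is preserved.)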
assert_eq!( - responder.trackers.lock().unwrap()[&uuid].status, - ConfirmationStatus::InMempoolSince(start_height) + tracker, + responder.dbm.lock().unwrap().load_tracker(uuid).unwrap() ); - assert!(!responder - .tx_tracker_map - .lock() - .unwrap() - .contains_key(&another_breach.penalty_tx.txid())); } #[tokio::test] @@ -847,36 +691,25 @@ mod tests { let start_height = START_HEIGHT as u32; let (responder, _s) = init_responder(MockedServerQuery::InMempoool).await; - let user_id = get_random_user_id(); - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - store_appointment_and_fks_to_db(&responder.dbm.lock().unwrap(), uuid, &appointment); - + let (user_id, uuid) = responder.store_dummy_appointment_to_db(); let breach = get_random_breach(); - let penalty_txid = breach.penalty_tx.txid(); assert_eq!( responder.handle_breach(uuid, breach, user_id), ConfirmationStatus::InMempoolSince(start_height) ); - assert!(responder.trackers.lock().unwrap().contains_key(&uuid)); + let tracker = responder.dbm.lock().unwrap().load_tracker(uuid).unwrap(); assert_eq!( - responder.trackers.lock().unwrap()[&uuid].status, + tracker.status, ConfirmationStatus::InMempoolSince(start_height) ); - assert!(responder - .tx_tracker_map - .lock() - .unwrap() - .contains_key(&penalty_txid)); } #[tokio::test] async fn test_handle_breach_accepted_in_txindex() { let (responder, _s) = init_responder(MockedServerQuery::Regular).await; - let user_id = get_random_user_id(); - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - store_appointment_and_fks_to_db(&responder.dbm.lock().unwrap(), uuid, &appointment); + let (user_id, uuid) = responder.store_dummy_appointment_to_db(); let breach = get_random_breach(); let penalty_txid = breach.penalty_tx.txid(); @@ -900,16 +733,11 @@ mod tests { responder.handle_breach(uuid, breach, user_id), ConfirmationStatus::ConfirmedIn(target_height) ); - assert!(responder.trackers.lock().unwrap().contains_key(&uuid)); + let tracker = responder.dbm.lock().unwrap().load_tracker(uuid).unwrap(); assert_eq!( - responder.trackers.lock().unwrap()[&uuid].status, + tracker.status, ConfirmationStatus::ConfirmedIn(target_height) ); - assert!(responder - .tx_tracker_map - .lock() - .unwrap() - .contains_key(&penalty_txid)); } #[tokio::test] @@ -922,18 +750,12 @@ mod tests { let user_id = get_random_user_id(); let uuid = generate_uuid(); let breach = get_random_breach(); - let penalty_txid = breach.penalty_tx.txid(); assert_eq!( responder.handle_breach(uuid, breach, user_id), ConfirmationStatus::Rejected(rpc_errors::RPC_VERIFY_ERROR) ); - assert!(!responder.trackers.lock().unwrap().contains_key(&uuid)); - assert!(!responder - .tx_tracker_map - .lock() - .unwrap() - .contains_key(&penalty_txid)); + assert!(!responder.has_tracker(uuid)); } #[tokio::test] @@ -941,11 +763,7 @@ mod tests { let (responder, _s) = init_responder(MockedServerQuery::Regular).await; let start_height = START_HEIGHT as u32; - // Add the necessary FKs in the database - let user_id = get_random_user_id(); - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - store_appointment_and_fks_to_db(&responder.dbm.lock().unwrap(), uuid, &appointment); - + let (user_id, uuid) = responder.store_dummy_appointment_to_db(); let mut breach = get_random_breach(); responder.add_tracker( uuid, @@ -954,21 +772,7 @@ mod tests { ConfirmationStatus::InMempoolSince(start_height), ); - // Check that the data has been added to trackers and to the tx_tracker_map - assert_eq!( - 
responder.trackers.lock().unwrap().get(&uuid), - Some(&TrackerSummary { - user_id, - penalty_txid: breach.penalty_tx.txid(), - status: ConfirmationStatus::InMempoolSince(start_height) - }) - ); - assert!(responder - .tx_tracker_map - .lock() - .unwrap() - .contains_key(&breach.penalty_tx.txid())); - // Check that the data is also in the database + // Check that the data has been added to the responder. assert_eq!( responder.dbm.lock().unwrap().load_tracker(uuid).unwrap(), TransactionTracker::new( @@ -979,16 +783,9 @@ mod tests { ); // Adding a confirmed tracker should result in the same but with the height being set. - let uuid = generate_uuid(); - breach = get_random_breach(); - - responder - .dbm - .lock() - .unwrap() - .store_appointment(uuid, &appointment) - .unwrap(); + let (user_id, uuid) = responder.store_dummy_appointment_to_db(); + breach = get_random_breach(); responder.add_tracker( uuid, breach.clone(), @@ -996,23 +793,6 @@ mod tests { ConfirmationStatus::ConfirmedIn(start_height - 1), ); - assert_eq!( - responder.trackers.lock().unwrap().get(&uuid), - Some(&TrackerSummary { - user_id, - penalty_txid: breach.penalty_tx.txid(), - status: ConfirmationStatus::ConfirmedIn(start_height - 1) - }) - ); - assert!(responder - .tx_tracker_map - .lock() - .unwrap() - .contains_key(&breach.penalty_tx.txid())); - assert_eq!( - responder.tx_tracker_map.lock().unwrap()[&breach.penalty_tx.txid()].len(), - 1 - ); assert_eq!( responder.dbm.lock().unwrap().load_tracker(uuid).unwrap(), TransactionTracker::new( @@ -1022,15 +802,8 @@ mod tests { ) ); - // Adding another breach with the same penalty transaction (but different uuid) adds an additional uuid to the map entry - let uuid = generate_uuid(); - responder - .dbm - .lock() - .unwrap() - .store_appointment(uuid, &appointment) - .unwrap(); - + // Adding another breach with the same penalty transaction (but different uuid) + let (user_id, uuid) = responder.store_dummy_appointment_to_db(); responder.add_tracker( uuid, breach.clone(), @@ -1038,16 +811,6 @@ mod tests { ConfirmationStatus::ConfirmedIn(start_height), ); - assert!(responder.trackers.lock().unwrap().contains_key(&uuid)); - assert!(responder - .tx_tracker_map - .lock() - .unwrap() - .contains_key(&breach.penalty_tx.txid())); - assert_eq!( - responder.tx_tracker_map.lock().unwrap()[&breach.penalty_tx.txid()].len(), - 2 - ); assert_eq!( responder.dbm.lock().unwrap().load_tracker(uuid).unwrap(), TransactionTracker::new( @@ -1066,10 +829,7 @@ mod tests { let (responder, _s) = init_responder(MockedServerQuery::Regular).await; // Add a new tracker - let user_id = get_random_user_id(); - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - store_appointment_and_fks_to_db(&responder.dbm.lock().unwrap(), uuid, &appointment); - + let (user_id, uuid) = responder.store_dummy_appointment_to_db(); let breach = get_random_breach(); responder.add_tracker( uuid, @@ -1080,12 +840,8 @@ mod tests { assert!(responder.has_tracker(uuid)); - // Delete the tracker and check again (updated users are irrelevant here) - responder.delete_trackers( - &HashSet::from_iter([uuid]), - &HashMap::new(), - DeletionReason::Completed, - ); + // Delete the tracker and check again. 
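+ // (`false` means no slot refund here; the appointment and its tracker are removed from the DB either way.)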
+ responder.gatekeeper.delete_appointments(vec![uuid], false); assert!(!responder.has_tracker(uuid)); } @@ -1096,12 +852,10 @@ mod tests { let (responder, _s) = init_responder(MockedServerQuery::Regular).await; // Store the user and the appointment in the database so we can add the tracker later on (due to FK restrictions) - let user_id = get_random_user_id(); - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - store_appointment_and_fks_to_db(&responder.dbm.lock().unwrap(), uuid, &appointment); + let (user_id, uuid) = responder.store_dummy_appointment_to_db(); // Data should not be there before adding it - assert_eq!(responder.get_tracker(uuid), None); + assert!(responder.dbm.lock().unwrap().load_tracker(uuid).is_none()); // Data should be there now let breach = get_random_breach(); @@ -1112,7 +866,7 @@ mod tests { ConfirmationStatus::InMempoolSince(start_height), ); assert_eq!( - responder.get_tracker(uuid).unwrap(), + responder.dbm.lock().unwrap().load_tracker(uuid).unwrap(), TransactionTracker::new( breach, user_id, @@ -1120,13 +874,9 @@ mod tests { ) ); - // After deleting the data it should be gone (updated users are irrelevant here) - responder.delete_trackers( - &HashSet::from_iter([uuid]), - &HashMap::new(), - DeletionReason::Outdated, - ); - assert_eq!(responder.get_tracker(uuid), None); + // After deleting the data it should be gone + responder.gatekeeper.delete_appointments(vec![uuid], false); + assert!(responder.dbm.lock().unwrap().load_tracker(uuid).is_none()); } #[tokio::test] @@ -1139,67 +889,69 @@ mod tests { let mut just_confirmed = HashSet::new(); let mut confirmed = HashSet::new(); let mut completed = HashSet::new(); - let mut txids = Vec::new(); + let mut txids = HashSet::new(); for i in 0..40 { - let user_id = get_random_user_id(); - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); + let (user_id, uuid) = responder.store_dummy_appointment_to_db(); let breach = get_random_breach(); - store_appointment_and_fks_to_db(&responder.dbm.lock().unwrap(), uuid, &appointment); - - if i % 4 == 0 { - responder.add_tracker( - uuid, - breach.clone(), - user_id, - ConfirmationStatus::InMempoolSince(21), - ); - in_mempool.insert(uuid); - } else if i % 4 == 1 { - responder.add_tracker( - uuid, - breach.clone(), - user_id, - ConfirmationStatus::InMempoolSince(i), - ); - just_confirmed.insert(uuid); - txids.push(breach.penalty_tx.txid()); - } else if i % 4 == 2 { - responder.add_tracker( - uuid, - breach.clone(), - user_id, - ConfirmationStatus::ConfirmedIn(42), - ); - confirmed.insert(uuid); - } else { - responder.add_tracker( - uuid, - breach.clone(), - user_id, - ConfirmationStatus::ConfirmedIn( - target_height - constants::IRREVOCABLY_RESOLVED, - ), - ); - completed.insert(uuid); + match i % 4 { + 0 => { + responder.add_tracker( + uuid, + breach.clone(), + user_id, + ConfirmationStatus::InMempoolSince(21), + ); + in_mempool.insert(uuid); + } + 1 => { + responder.add_tracker( + uuid, + breach.clone(), + user_id, + ConfirmationStatus::InMempoolSince(i), + ); + just_confirmed.insert(uuid); + txids.insert(breach.penalty_tx.txid()); + } + 2 => { + responder.add_tracker( + uuid, + breach.clone(), + user_id, + ConfirmationStatus::ConfirmedIn(42), + ); + confirmed.insert(uuid); + } + _ => { + responder.add_tracker( + uuid, + breach.clone(), + user_id, + ConfirmationStatus::ConfirmedIn( + target_height - constants::IRREVOCABLY_RESOLVED, + ), + ); + completed.insert(uuid); + } } } // The trackers that were completed should be returned 
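+ // (`check_confirmations` yields `Some` only when at least one tracker reached `IRREVOCABLY_RESOLVED` confirmations.)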
assert_eq!( completed, - responder.check_confirmations(&txids, target_height) + HashSet::from_iter(responder.check_confirmations(txids, target_height).unwrap()) ); // The ones in mempool should still be there (at the same height) for uuid in in_mempool { assert_eq!( responder - .trackers + .dbm .lock() .unwrap() - .get(&uuid) + .load_tracker(uuid) .unwrap() .status, ConfirmationStatus::InMempoolSince(21) @@ -1210,10 +962,10 @@ for uuid in just_confirmed { assert_eq!( responder - .trackers + .dbm .lock() .unwrap() - .get(&uuid) + .load_tracker(uuid) .unwrap() .status, ConfirmationStatus::ConfirmedIn(target_height) @@ -1224,10 +976,10 @@ for uuid in confirmed { assert_eq!( responder - .trackers + .dbm .lock() .unwrap() - .get(&uuid) + .load_tracker(uuid) .unwrap() .status, ConfirmationStatus::ConfirmedIn(42) @@ -1236,478 +988,165 @@ } #[tokio::test] - async fn test_get_txs_to_rebroadcast() { - let (responder, _s) = init_responder(MockedServerQuery::Regular).await; - let current_height = 100; + async fn test_handle_reorged_txs() { + let (responder, _s) = init_responder(MockedServerQuery::InMempoool).await; + let mut trackers = Vec::new(); - let user_id = get_random_user_id(); - responder - .dbm - .lock() - .unwrap() - .store_user( - user_id, - &UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY), - ) - .unwrap(); + for _ in 0..10 { + let uuid = responder + .add_random_tracker(ConfirmationStatus::ConfirmedIn(42)) + .uuid(); + responder.reorged_trackers.lock().unwrap().insert(uuid); + trackers.push(uuid); + } - // Transactions are flagged to be rebroadcast when they've been in mempool for longer than CONFIRMATIONS_BEFORE_RETRY - let mut txs = HashMap::new(); + let height = 100; + assert!(responder.handle_reorged_txs(height).is_none()); + // The reorged trackers buffer should be empty after this. + assert!(responder.reorged_trackers.lock().unwrap().is_empty()); - for i in 0..CONFIRMATIONS_BEFORE_RETRY + 2 { - // Add the appointment to the db so FK rules are satisfied - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - responder - .dbm - .lock() - .unwrap() - .store_appointment(uuid, &appointment) - .unwrap(); - - // Create a breach and add it, setting all them as unconfirmed (at different heights) - let breach = get_random_breach(); - - responder.add_tracker( - uuid, - breach.clone(), - user_id, - ConfirmationStatus::InMempoolSince(current_height - i as u32), + // And all the reorged trackers should have an `InMempoolSince(height)` status.
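+ // (They are reset to `InMempoolSince(height)` because bitcoind might have already confirmed them on the stronger chain, but the confirming block is unknown until it gets connected.)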
+ for uuid in trackers { + assert_eq!( + responder + .dbm + .lock() + .unwrap() + .load_tracker(uuid) + .unwrap() + .status, + ConfirmationStatus::InMempoolSince(height) ); - - if i >= CONFIRMATIONS_BEFORE_RETRY { - txs.insert(uuid, (breach.penalty_tx.clone(), None)); - } } - - assert_eq!(responder.get_txs_to_rebroadcast(current_height), txs); } #[tokio::test] - async fn test_get_txs_to_rebroadcast_reorged() { - // For reorged transactions this works a bit different, the dispute transaction will also be returned here - let (responder, _s) = init_responder(MockedServerQuery::Regular).await; - let current_height = 100; - - let user_id = get_random_user_id(); - responder - .dbm - .lock() - .unwrap() - .store_user( - user_id, - &UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY), - ) - .unwrap(); - - // Transactions are flagged to be rebroadcast when they've been in mempool for longer than CONFIRMATIONS_BEFORE_RETRY - let mut txs = HashMap::new(); - - for i in 0..10 { - // Add the appointment to the db so FK rules are satisfied - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - responder - .dbm - .lock() - .unwrap() - .store_appointment(uuid, &appointment) - .unwrap(); + async fn test_handle_reorged_txs_rejected() { + let (responder, _s) = init_responder(MockedServerQuery::Error( + rpc_errors::RPC_VERIFY_REJECTED as i64, + )) + .await; + let n_trackers = 10; + let mut trackers = HashSet::new(); + + for _ in 0..n_trackers { + let uuid = responder + .add_random_tracker(ConfirmationStatus::ConfirmedIn(42)) + .uuid(); + responder.reorged_trackers.lock().unwrap().insert(uuid); + trackers.insert(uuid); + } - // Create a breach and add it, setting half of them as reorged - let breach = get_random_breach(); - responder.add_tracker( - uuid, - breach.clone(), - user_id, - ConfirmationStatus::ConfirmedIn(current_height), - ); + let height = 100; + let rejected = HashSet::from_iter(responder.handle_reorged_txs(height).unwrap()); + // All the trackers should be returned as rejected. + assert_eq!(trackers, rejected); + // The reorged trackers buffer should be empty after this. + assert!(responder.reorged_trackers.lock().unwrap().is_empty()); - // Since we are adding trackers using add_trackers we'll need to manually change the state of the transaction - // (reorged transactions are not passed to add_tracker, they are detected after they are already there). - // Not doing so will trigger an error in the dbm since reorged transactions are not stored in the db. - if i % 2 == 0 { + // And all the reorged trackers' statuses should be untouched. + for uuid in trackers { + assert_eq!( responder - .trackers + .dbm .lock() .unwrap() - .get_mut(&uuid) + .load_tracker(uuid) .unwrap() - .status = ConfirmationStatus::ReorgedOut; - // Here the dispute is also included - txs.insert( - uuid, - (breach.penalty_tx.clone(), Some(breach.dispute_tx.clone())), - ); - } + .status, + ConfirmationStatus::ConfirmedIn(42) + ); } - - // Since we have only added confirmed and reorged transactions, we should get back only the reorged ones. - assert_eq!(responder.get_txs_to_rebroadcast(current_height), txs); } #[tokio::test] - async fn test_get_outdated_trackers() { - let (responder, _s) = init_responder(MockedServerQuery::Regular).await; - - // Outdated trackers are those whose associated subscription is outdated and have not been confirmed yet (they don't have - a single confirmation).
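// [Editor's note] The two tests below pin down the contract of `rebroadcast_stale_txs`: a penalty that has sat in mempool for `CONFIRMATIONS_BEFORE_RETRY` or more blocks is re-broadcast, and the method returns the UUIDs whose re-broadcast got rejected (`None` when there are none). A minimal, self-contained sketch of the selection rule; the constant's value and all names here are illustrative assumptions, not the PR's code:
//
//     use std::collections::HashMap;
//
//     const CONFIRMATIONS_BEFORE_RETRY: u8 = 6; // assumed value, for illustration only
//
//     #[derive(Clone, Copy, PartialEq, Eq)]
//     enum Status {
//         ConfirmedIn(u32),
//         InMempoolSince(u32),
//     }
//
//     /// Returns the ids whose penalty should be re-broadcast at `height`.
//     fn stale_txs(trackers: &HashMap<u64, Status>, height: u32) -> Vec<u64> {
//         trackers
//             .iter()
//             .filter_map(|(&id, &status)| match status {
//                 Status::InMempoolSince(h) if height - h >= CONFIRMATIONS_BEFORE_RETRY as u32 => Some(id),
//                 _ => None,
//             })
//             .collect()
//     }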
+ async fn test_rebroadcast_stale_txs_accepted() { + let (responder, _s) = init_responder(MockedServerQuery::InMempoool).await; + let mut statuses = HashMap::new(); + let height = 100; - // Mock data into the GK - let target_block_height = START_HEIGHT as u32; - let user_id = get_random_user_id(); - let uuids = (0..10).map(|_| generate_uuid()).collect::<Vec<_>>(); - responder - .gatekeeper - .add_outdated_user(user_id, target_block_height, Some(uuids.clone())); - - // Mock the data to the Responder. Add data to trackers (half of them unconfirmed) - let mut target_uuids = HashSet::new(); - for (i, uuid) in uuids.into_iter().enumerate() { - let tracker = if i % 2 == 0 { - target_uuids.insert(uuid); - get_random_tracker( - user_id, - ConfirmationStatus::InMempoolSince(target_block_height), - ) + for i in 0..height { + let status = if i % 4 == 0 { + ConfirmationStatus::ConfirmedIn(i) } else { - get_random_tracker( - user_id, - ConfirmationStatus::ConfirmedIn(target_block_height), - ) + ConfirmationStatus::InMempoolSince(i) }; - responder - .trackers - .lock() - .unwrap() - .insert(uuid, tracker.get_summary()); + let uuid = responder.add_random_tracker(status).uuid(); + statuses.insert(uuid, status); } - // Check the expected data is there - assert_eq!( - responder.get_outdated_trackers(target_block_height), - target_uuids - ); - } - - #[tokio::test] - async fn test_rebroadcast_accepted() { - // This test positive rebroadcast cases, including reorgs. However, complex reorg logic is not tested here, it will need a - // dedicated test (against bitcoind, not mocked). - let (responder, _s) = init_responder(MockedServerQuery::Regular).await; - let current_height = 100; - - // Add user to the database - let user_id = get_random_user_id(); - responder - .dbm - .lock() - .unwrap() - .store_user( - user_id, - &UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY), - ) - .unwrap(); - - // Transactions are rebroadcast once they've been in mempool for CONFIRMATIONS_BEFORE_RETRY or they've been reorged out - let mut need_rebroadcast = HashSet::new(); + // There should be no rejected tx. + assert!(responder.rebroadcast_stale_txs(height).is_none()); - for i in 0..10 { - // Generate appointment and also add it to the DB (FK checks) - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - responder + for (uuid, former_status) in statuses { + let status = responder .dbm .lock() .unwrap() - .store_appointment(uuid, &appointment) - .unwrap(); - - let breach = get_random_breach(); - - let height = if i % 2 == 0 { - current_height + 1 - CONFIRMATIONS_BEFORE_RETRY as u32 + .load_tracker(uuid) + .unwrap() + .status; + if let ConfirmationStatus::InMempoolSince(h) = former_status { + if height - h >= CONFIRMATIONS_BEFORE_RETRY as u32 { + // Transactions which have been in mempool for `CONFIRMATIONS_BEFORE_RETRY` or more blocks should have been rebroadcast. + assert_eq!(status, ConfirmationStatus::InMempoolSince(height)); + } else { + // Others left untouched. + assert_eq!(status, former_status); + } } else { - need_rebroadcast.insert(uuid); - current_height - CONFIRMATIONS_BEFORE_RETRY as u32 - }; - - responder.add_tracker( - uuid, - breach, - user_id, - ConfirmationStatus::InMempoolSince(height), - ); - - // Reorged txs need to be set manually - if i % 2 == 1 { - responder - .trackers - .lock() - .unwrap() - .get_mut(&uuid) - .unwrap() - .status = ConfirmationStatus::ReorgedOut; + // Confirmed transactions left untouched as well.
+ assert_eq!(status, former_status); } } - - // Check all are accepted - let (accepted, rejected) = - responder.rebroadcast(responder.get_txs_to_rebroadcast(current_height)); - let accepted_uuids: HashSet<UUID> = accepted.keys().cloned().collect(); - assert_eq!(accepted_uuids, need_rebroadcast); - assert!(rejected.is_empty()); } #[tokio::test] - async fn test_rebroadcast_rejected() { - // This test negative rebroadcast cases, including reorgs. However, complex reorg logic is not tested here, it will need a - // dedicated test (against bitcoind, not mocked). + async fn test_rebroadcast_stale_txs_rejected() { let (responder, _s) = init_responder(MockedServerQuery::Error( rpc_errors::RPC_VERIFY_ERROR as i64, )) .await; - let current_height = 100; - - // Add user to the database - let user_id = get_random_user_id(); - responder - .dbm - .lock() - .unwrap() - .store_user( - user_id, - &UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY), - ) - .unwrap(); - - // Transactions are rebroadcast once they've been in mempool for CONFIRMATIONS_BEFORE_RETRY or they've been reorged out - let mut need_rebroadcast = HashSet::new(); - - for i in 0..30 { - // Generate appointment and also add it to the DB (FK checks) - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - responder - .dbm - .lock() - .unwrap() - .store_appointment(uuid, &appointment) - .unwrap(); - - let breach = get_random_breach(); + let mut statuses = HashMap::new(); + let height = 100; - let height = if i % 2 == 0 { - current_height + 1 - CONFIRMATIONS_BEFORE_RETRY as u32 + for i in 0..height { + let status = if i % 4 == 0 { + ConfirmationStatus::ConfirmedIn(i) } else { - need_rebroadcast.insert(uuid); - current_height - CONFIRMATIONS_BEFORE_RETRY as u32 + ConfirmationStatus::InMempoolSince(i) }; - responder.add_tracker( - uuid, - breach, - user_id, - ConfirmationStatus::InMempoolSince(height), - ); - - // Reorged txs need to be set manually - if i % 2 == 1 { - responder - .trackers - .lock() - .unwrap() - .get_mut(&uuid) - .unwrap() - .status = ConfirmationStatus::ReorgedOut; - } + let uuid = responder.add_random_tracker(status).uuid(); + statuses.insert(uuid, status); } - // Check all are rejected - let (accepted, rejected) = - responder.rebroadcast(responder.get_txs_to_rebroadcast(current_height)); - assert_eq!(rejected, need_rebroadcast); - assert!(accepted.is_empty()); - } - - #[tokio::test] - async fn test_delete_trackers_from_memory() { - let (responder, _s) = init_responder(MockedServerQuery::Regular).await; - - // Add user to the database - let user_id = get_random_user_id(); - responder - .dbm - .lock() - .unwrap() - .store_user( - user_id, - &UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY), - ) - .unwrap(); - - // Add some trackers both to memory and to the database - let mut to_be_deleted = HashMap::new(); + // `rebroadcast_stale_txs` will rebroadcast txs which have been in mempool for `CONFIRMATIONS_BEFORE_RETRY` or more + // blocks. Since our backend rejects all the txs, all these broadcast txs should be returned from this method (rejected).
+ let rejected = HashSet::from_iter(responder.rebroadcast_stale_txs(height).unwrap()); + let should_reject: HashSet<_> = statuses + .iter() + .filter_map(|(&uuid, &status)| { + if let ConfirmationStatus::InMempoolSince(h) = status { + (height - h >= CONFIRMATIONS_BEFORE_RETRY as u32).then_some(uuid) + } else { + None + } + }) + .collect(); + assert_eq!(should_reject, rejected); - for _ in 0..10 { - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - responder + for (uuid, former_status) in statuses { + let status = responder .dbm .lock() .unwrap() - .store_appointment(uuid, &appointment) - .unwrap(); - - let breach = get_random_breach(); - responder.add_tracker( - uuid, - breach.clone(), - user_id, - ConfirmationStatus::ConfirmedIn(21), - ); - to_be_deleted.insert(uuid, breach.penalty_tx.txid()); - } - - // Delete and check data is not in memory (the reason does not matter for the test) - responder.delete_trackers_from_memory( - &to_be_deleted.keys().cloned().collect(), - DeletionReason::Completed, - ); - - for (uuid, txid) in to_be_deleted { - // Data is not in memory - assert!(!responder.trackers.lock().unwrap().contains_key(&uuid)); - assert!(!responder.tx_tracker_map.lock().unwrap().contains_key(&txid)); - - // But it can be found in the database - assert!(responder.dbm.lock().unwrap().load_tracker(uuid).is_some()); - } - } - - #[tokio::test] - async fn test_delete_trackers() { - let (responder, _s) = init_responder(MockedServerQuery::Regular).await; - - // Add user to the database - let user_id = get_random_user_id(); - responder - .dbm - .lock() - .unwrap() - .store_user( - user_id, - &UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY), - ) - .unwrap(); - - // Delete trackers removes data from the trackers, tx_tracker_map maps, the database. The deletion of the later is - better check in test_filtered_block_connected. Add data to the map first. - let mut all_trackers = HashSet::new(); - let mut target_trackers = HashSet::new(); - let mut uuid_txid_map = HashMap::new(); - let mut txs_with_multiple_uuids = HashSet::new(); - let mut updated_users = HashMap::new(); - - for i in 0..10 { - // Generate appointment and also add it to the DB (FK checks) - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); - responder - .dbm - .lock() .unwrap() - .store_appointment(uuid, &appointment) - .unwrap(); - - let breach = get_random_breach(); - responder.add_tracker( - uuid, - breach.clone(), - user_id, - ConfirmationStatus::ConfirmedIn(42), - ); - - // Make it so some of the penalties have multiple associated trackers - if i % 3 == 0 { - let uuid2 = generate_uuid(); - responder - .tx_tracker_map - .lock() - .unwrap() - .get_mut(&breach.penalty_tx.txid()) - .unwrap() - .insert(uuid2); - txs_with_multiple_uuids.insert(breach.penalty_tx.txid()); - } - - all_trackers.insert(uuid); - uuid_txid_map.insert(uuid, breach.penalty_tx.txid()); - - // Add some trackers to be deleted - if i % 2 == 0 { - // Users will also be updated once the data is deleted. - // We can made up the numbers here just to check they are updated. - target_trackers.insert(uuid); - updated_users.insert( - appointment.user_id, - UserInfo::new( - AVAILABLE_SLOTS + i, - SUBSCRIPTION_START + i, - SUBSCRIPTION_EXPIRY + i, - ), - ); - } - } - - responder.delete_trackers(&target_trackers, &updated_users, DeletionReason::Rejected); - - // Only trackers in the target_trackers map should have been removed from - // the Responder data structures.
- for uuid in all_trackers { - if target_trackers.contains(&uuid) { - assert!(!responder.trackers.lock().unwrap().contains_key(&uuid)); - assert!(responder.dbm.lock().unwrap().load_tracker(uuid).is_none()); - let penalty_txid = &uuid_txid_map[&uuid]; - // If the penalty had more than one associated uuid, only one has been deleted - // (because that's how the test has been designed) - if txs_with_multiple_uuids.contains(penalty_txid) { - assert_eq!( - responder - .tx_tracker_map - .lock() - .unwrap() - .get(penalty_txid) - .unwrap() - .len(), - 1 - ); - } else { - // Otherwise the whole structure is removed, given it is now empty - assert!(!responder - .tx_tracker_map - .lock() - .unwrap() - .contains_key(penalty_txid)); - } - } else { - assert!(responder.trackers.lock().unwrap().contains_key(&uuid)); - assert!(responder - .tx_tracker_map - .lock() - .unwrap() - .contains_key(&uuid_txid_map[&uuid])); - assert!(responder.dbm.lock().unwrap().load_tracker(uuid).is_some()); - } - } - - // The users that needed to be updated in the database have been (just checking the slot count) - for (id, info) in updated_users { - assert_eq!( - responder - .dbm - .lock() - .unwrap() - .load_user(id) - .unwrap() - .available_slots, - info.available_slots - ) + .status; // All tracker statuses shouldn't change since the submitted ones were all rejected. + assert_eq!(status, former_status); } } @@ -1719,35 +1158,40 @@ mod tests { let (responder, _s) = init_responder_with_chain_and_dbm(MockedServerQuery::Regular, &mut chain, dbm).await; - // block_connected is used to keep track of the confirmation received (or missed) by the trackers the Responder + // filtered_block_connected is used to keep track of the confirmation received (or missed) by the trackers the Responder // is keeping track of. // // If there are any trackers, the Responder will: // - Check if there is any tracker that has been completed - // - Check if there is any tracker that has been outdated // - Check if any tracker has been confirmed or add missing confirmations otherwise // - Rebroadcast all penalty transactions that need so - // - Delete completed and outdated data (including data in the GK) + // - Delete completed and invalid data (and update the data in the GK) // - Clear the Carrier issued_receipts cache + // + // We will also test that trackers for outdated users are removed by the GK. // Let's start by doing the data setup for each test (i.e. adding all the necessary data to the Responder and GK) let target_block_height = chain.get_block_count() + 1; let mut users = Vec::new(); - for _ in 2..23 { + for _ in 0..21 { let user_id = get_random_user_id(); - responder.gatekeeper.add_update_user(user_id).unwrap(); users.push(user_id); } - let mut completed_trackers = HashMap::new(); - // COMPLETED TRACKERS SETUP + let mut completed_trackers = Vec::new(); for i in 0..10 { - // Adding two trackers to each user + // Add these trackers to the first two users let user_id = users[i % 2]; - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); + let dispute_tx = get_random_tx(); + let (uuid, appointment) = + generate_dummy_appointment_with_user(user_id, Some(&dispute_tx.txid())); + responder + .gatekeeper + .add_update_appointment(user_id, uuid, &appointment) + .unwrap(); responder .dbm .lock() @@ -1756,58 +1200,42 @@ mod tests { .unwrap(); // Trackers complete in the next block.
- let breach = get_random_breach(); - responder.add_tracker( - uuid, - breach.clone(), - user_id, - ConfirmationStatus::ConfirmedIn( - target_block_height - constants::IRREVOCABLY_RESOLVED, - ), + let breach = Breach::new(dispute_tx, get_random_tx()); + let status = ConfirmationStatus::ConfirmedIn( + target_block_height - constants::IRREVOCABLY_RESOLVED, ); - responder - .gatekeeper - .get_registered_users() - .lock() - .unwrap() - .get_mut(&user_id) - .unwrap() - .appointments - .insert(uuid, 1); - - completed_trackers.insert(uuid, (user_id, breach)); + responder.add_tracker(uuid, breach.clone(), user_id, status); + completed_trackers.push(TransactionTracker::new(breach, user_id, status)); } // OUTDATED TRACKER SETUP - let mut penalties = Vec::new(); - let mut uuids = Vec::new(); - - for user_id in users.iter().take(21).skip(11) { - let pair = [generate_uuid(), generate_uuid()].to_vec(); - - for uuid in pair.iter() { - let (_, appointment) = generate_dummy_appointment_with_user(*user_id, None); + let mut outdated_trackers = Vec::new(); + for &user_id in users.iter().take(21).skip(11) { + for _ in 0..3 { + let dispute_tx = get_random_tx(); + let (uuid, appointment) = + generate_dummy_appointment_with_user(user_id, Some(&dispute_tx.txid())); + responder + .gatekeeper + .add_update_appointment(user_id, uuid, &appointment) + .unwrap(); responder .dbm .lock() .unwrap() - .store_appointment(*uuid, &appointment) + .store_appointment(uuid, &appointment) .unwrap(); - let breach = get_random_breach(); - penalties.push(breach.penalty_tx.txid()); - responder.add_tracker( - *uuid, - breach, - *user_id, - ConfirmationStatus::InMempoolSince(target_block_height - 1), - ); + let breach = Breach::new(dispute_tx, get_random_tx()); + let status = ConfirmationStatus::InMempoolSince(target_block_height - 1); + responder.add_tracker(uuid, breach.clone(), user_id, status); + outdated_trackers.push(TransactionTracker::new(breach, user_id, status)); } - uuids.extend(pair.clone()); + // Outdate this user so their trackers are deleted responder .gatekeeper - .add_outdated_user(*user_id, target_block_height, Some(pair)); + .add_outdated_user(user_id, target_block_height); } // CONFIRMATIONS SETUP @@ -1817,11 +1245,16 @@ mod tests { .add_update_user(standalone_user_id) .unwrap(); - let mut transactions = Vec::new(); - let mut just_confirmed_txs = Vec::new(); + let mut missed_confirmation_trackers = Vec::new(); + let mut just_confirmed_trackers = Vec::new(); for i in 0..10 { + let dispute_tx = get_random_tx(); let (uuid, appointment) = - generate_dummy_appointment_with_user(standalone_user_id, None); + generate_dummy_appointment_with_user(standalone_user_id, Some(&dispute_tx.txid())); + responder + .gatekeeper + .add_update_appointment(standalone_user_id, uuid, &appointment) + .unwrap(); responder .dbm .lock() @@ -1829,39 +1262,53 @@ mod tests { .store_appointment(uuid, &appointment) .unwrap(); - let breach = get_random_breach(); - transactions.push(breach.clone().penalty_tx.txid()); + let breach = Breach::new(dispute_tx, get_random_tx()); + let status = ConfirmationStatus::InMempoolSince(target_block_height - 1); + responder.add_tracker(uuid, breach.clone(), standalone_user_id, status); if i % 2 == 0 { - just_confirmed_txs.push(breach.clone().penalty_tx); + just_confirmed_trackers.push(TransactionTracker::new( + breach, + standalone_user_id, + status, + )); + } else { + missed_confirmation_trackers.push(TransactionTracker::new( + breach, + standalone_user_id, + status, + )); } - responder.add_tracker( - uuid, - 
breach, - standalone_user_id, - ConfirmationStatus::InMempoolSince(target_block_height - 1), - ); } // REBROADCAST SETUP - let (uuid, appointment) = generate_dummy_appointment_with_user(standalone_user_id, None); - - responder - .dbm - .lock() - .unwrap() - .store_appointment(uuid, &appointment) - .unwrap(); + let mut trackers_to_rebroadcast = Vec::new(); + for _ in 0..5 { + let dispute_tx = get_random_tx(); + let (uuid, appointment) = + generate_dummy_appointment_with_user(standalone_user_id, Some(&dispute_tx.txid())); + responder + .gatekeeper + .add_update_appointment(standalone_user_id, uuid, &appointment) + .unwrap(); + responder + .dbm + .lock() + .unwrap() + .store_appointment(uuid, &appointment) + .unwrap(); - let tracker_to_rebroadcast = uuid; - responder.add_tracker( - uuid, - get_random_breach(), - standalone_user_id, - ConfirmationStatus::InMempoolSince( + let breach = Breach::new(dispute_tx, get_random_tx()); + let status = ConfirmationStatus::InMempoolSince( target_block_height - CONFIRMATIONS_BEFORE_RETRY as u32, - ), - ); + ); + responder.add_tracker(uuid, breach.clone(), standalone_user_id, status); + trackers_to_rebroadcast.push(TransactionTracker::new( + breach, + standalone_user_id, + status, + )); + } // CARRIER CACHE SETUP // Add some dummy data in the cache to check that it gets cleared @@ -1873,10 +1320,16 @@ mod tests { .insert(get_random_tx().txid(), ConfirmationStatus::ConfirmedIn(21)); // Connecting a block should trigger all the state transitions - responder.block_connected( - &chain.generate(Some(just_confirmed_txs.clone())), - chain.get_block_count(), - ); + let block = chain.generate(Some( + just_confirmed_trackers + .iter() + .map(|t| t.penalty_tx.clone()) + .collect(), + )); + let height = chain.get_block_count(); + // We connect the gatekeeper first so it deletes the outdated users. 
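+ // Ordering matters: the Gatekeeper's listener must run first so that outdated users (and their trackers) are already gone by the time the Responder processes the same block.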
+ responder.gatekeeper.block_connected(&block, height); + responder.block_connected(&block, height); // CARRIER CHECKS assert!(responder @@ -1894,66 +1347,72 @@ mod tests { // COMPLETED TRACKERS CHECKS // Data should have been removed - for (uuid, (user_id, breach)) in completed_trackers { - assert!(!responder.trackers.lock().unwrap().contains_key(&uuid)); - assert!(!responder - .tx_tracker_map + for tracker in completed_trackers { + assert!(responder + .dbm .lock() .unwrap() - .contains_key(&breach.penalty_tx.txid())); - assert!( - !responder.gatekeeper.get_registered_users().lock().unwrap()[&user_id] - .appointments - .contains_key(&uuid) - ); + .load_tracker(tracker.uuid()) + .is_none()); + let (_, user_locators) = responder.gatekeeper.get_user_info(tracker.user_id).unwrap(); + assert!(!user_locators.contains(&tracker.locator())); } // OUTDATED TRACKERS CHECKS - // Data should have been removed - for uuid in uuids { - assert!(!responder.trackers.lock().unwrap().contains_key(&uuid)); - } - for txid in penalties { - assert!(!responder.tx_tracker_map.lock().unwrap().contains_key(&txid)); + // Data should have been removed (tracker not found nor the user) + for tracker in outdated_trackers { + assert!(responder + .dbm + .lock() + .unwrap() + .load_tracker(tracker.uuid()) + .is_none()); + assert!(responder + .gatekeeper + .get_user_info(tracker.user_id) + .is_none()); } // CONFIRMATIONS CHECKS // The transaction confirmation count / confirmation missed should have been updated - let tx_tracker_map = responder.tx_tracker_map.lock().unwrap(); - for txid in transactions { - let uuids = tx_tracker_map.get(&txid).unwrap(); - if just_confirmed_txs - .iter() - .map(|tx| tx.txid()) - .any(|x| x == txid) - { - for uuid in uuids.iter() { - assert_eq!( - responder.trackers.lock().unwrap()[uuid].status, - ConfirmationStatus::ConfirmedIn(target_block_height) - ); - } - } else { - for uuid in uuids.iter() { - assert_eq!( - responder.trackers.lock().unwrap()[uuid].status, - ConfirmationStatus::InMempoolSince(target_block_height - 1) - ); - } - } + for tracker in just_confirmed_trackers { + assert_eq!( + responder + .dbm + .lock() + .unwrap() + .load_tracker(tracker.uuid()) + .unwrap() + .status, + ConfirmationStatus::ConfirmedIn(target_block_height) + ); + } + for tracker in missed_confirmation_trackers { + assert_eq!( + responder + .dbm + .lock() + .unwrap() + .load_tracker(tracker.uuid()) + .unwrap() + .status, + ConfirmationStatus::InMempoolSince(target_block_height - 1) + ); } // REBROADCAST CHECKS - assert_eq!( - responder - .trackers - .lock() - .unwrap() - .get(&tracker_to_rebroadcast) - .unwrap() - .status, - ConfirmationStatus::InMempoolSince(target_block_height), - ); + for tracker in trackers_to_rebroadcast { + assert_eq!( + responder + .dbm + .lock() + .unwrap() + .load_tracker(tracker.uuid()) + .unwrap() + .status, + ConfirmationStatus::InMempoolSince(target_block_height), + ); + } } #[tokio::test] @@ -1965,21 +1424,16 @@ mod tests { // Add user to the database let user_id = get_random_user_id(); - responder - .dbm - .lock() - .unwrap() - .store_user( - user_id, - &UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY), - ) - .unwrap(); + responder.gatekeeper.add_update_user(user_id).unwrap(); let mut reorged = Vec::new(); + let block_range = START_HEIGHT - 10..START_HEIGHT; - for i in 0..10 { - // Generate appointment and also add it to the DB (FK checks) - let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None); + for i in block_range.clone() { + // 
Generate appointment and also add it to the DB + let dispute_tx = get_random_tx(); + let (uuid, appointment) = + generate_dummy_appointment_with_user(user_id, Some(&dispute_tx.txid())); responder .dbm .lock() @@ -1987,49 +1441,33 @@ mod tests { .store_appointment(uuid, &appointment) .unwrap(); - let breach = get_random_breach(); - + let breach = Breach::new(dispute_tx, get_random_tx()); responder.add_tracker( uuid, - breach.clone(), + breach, user_id, - ConfirmationStatus::ConfirmedIn(i), + ConfirmationStatus::ConfirmedIn(i as u32), ); reorged.push(uuid); } // Check that trackers are flagged as reorged if the height they were included at gets disconnected - for i in (0..10).rev() { + for (i, uuid) in block_range.clone().zip(reorged.iter()).rev() { // The header doesn't really matter, just the height - responder.block_disconnected(&chain.tip().header, i); + responder.block_disconnected(&chain.tip().header, i as u32); // Check that the proper tracker gets reorged at the proper height - assert_eq!( - responder - .trackers - .lock() - .unwrap() - .get(reorged.get(i as usize).unwrap()) - .unwrap() - .status, - ConfirmationStatus::ReorgedOut - ); - + assert!(responder.reorged_trackers.lock().unwrap().contains(uuid)); // Check that the carrier block_height has been updated - assert_eq!(responder.carrier.lock().unwrap().get_height(), i); + assert_eq!(responder.carrier.lock().unwrap().get_height(), i as u32); } // Check that all reorged trackers are still reorged - for uuid in reorged { - assert_eq!( - responder - .trackers - .lock() - .unwrap() - .get(&uuid) - .unwrap() - .status, - ConfirmationStatus::ReorgedOut - ); + for uuid in reorged.iter() { + assert!(responder.reorged_trackers.lock().unwrap().contains(uuid)); } + + // But they should be cleared after the first block connection + responder.block_connected(&chain.generate(None), block_range.start as u32); + assert!(responder.reorged_trackers.lock().unwrap().is_empty()); } } diff --git a/teos/src/test_utils.rs index b7959fd3..4dba0a93 100644 --- a/teos/src/test_utils.rs +++ b/teos/src/test_utils.rs @@ -323,7 +323,7 @@ pub(crate) fn generate_dummy_appointment_with_user( let mut app = generate_dummy_appointment(dispute_txid); app.user_id = user_id; - (UUID::new(app.locator(), user_id), app) + (app.uuid(), app) } pub(crate) fn get_random_breach() -> Breach { @@ -341,17 +341,15 @@ pub(crate) fn get_random_tracker( TransactionTracker::new(breach, user_id, status) } -pub(crate) fn store_appointment_and_fks_to_db( - dbm: &DBM, - uuid: UUID, - appointment: &ExtendedAppointment, -) { +pub(crate) fn store_appointment_and_its_user(dbm: &DBM, appointment: &ExtendedAppointment) { dbm.store_user( appointment.user_id, &UserInfo::new(AVAILABLE_SLOTS, SUBSCRIPTION_START, SUBSCRIPTION_EXPIRY), ) - .unwrap(); - dbm.store_appointment(uuid, appointment).unwrap(); + // It's ok if the user is already stored. + .ok(); + dbm.store_appointment(appointment.uuid(), appointment) + .unwrap(); } pub(crate) async fn get_last_n_blocks(chain: &mut Blockchain, n: usize) -> Vec<ValidatedBlock> { diff --git a/teos/src/tx_index.rs index fd7c2b98..a0e4a35a 100644 --- a/teos/src/tx_index.rs +++ b/teos/src/tx_index.rs @@ -9,7 +9,7 @@ use lightning_block_sync::poll::ValidatedBlock; use teos_common::appointment::Locator; /// A trait implemented by types that can be used as a key in a [TxIndex].
-pub trait Key: Hash { +pub trait Key: Hash + Eq { fn from_txid(txid: Txid) -> Self; } @@ -79,8 +79,8 @@ impl Value for Transaction { /// Data structure used to index locators computed from parsed blocks. /// /// Holds up to `size` blocks with their corresponding computed [Locator]s. -#[derive(Debug)] -pub struct TxIndex { +#[derive(Debug, PartialEq, Eq)] +pub struct TxIndex { /// A [K]:[V] map. index: HashMap<K, V>, /// Vector of block hashes covered by the index. @@ -95,7 +95,7 @@ pub struct TxIndex impl<K, V> TxIndex<K, V> where - K: Key + std::cmp::Eq + Copy, + K: Key + Copy, V: Value + Clone, Self: Sized, { @@ -143,11 +143,6 @@ where self.index.get(k) } - /// Checks whether the index contains a certain key. - pub fn contains_key(&self, k: &K) -> bool { - self.index.contains_key(k) - } - /// Checks if the index is full. pub fn is_full(&self) -> bool { self.blocks.len() > self.size @@ -175,7 +170,7 @@ where if self.is_full() { // Avoid logging during bootstrap - log::info!("New block added to index: {}", block_header.block_hash()); + log::debug!("New block added to index: {}", block_header.block_hash()); self.tip += 1; self.remove_oldest_block(); } @@ -204,11 +199,11 @@ where let ks = self.tx_in_block.remove(&h).unwrap(); self.index.retain(|k, _| !ks.contains(k)); - log::info!("Oldest block removed from index: {h}"); + log::debug!("Oldest block removed from index: {h}"); } } -impl fmt::Display for TxIndex { +impl fmt::Display for TxIndex { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, @@ -240,6 +235,10 @@ mod tests { pub fn blocks(&self) -> &VecDeque<BlockHash> { &self.blocks } + + pub fn contains_key(&self, k: &K) -> bool { + self.index.contains_key(k) + } } #[tokio::test] @@ -304,7 +303,7 @@ mod tests { ); let fake_hash = BlockHash::default(); - assert!(matches!(cache.get_height(&fake_hash), None)); + assert!(cache.get_height(&fake_hash).is_none()); } #[tokio::test] diff --git a/teos/src/watcher.rs index ec2e7da5..90606fcf 100644 --- a/teos/src/watcher.rs +++ b/teos/src/watcher.rs @@ -1,10 +1,6 @@ //! Logic related to the Watcher, the component in charge of watching for breaches on chain. -use log; - -use std::collections::hash_map::Entry; -use std::collections::{HashMap, HashSet}; -use std::iter::FromIterator; +use std::collections::HashMap; use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::{Arc, Mutex}; @@ -19,7 +15,7 @@ use teos_common::receipts::{AppointmentReceipt, RegistrationReceipt}; use teos_common::{TowerId, UserId}; use crate::dbm::DBM; -use crate::extended_appointment::{AppointmentSummary, ExtendedAppointment, UUID}; +use crate::extended_appointment::{ExtendedAppointment, UUID}; use crate::gatekeeper::{Gatekeeper, MaxSlotsReached, UserInfo}; use crate::responder::{ConfirmationStatus, Responder, TransactionTracker}; use crate::tx_index::TxIndex; @@ -82,19 +78,11 @@ pub(crate) enum AppointmentInfo { Tracker(TransactionTracker), } -/// Reason why the appointment is deleted. Used for logging purposes. -enum DeletionReason { - Outdated, - Invalid, - Accepted, -} - /// Types of new appointments stored in the [Watcher]. #[derive(Debug, PartialEq, Eq)] enum StoredAppointment { New, Update, - Collision, } /// Types of new triggered appointments handled by the [Watcher]. @@ -108,10 +96,6 @@ enum TriggeredAppointment { /// Component in charge of watching for triggers in the chain (aka channel breaches for lightning).
#[derive(Debug)] pub struct Watcher { - /// A map holding a summary of every appointment ([ExtendedAppointment]) hold by the [Watcher], identified by a [UUID]. - appointments: Mutex<HashMap<UUID, AppointmentSummary>>, - /// A map between [Locator]s (user identifiers for [Appointment]s) and [UUID]s (tower identifiers). - locator_uuid_map: Mutex<HashMap<Locator, HashSet<UUID>>>, /// A cache of the [Locator]s computed for the transactions in the last few blocks. locator_cache: Mutex<TxIndex<Locator, Transaction>>, /// A [Responder] instance. Data will be passed to it once triggered (if valid). @@ -139,21 +123,7 @@ impl Watcher { tower_id: TowerId, dbm: Arc<Mutex<DBM>>, ) -> Self { - let mut appointments = HashMap::new(); - let mut locator_uuid_map: HashMap<Locator, HashSet<UUID>> = HashMap::new(); - for (uuid, appointment) in dbm.lock().unwrap().load_appointments(None) { - appointments.insert(uuid, appointment.get_summary()); - - if let Some(map) = locator_uuid_map.get_mut(&appointment.locator()) { - map.insert(uuid); - } else { - locator_uuid_map.insert(appointment.locator(), HashSet::from_iter(vec![uuid])); - } - } - Watcher { - appointments: Mutex::new(appointments), - locator_uuid_map: Mutex::new(locator_uuid_map), locator_cache: Mutex::new(TxIndex::new(last_n_blocks, last_known_block_height)), responder, gatekeeper, @@ -166,7 +136,7 @@ impl Watcher { /// Returns whether the [Watcher] has been created from scratch (fresh) or from backed-up data. pub fn is_fresh(&self) -> bool { - self.appointments.lock().unwrap().is_empty() + self.get_appointments_count() == 0 } /// Registers a new user within the [Watcher]. This request is passed to the [Gatekeeper], who is in @@ -186,8 +156,7 @@ impl Watcher { /// - The user has enough available slots to fit the appointment /// - The appointment hasn't been responded to yet (data cannot be found in the [Responder]) /// - /// If an appointment is accepted, an [AppointmentSummary] will be added to the the watching pool and - /// monitored by the [Watcher]. An [ExtendedAppointment] (constructed from the [Appointment]) will be persisted on disk. + /// If an appointment is accepted, an [ExtendedAppointment] (constructed from the [Appointment]) will be persisted on disk. /// In case the locator for the given appointment can be found in the cache (meaning the appointment has been /// triggered recently) the data will be passed to the [Responder] straightaway (modulo it being valid). pub(crate) fn add_appointment( @@ -214,13 +183,15 @@ impl Watcher { self.last_known_block_height.load(Ordering::Acquire), ); - let uuid = UUID::new(extended_appointment.locator(), user_id); + let uuid = extended_appointment.uuid(); if self.responder.has_tracker(uuid) { log::info!("Tracker for {uuid} already found in Responder"); return Err(AddAppointmentFailure::AlreadyTriggered); } + // TODO: This is not atomic: we update the user's slots and THEN add their appointment. + // This means it can happen that we update the slots but some failure prevents the appointment from being inserted. let available_slots = self .gatekeeper .add_update_appointment(user_id, uuid, &extended_appointment) @@ -255,54 +226,23 @@ impl Watcher { Ok((receipt, available_slots, expiry)) } - /// Stores an appointment in the [Watcher] memory and into the database (or updates it if it already exists). - /// - /// Data is stored in `locator_uuid_map` and `appointments`. + /// Stores an appointment in the database (or updates it if it already exists).
fn store_appointment( &self, uuid: UUID, appointment: &ExtendedAppointment, ) -> StoredAppointment { - self.appointments - .lock() - .unwrap() - .insert(uuid, appointment.get_summary()); - let mut locator_uuid_map = self.locator_uuid_map.lock().unwrap(); - if let Entry::Vacant(e) = locator_uuid_map.entry(appointment.locator()) { - // New appointment - e.insert(HashSet::from_iter(vec![uuid])); - - self.dbm - .lock() - .unwrap() - .store_appointment(uuid, appointment) - .unwrap(); - StoredAppointment::New + let dbm = self.dbm.lock().unwrap(); + if dbm.appointment_exists(uuid) { + log::debug!( + "User {} is updating appointment {uuid}", + appointment.user_id + ); + dbm.update_appointment(uuid, appointment).unwrap(); + StoredAppointment::Update } else { - // Either an update or an appointment from another user sharing the same locator - if locator_uuid_map - .get_mut(&appointment.locator()) - .unwrap() - .insert(uuid) - { - log::debug!( - "Adding an additional appointment to locator {}: {uuid}", - appointment.locator() - ); - self.dbm - .lock() - .unwrap() - .store_appointment(uuid, appointment) - .unwrap(); - StoredAppointment::Collision - } else { - log::debug!("Update received for {uuid}, locator map not modified"); - self.dbm - .lock() - .unwrap() - .update_appointment(uuid, appointment); - StoredAppointment::Update - } + dbm.store_appointment(uuid, appointment).unwrap(); + StoredAppointment::New } } @@ -329,6 +269,9 @@ impl Watcher { .lock() .unwrap() .store_appointment(uuid, appointment) + // TODO: Don't unwrap, or better, make this insertion atomic with the + // `responder.has_tracker` that might cause the unwrap in the first place. + // ref: https://github.com/talaia-labs/rust-teos/pull/190#discussion_r1218235632 .unwrap(); if let ConfirmationStatus::Rejected(reason) = self.responder.handle_breach( @@ -336,11 +279,8 @@ impl Watcher { Breach::new(dispute_tx.clone(), penalty_tx), user_id, ) { - // DISCUSS: We could either free the slots or keep it occupied as if this was misbehavior. - // Keeping it for now. log::warn!("Appointment bounced in the Responder. Reason: {reason:?}"); - - self.dbm.lock().unwrap().remove_appointment(uuid); + self.gatekeeper.delete_appointments(vec![uuid], false); TriggeredAppointment::Rejected } else { log::info!("Appointment went straight to the Responder"); @@ -389,25 +329,17 @@ impl Watcher { } let uuid = UUID::new(locator, user_id); - - if self.appointments.lock().unwrap().contains_key(&uuid) { - Ok(AppointmentInfo::Appointment( - self.dbm - .lock() - .unwrap() - .load_appointment(uuid) - .unwrap() - .inner, - )) - } else { - self.responder - .get_tracker(uuid) - .map(AppointmentInfo::Tracker) - .ok_or_else(|| { - log::info!("Cannot find {locator}"); - GetAppointmentFailure::NotFound - }) - } + let dbm = self.dbm.lock().unwrap(); + dbm.load_tracker(uuid) + .map(AppointmentInfo::Tracker) + .or_else(|| { + dbm.load_appointment(uuid) + .map(|ext_app| AppointmentInfo::Appointment(ext_app.inner)) + }) + .ok_or_else(|| { + log::info!("Cannot find {locator}"); + GetAppointmentFailure::NotFound + }) } /// Gets a map of breaches provided a map between locators and transactions. 
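// [Editor's note] The hunk below swaps the in-memory `locator_uuid_map` intersection for a single
// `DBM::batch_check_locators_exist` call, which returns the subset of the candidate locators the
// tower is actually watching. A hypothetical sketch of such a batched lookup with rusqlite (table
// and column names are assumptions; the real query lives in teos/src/dbm.rs and may differ, e.g.
// it would need chunking to respect SQLite's bound-variable limit):
//
//     use rusqlite::{Connection, Result};
//
//     fn batch_check_locators_exist(conn: &Connection, locators: &[Vec<u8>]) -> Result<Vec<Vec<u8>>> {
//         // One "?" placeholder per locator, e.g. "?,?,?".
//         let placeholders = vec!["?"; locators.len()].join(",");
//         let sql = format!("SELECT locator FROM appointments WHERE locator IN ({placeholders})");
//         let mut stmt = conn.prepare(&sql)?;
//         stmt.query_map(rusqlite::params_from_iter(locators.iter()), |row| {
//             row.get::<_, Vec<u8>>(0)
//         })?
//         .collect()
//     }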
@@ -419,20 +351,14 @@ impl Watcher { &self, locator_tx_map: HashMap<Locator, Transaction>, ) -> HashMap<Locator, Transaction> { - let monitored_locators: HashSet<Locator> = self - .locator_uuid_map + let breaches: HashMap<Locator, Transaction> = self + .dbm .lock() .unwrap() - .keys() - .cloned() + .batch_check_locators_exist(locator_tx_map.keys().collect()) + .iter() + .map(|locator| (*locator, locator_tx_map[locator].clone())) .collect(); - let new_locators = locator_tx_map.keys().cloned().collect(); - let mut breaches = HashMap::new(); - - for locator in monitored_locators.intersection(&new_locators) { - let (k, v) = locator_tx_map.get_key_value(locator).unwrap(); - breaches.insert(*k, v.clone()); - } if breaches.is_empty() { log::info!("No breaches found") @@ -443,117 +369,38 @@ impl Watcher { breaches } - /// Filters a map of breaches between those that are valid and those that are not. + /// Responds to breaches. /// - /// Valid breaches are those resulting in a properly formatted [Transaction] once decrypted. - fn filter_breaches( - &self, - breaches: HashMap<Locator, Transaction>, - ) -> ( - HashMap<UUID, Breach>, - HashMap<UUID, cryptography::DecryptingError>, - ) { - let mut valid_breaches = HashMap::new(); - let mut invalid_breaches = HashMap::new(); - - // A cache of the already decrypted blobs so replicate decryption can be avoided - let mut decrypted_blobs: HashMap<Vec<u8>, Transaction> = HashMap::new(); + /// Decrypts triggered appointments using the dispute transaction ID and publishes them. + /// If the decryption fails for some appointments or if it succeeds but they get rejected when sent to the network, + /// they are marked as invalid breaches and returned. + /// [None] is returned if none of these breaches are invalid. + fn handle_breaches(&self, breaches: HashMap<Locator, Transaction>) -> Option<Vec<UUID>> { + let mut invalid_breaches = Vec::new(); - let locator_uuid_map = self.locator_uuid_map.lock().unwrap(); - let dbm = self.dbm.lock().unwrap(); for (locator, dispute_tx) in breaches.into_iter() { - for uuid in locator_uuid_map.get(&locator).unwrap() { - let appointment = dbm.load_appointment(*uuid).unwrap(); - match decrypted_blobs.get(appointment.encrypted_blob()) { - Some(penalty_tx) => { - valid_breaches - .insert(*uuid, Breach::new(dispute_tx.clone(), penalty_tx.clone())); - } - None => { - match cryptography::decrypt( - appointment.encrypted_blob(), - &dispute_tx.txid(), + // WARNING(deadlock): Don't lock `self.dbm` over the loop since `Responder::handle_breach` uses it as well. + let uuids = self.dbm.lock().unwrap().load_uuids(locator); + for uuid in uuids { + let appointment = self.dbm.lock().unwrap().load_appointment(uuid).unwrap(); + match cryptography::decrypt(appointment.encrypted_blob(), &dispute_tx.txid()) { + Ok(penalty_tx) => { + if let ConfirmationStatus::Rejected(_) = self.responder.handle_breach( + uuid, + Breach::new(dispute_tx.clone(), penalty_tx), + appointment.user_id, ) { - Ok(penalty_tx) => { - decrypted_blobs.insert( - appointment.encrypted_blob().clone(), - penalty_tx.clone(), - ); - valid_breaches - .insert(*uuid, Breach::new(dispute_tx.clone(), penalty_tx)); - } - Err(e) => { - invalid_breaches.insert(*uuid, e); - } + invalid_breaches.push(uuid); } } - } - } - } - - (valid_breaches, invalid_breaches) - } - - // DISCUSS:: For outdated data this may be nicer if implemented with a callback from the GK given that: - // - The GK is queried for the data to be deleted - // - Appointment and tracker data can be deleted in cascade when a user is deleted - // If done, the GK can notify the Watcher and Responder to delete data in memory and - // take care of the database itself. - /// Deletes appointments from memory.
- /// - /// The appointments are deleted from the appointments and locator_uuid_map maps. - /// Logs a different message depending on whether the appointments have been outdated, invalid, or accepted. - fn delete_appointments_from_memory(&self, uuids: &HashSet<UUID>, reason: DeletionReason) { - let mut appointments = self.appointments.lock().unwrap(); - let mut locator_uuid_map = self.locator_uuid_map.lock().unwrap(); - - for uuid in uuids { - match reason { - DeletionReason::Outdated => { - log::info!("End time reached by {uuid} without breach. Deleting appointment") - } - DeletionReason::Invalid => log::info!( - "{uuid} cannot be completed, it contains invalid data. Deleting appointment" - ), - DeletionReason::Accepted => { - log::info!("{uuid} accepted by the Responder. Deleting appointment") - } - }; - match appointments.remove(uuid) { - Some(appointment) => { - let appointments = locator_uuid_map.get_mut(&appointment.locator).unwrap(); - - if appointments.len() == 1 { - locator_uuid_map.remove(&appointment.locator); - - log::info!("No more appointments for locator: {}", appointment.locator); - } else { - appointments.remove(uuid); + Err(_) => { + invalid_breaches.push(uuid); } } - None => { - // This should never happen. Logging just in case so we can fix it if so - log::error!("Appointment not found when cleaning: {uuid}"); - } } } - } - /// Deletes appointments from memory and the database. - fn delete_appointments( - &self, - uuids: &HashSet<UUID>, - updated_users: &HashMap<UserId, UserInfo>, - reason: DeletionReason, - ) { - if !uuids.is_empty() { - self.delete_appointments_from_memory(uuids, reason); - self.dbm - .lock() - .unwrap() - .batch_remove_appointments(uuids, updated_users); - } + (!invalid_breaches.is_empty()).then_some(invalid_breaches) } /// Gets the number of users currently registered with the tower. @@ -561,9 +408,9 @@ impl Watcher { self.gatekeeper.get_registered_users_count() } - /// Gets the total number of appointments stored in the [Watcher]. + /// Gets the total number of appointments excluding trackers. pub(crate) fn get_appointments_count(&self) -> usize { - self.appointments.lock().unwrap().len() + self.dbm.lock().unwrap().get_appointments_count() } /// Gets the total number of trackers in the [Responder]. @@ -603,7 +450,7 @@ impl Watcher { } /// Gets the data held by the tower about a given user.
- pub(crate) fn get_user_info(&self, user_id: UserId) -> Option<UserInfo> { + pub(crate) fn get_user_info(&self, user_id: UserId) -> Option<(UserInfo, Vec<Locator>)> { self.gatekeeper.get_user_info(user_id) } @@ -626,28 +473,7 @@ impl Watcher { return Err(GetSubscriptionInfoFailure::SubscriptionExpired(expiry)); } - let subscription_info = self.gatekeeper.get_user_info(user_id).unwrap(); - let mut locators = Vec::new(); - - let appointments = self.appointments.lock().unwrap(); - let dbm = self.dbm.lock().unwrap(); - for uuid in subscription_info.appointments.keys() { - match appointments.get(uuid) { - Some(a) => locators.push(a.locator), - None => { - if self.responder.has_tracker(*uuid) { - if let Some(locator) = dbm.load_locator(*uuid) { - locators.push(locator) - } else { - log::error!("Tracker found in Responder but not in DB (uuid = {uuid})") - } - } else { - log::error!("Appointment found in the Gatekeeper but not in the Watcher nor the Responder (uuid = {uuid})") - } - } - } - } - + let (subscription_info, locators) = self.gatekeeper.get_user_info(user_id).unwrap(); Ok((subscription_info, locators)) } } @@ -683,54 +509,9 @@ impl chain::Listen for Watcher { .unwrap() .update(*header, &locator_tx_map); - if !self.appointments.lock().unwrap().is_empty() { - // Start by removing outdated data so it is not taken into account from this point on - self.delete_appointments_from_memory( - &self.gatekeeper.get_outdated_appointments(height), - DeletionReason::Outdated, - ); - - // Filter out those breaches that do not yield a valid transaction - let (valid_breaches, invalid_breaches) = - self.filter_breaches(self.get_breaches(locator_tx_map)); - - // Send data to the Responder - let mut appointments_to_delete = HashSet::from_iter(invalid_breaches.into_keys()); - let mut delivered_appointments = HashSet::new(); - for (uuid, breach) in valid_breaches { - log::info!("Notifying Responder and deleting appointment (uuid: {uuid})"); - - if let ConfirmationStatus::Rejected(_) = self.responder.handle_breach( - uuid, - breach, - self.appointments.lock().unwrap()[&uuid].user_id, - ) { - appointments_to_delete.insert(uuid); - } else { - delivered_appointments.insert(uuid); - } - } - - // Delete data - let appointments_to_delete_gatekeeper = { - let appointments = self.appointments.lock().unwrap(); - appointments_to_delete - .iter() - .map(|uuid| (*uuid, appointments[uuid].user_id)) - .collect() - }; - self.delete_appointments_from_memory(&delivered_appointments, DeletionReason::Accepted); - self.delete_appointments( - &appointments_to_delete, - &self - .gatekeeper - .delete_appointments_from_memory(&appointments_to_delete_gatekeeper), - DeletionReason::Invalid, - ); - - if self.appointments.lock().unwrap().is_empty() { - log::info!("No more pending appointments"); - } + // Get the breaches found in this block, handle them, and delete invalid ones.
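+ // Valid breaches were already handed to the Responder inside `handle_breaches`; only the ones that failed decryption or whose penalty got rejected on broadcast come back here for deletion.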
+ if let Some(invalid_breaches) = self.handle_breaches(self.get_breaches(locator_tx_map)) { + self.gatekeeper.delete_appointments(invalid_breaches, false); } // Update last known block @@ -755,6 +536,8 @@ impl chain::Listen for Watcher { #[cfg(test)] mod tests { use super::*; + use std::collections::HashSet; + use std::iter::FromIterator; use std::ops::Deref; use std::sync::{Arc, Mutex}; @@ -763,42 +546,35 @@ mod tests { use crate::rpc_errors; use crate::test_utils::{ create_carrier, create_responder, create_watcher, generate_dummy_appointment, - generate_dummy_appointment_with_user, generate_uuid, get_random_breach, get_random_tx, - store_appointment_and_fks_to_db, BitcoindMock, BitcoindStopper, Blockchain, MockOptions, - MockedServerQuery, AVAILABLE_SLOTS, DURATION, EXPIRY_DELTA, SLOTS, START_HEIGHT, - SUBSCRIPTION_EXPIRY, SUBSCRIPTION_START, + generate_dummy_appointment_with_user, get_random_tx, BitcoindMock, BitcoindStopper, + Blockchain, MockOptions, MockedServerQuery, DURATION, EXPIRY_DELTA, SLOTS, START_HEIGHT, }; - use teos_common::cryptography::{get_random_bytes, get_random_keypair}; + use teos_common::cryptography::get_random_keypair; - use bitcoin::hash_types::Txid; - use bitcoin::hashes::Hash; use bitcoin::secp256k1::{PublicKey, Secp256k1}; use lightning::chain::Listen; impl PartialEq for Watcher { fn eq(&self, other: &Self) -> bool { - *self.appointments.lock().unwrap() == *other.appointments.lock().unwrap() - && *self.locator_uuid_map.lock().unwrap() == *other.locator_uuid_map.lock().unwrap() - && self.last_known_block_height.load(Ordering::Relaxed) - == other.last_known_block_height.load(Ordering::Relaxed) + // Same in-memory data. + self.last_known_block_height.load(Ordering::Relaxed) == other.last_known_block_height.load(Ordering::Relaxed) && + *self.locator_cache.lock().unwrap() == *other.locator_cache.lock().unwrap() && + // && Same DB data. + self.get_all_watcher_appointments() == other.get_all_watcher_appointments() } } impl Eq for Watcher {} impl Watcher { - pub(crate) fn add_dummy_tracker_to_responder( - &self, - uuid: UUID, - tracker: &TransactionTracker, - ) { - self.responder.add_dummy_tracker(uuid, tracker) + pub(crate) fn add_dummy_tracker_to_responder(&self, tracker: &TransactionTracker) { + self.responder.add_dummy_tracker(tracker) } - pub(crate) fn add_random_tracker_to_responder(&self, uuid: UUID) -> TransactionTracker { + pub(crate) fn add_random_tracker_to_responder(&self) -> TransactionTracker { // The confirmation status can be whatever here. Using the most common. self.responder - .add_random_tracker(uuid, ConfirmationStatus::ConfirmedIn(100)) + .add_random_tracker(ConfirmationStatus::ConfirmedIn(100)) } } @@ -859,11 +635,11 @@ mod tests { let (user_sk, user_pk) = get_random_keypair(); let user_id = UserId(user_pk); watcher.register(user_id).unwrap(); - let appointment = generate_dummy_appointment(None).inner; - // If we add some trackers to the system and create a new Responder reusing the same db + // If we add some appointments to the system and create a new Watcher reusing the same db // (as if simulating a bootstrap from existing data), the data should be properly loaded. 
for _ in 0..10 { + let appointment = generate_dummy_appointment(None).inner; let user_sig = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap(); watcher .add_appointment(appointment.clone(), user_sig.clone()) @@ -914,7 +690,6 @@ mod tests { // - if the appointment already exists for a given user, update the data // - if the appointment is already in the Responder, reject // - if the trigger for the appointment is in the cache, trigger straightaway - // - DISCUSS: if the appointment is accepted but bounces in the Responder, do not reduce the subscription count // In any of the cases where the appointment should be added to the Watcher, the appointment will be rejected if: // - the user does not have enough slots (either to add or update) // - the subscription has expired @@ -927,10 +702,10 @@ mod tests { let user_id = UserId(user_pk); watcher.register(user_id).unwrap(); let appointment = generate_dummy_appointment(None).inner; + let user_sig = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap(); // Add the appointment for a new user (twice so we can check that updates work) for _ in 0..2 { - let user_sig = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap(); let (receipt, slots, expiry) = watcher .add_appointment(appointment.clone(), user_sig.clone()) .unwrap(); @@ -950,32 +725,25 @@ mod tests { assert_appointment_added(slots, SLOTS - 1, expiry, receipt, &user2_sig, tower_id); - // There should be now two appointments in the Watcher and the same locator should have two different uuids - assert_eq!(watcher.appointments.lock().unwrap().len(), 2); - assert_eq!( - watcher.locator_uuid_map.lock().unwrap()[&appointment.locator].len(), - 2 - ); - - // Check data was added to the database - for uuid in watcher.appointments.lock().unwrap().keys() { - assert!(watcher - .dbm - .lock() - .unwrap() - .load_appointment(*uuid) - .is_some()); - } + // There should be now two appointments in the Watcher + assert_eq!(watcher.get_appointments_count(), 2); + assert_eq!(watcher.responder.get_trackers_count(), 0); // If an appointment is already in the Responder, it should bounce - let (uuid, triggered_appointment) = generate_dummy_appointment_with_user(user_id, None); + let dispute_tx = get_random_tx(); + let (uuid, triggered_appointment) = + generate_dummy_appointment_with_user(user_id, Some(&dispute_tx.txid())); let signature = cryptography::sign(&triggered_appointment.inner.to_vec(), &user_sk).unwrap(); - watcher + let (receipt, slots, expiry) = watcher .add_appointment(triggered_appointment.inner.clone(), signature.clone()) .unwrap(); - let breach = get_random_breach(); + assert_appointment_added(slots, SLOTS - 2, expiry, receipt, &signature, tower_id); + assert_eq!(watcher.get_appointments_count(), 3); + assert_eq!(watcher.responder.get_trackers_count(), 0); + + let breach = Breach::new(dispute_tx, get_random_tx()); watcher.responder.add_tracker( uuid, breach, @@ -988,6 +756,8 @@ mod tests { receipt, Err(AddAppointmentFailure::AlreadyTriggered) )); + assert_eq!(watcher.get_appointments_count(), 2); + assert_eq!(watcher.responder.get_trackers_count(), 1); // If the trigger is already in the cache, the appointment will go straight to the Responder let dispute_tx = tip_txs.last().unwrap(); @@ -995,24 +765,16 @@ mod tests { generate_dummy_appointment_with_user(user_id, Some(&dispute_tx.txid())); let user_sig = cryptography::sign(&appointment_in_cache.inner.to_vec(), &user_sk).unwrap(); let (receipt, slots, expiry) = watcher - .add_appointment(appointment_in_cache.inner.clone(), 
user_sig.clone()) + .add_appointment(appointment_in_cache.inner, user_sig.clone()) .unwrap(); - // The appointment should have been accepted, slots should have been decreased, and data should have been deleted from - // the Watcher's memory. Moreover, a new tracker should be found in the Responder + // The appointment should have been accepted, slots should have been decreased, and a new tracker should be found in the Responder assert_appointment_added(slots, SLOTS - 3, expiry, receipt, &user_sig, tower_id); - assert_eq!(watcher.appointments.lock().unwrap().len(), 3); - assert!(!watcher - .locator_uuid_map - .lock() - .unwrap() - .contains_key(&appointment_in_cache.locator())); + assert_eq!(watcher.get_appointments_count(), 2); + assert_eq!(watcher.responder.get_trackers_count(), 2); + // Data should be in the database assert!(watcher.responder.has_tracker(uuid)); - // Check data was added to the database - assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_some()); - assert!(watcher.dbm.lock().unwrap().load_tracker(uuid).is_some()); - // If an appointment is rejected by the Responder, it is considered misbehavior and the slot count is kept // Wrong penalty let dispute_tx = &tip_txs[tip_txs.len() - 2]; @@ -1021,15 +783,15 @@ mod tests { invalid_appointment.inner.encrypted_blob.reverse(); let user_sig = cryptography::sign(&invalid_appointment.inner.to_vec(), &user_sk).unwrap(); let (receipt, slots, expiry) = watcher - .add_appointment(invalid_appointment.inner.clone(), user_sig.clone()) + .add_appointment(invalid_appointment.inner, user_sig.clone()) .unwrap(); assert_appointment_added(slots, SLOTS - 4, expiry, receipt, &user_sig, tower_id); - assert_eq!(watcher.appointments.lock().unwrap().len(), 3); - + assert_eq!(watcher.get_appointments_count(), 2); + assert_eq!(watcher.responder.get_trackers_count(), 2); // Data should not be in the database - assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_none()); - assert!(watcher.dbm.lock().unwrap().load_tracker(uuid).is_none()); + assert!(!watcher.responder.has_tracker(uuid)); + assert!(!watcher.dbm.lock().unwrap().appointment_exists(uuid)); // Transaction rejected // Update the Responder with a new Carrier @@ -1040,17 +802,19 @@ mod tests { *watcher.responder.get_carrier().lock().unwrap() = carrier; let dispute_tx = &tip_txs[tip_txs.len() - 2]; - let invalid_appointment = generate_dummy_appointment(Some(&dispute_tx.txid())).inner; - let user_sig = cryptography::sign(&invalid_appointment.to_vec(), &user_sk).unwrap(); + let (uuid, invalid_appointment) = + generate_dummy_appointment_with_user(user_id, Some(&dispute_tx.txid())); + let user_sig = cryptography::sign(&invalid_appointment.inner.to_vec(), &user_sk).unwrap(); let (receipt, slots, expiry) = watcher - .add_appointment(invalid_appointment, user_sig.clone()) + .add_appointment(invalid_appointment.inner, user_sig.clone()) .unwrap(); - assert_appointment_added(slots, SLOTS - 4, expiry, receipt, &user_sig, tower_id); - assert_eq!(watcher.appointments.lock().unwrap().len(), 3); - + assert_appointment_added(slots, SLOTS - 5, expiry, receipt, &user_sig, tower_id); + assert_eq!(watcher.get_appointments_count(), 2); + assert_eq!(watcher.responder.get_trackers_count(), 2); // Data should not be in the database - assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_none()); + assert!(!watcher.responder.has_tracker(uuid)); + assert!(!watcher.dbm.lock().unwrap().appointment_exists(uuid)); // FAIL cases (non-registered, subscription expired and not enough slots) @@ -1059,11 
@@ -1059,11 +823,11 @@ mod tests {
         let user3_sig = String::from_utf8((0..65).collect()).unwrap();

         assert!(matches!(
-            watcher.add_appointment(appointment.clone(), user3_sig),
+            watcher.add_appointment(appointment, user3_sig),
             Err(AddAppointmentFailure::AuthenticationFailure)
         ));
         // Data should not be in the database
-        assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_none());
+        assert!(!watcher.dbm.lock().unwrap().appointment_exists(uuid));

         // If the user does not have enough slots, the appointment is rejected. We do not test all possible cases since updates are
         // already tested in the Gatekeeper. Testing that it is rejected if the condition is met should suffice.
@@ -1076,33 +840,30 @@ mod tests {
             .unwrap()
             .available_slots = 0;

-        let dispute_txid = Txid::from_slice(&get_random_bytes(32)).unwrap();
-        let new_appointment = generate_dummy_appointment(Some(&dispute_txid)).inner;
-        let new_app_sig = cryptography::sign(&new_appointment.to_vec(), &user_sk).unwrap();
+        let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+        let signature = cryptography::sign(&appointment.inner.to_vec(), &user_sk).unwrap();
         assert!(matches!(
-            watcher.add_appointment(new_appointment, new_app_sig),
+            watcher.add_appointment(appointment.inner, signature),
             Err(AddAppointmentFailure::NotEnoughSlots)
         ));
         // Data should not be in the database
-        assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_none());
+        assert!(!watcher.dbm.lock().unwrap().appointment_exists(uuid));

         // If the user subscription has expired, the appointment should be rejected.
         watcher
             .gatekeeper
-            .get_registered_users()
-            .lock()
-            .unwrap()
-            .get_mut(&user2_id)
-            .unwrap()
-            .subscription_expiry = START_HEIGHT as u32;
+            .add_outdated_user(user2_id, START_HEIGHT as u32);
+
+        let (uuid, appointment) = generate_dummy_appointment_with_user(user2_id, None);
+        let signature = cryptography::sign(&appointment.inner.to_vec(), &user2_sk).unwrap();

         assert!(matches!(
-            watcher.add_appointment(appointment, user2_sig),
+            watcher.add_appointment(appointment.inner, signature),
             Err(AddAppointmentFailure::SubscriptionExpired { .. })
         ));
         // Data should not be in the database
-        assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_none());
+        assert!(!watcher.dbm.lock().unwrap().appointment_exists(uuid));
     }

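The subscription-expiry checks above no longer poke `subscription_expiry` through `get_registered_users()`; they go through a `Gatekeeper::add_outdated_user` helper instead. A sketch of the semantics the tests seem to rely on (the body and the `registered_users` field are assumptions, not the crate's implementation):

```rust
impl Gatekeeper {
    /// Hypothetical helper: mark `user_id` as having a subscription that
    /// expired at `height`, so it is treated as outdated from then on.
    pub fn add_outdated_user(&self, user_id: UserId, height: u32) {
        if let Some(info) = self.registered_users.lock().unwrap().get_mut(&user_id) {
            info.subscription_expiry = height;
        }
    }
}
```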
     #[tokio::test]
@@ -1114,8 +875,10 @@ mod tests {
         let (_, user_pk) = get_random_keypair();
         let user_id = UserId(user_pk);
         watcher.register(user_id).unwrap();
+        let dispute_txid = get_random_tx().txid();

-        let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
+        let (uuid, appointment) =
+            generate_dummy_appointment_with_user(user_id, Some(&dispute_txid));

         // Storing a new appointment should return New
         assert_eq!(
@@ -1123,46 +886,22 @@ mod tests {
             StoredAppointment::New,
         );
         assert_eq!(
-            *watcher.appointments.lock().unwrap(),
-            HashMap::from_iter([(uuid, appointment.get_summary())])
-        );
-        assert_eq!(
-            *watcher.locator_uuid_map.lock().unwrap(),
-            HashMap::from_iter([(appointment.locator(), HashSet::from_iter([uuid]))])
+            watcher.get_all_watcher_appointments(),
+            HashMap::from_iter([(uuid, appointment)])
         );

         // Adding an appointment with the same UUID should be seen as an update
-        // The appointment data here does not matter much, just the UUID and the locator since they are tied to each other.
+        // We are using a common dispute txid here to get the same uuid.
+        let (new_uuid, appointment) =
+            generate_dummy_appointment_with_user(user_id, Some(&dispute_txid));
+        assert_eq!(new_uuid, uuid);
         assert_eq!(
             watcher.store_appointment(uuid, &appointment),
             StoredAppointment::Update,
         );
         assert_eq!(
-            *watcher.appointments.lock().unwrap(),
-            HashMap::from_iter([(uuid, appointment.get_summary())])
-        );
-        assert_eq!(
-            *watcher.locator_uuid_map.lock().unwrap(),
-            HashMap::from_iter([(appointment.locator(), HashSet::from_iter([uuid]))])
-        );
-
-        // Adding the same appointment (same locator) with a different UUID should be seen as a collision.
-        // This means that a different user is sending an appointment with the same locator.
-        let new_uuid = generate_uuid();
-        assert_eq!(
-            watcher.store_appointment(new_uuid, &appointment),
-            StoredAppointment::Collision,
-        );
-        assert_eq!(
-            *watcher.appointments.lock().unwrap(),
-            HashMap::from_iter([
-                (uuid, appointment.get_summary()),
-                (new_uuid, appointment.get_summary())
-            ])
-        );
-        assert_eq!(
-            *watcher.locator_uuid_map.lock().unwrap(),
-            HashMap::from_iter([(appointment.locator(), HashSet::from_iter([uuid, new_uuid]))])
+            watcher.get_all_watcher_appointments(),
+            HashMap::from_iter([(uuid, appointment)])
         );
     }
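test_store_appointment now leans on `UUID::new(locator, user_id)` being a pure function of its inputs: generating a second appointment for the same dispute txid and user must reproduce the same id, which is exactly what `assert_eq!(new_uuid, uuid)` pins down. A toy model of that property (the digest and output width here are illustrative assumptions, not the crate's actual derivation):

```rust
use bitcoin::hashes::{sha256, Hash};

/// Illustrative only: any deterministic digest over (locator, user_id)
/// gives the property the test asserts.
fn toy_uuid(locator: [u8; 16], user_id: [u8; 33]) -> [u8; 20] {
    let mut preimage = Vec::with_capacity(16 + 33);
    preimage.extend_from_slice(&locator);
    preimage.extend_from_slice(&user_id);
    sha256::Hash::hash(&preimage).into_inner()[..20]
        .try_into()
        .unwrap()
}
```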
@@ -1187,7 +926,7 @@ mod tests {
         );
         // In this case the appointment is kept in the Responder and, therefore, in the database
         assert!(watcher.responder.has_tracker(uuid));
-        assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_some());
+        assert!(watcher.dbm.lock().unwrap().appointment_exists(uuid));

         // A properly formatted but invalid transaction should be rejected by the Responder
         // Update the Responder with a new Carrier that will reject the transaction
@@ -1205,19 +944,19 @@ mod tests {
         );
         // In this case the appointment is not kept in the Responder nor in the database
         assert!(!watcher.responder.has_tracker(uuid));
-        assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_none());
+        assert!(!watcher.dbm.lock().unwrap().appointment_exists(uuid));

         // Invalid triggered appointments should not be passed to the Responder
         // Use a dispute_tx that does not match the appointment to replicate a decryption error
         // (the same applies to invalid formatted transactions)
-        let uuid = generate_uuid();
+        let (uuid, appointment) = generate_dummy_appointment_with_user(user_id, None);
         assert_eq!(
-            watcher.store_triggered_appointment(uuid, &appointment, user_id, &get_random_tx()),
+            watcher.store_triggered_appointment(uuid, &appointment, user_id, &dispute_tx),
             TriggeredAppointment::Invalid,
         );
         // The appointment is not kept anywhere
         assert!(!watcher.responder.has_tracker(uuid));
-        assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_none());
+        assert!(!watcher.dbm.lock().unwrap().appointment_exists(uuid));
     }

     #[tokio::test]
@@ -1225,9 +964,10 @@ mod tests {
         let mut chain = Blockchain::default().with_height(START_HEIGHT);
         let (watcher, _s) = init_watcher(&mut chain).await;

-        let appointment = generate_dummy_appointment(None).inner;
+        let dispute_tx = get_random_tx();
+        let appointment = generate_dummy_appointment(Some(&dispute_tx.txid())).inner;
-
         // If the user cannot be properly identified, the request will fail. This can be simulated by providing a wrong signature
         let wrong_sig = String::from_utf8((0..65).collect()).unwrap();
         assert!(matches!(
             watcher.get_appointment(appointment.locator, &wrong_sig),
@@ -1260,29 +1000,16 @@ mod tests {
         // If the appointment is in the Responder (in the form of a Tracker), data should also be returned

-        // Remove the data from the Watcher memory first (data is kept in the db tho)
+        // Remove the data from the Watcher memory first.
         let uuid = UUID::new(appointment.locator, user_id);
-        watcher.appointments.lock().unwrap().remove(&uuid);
-        watcher
-            .locator_uuid_map
-            .lock()
-            .unwrap()
-            .remove(&appointment.locator);

         // Add data to the Responder
-        let breach = get_random_breach();
-        let tracker = TransactionTracker::new(
-            breach.clone(),
-            user_id,
-            ConfirmationStatus::InMempoolSince(chain.get_block_count()),
-        );
-
-        watcher.responder.add_tracker(
-            uuid,
-            breach,
-            user_id,
-            ConfirmationStatus::InMempoolSince(chain.get_block_count()),
-        );
+        let breach = Breach::new(dispute_tx, get_random_tx());
+        let status = ConfirmationStatus::InMempoolSince(chain.get_block_count());
+        watcher
+            .responder
+            .add_tracker(uuid, breach.clone(), user_id, status);
+        let tracker = TransactionTracker::new(breach, user_id, status);

         let tracker_message = format!("get appointment {}", appointment.locator);
         let tracker_signature = cryptography::sign(tracker_message.as_bytes(), &user_sk).unwrap();
@@ -1297,8 +1024,8 @@ mod tests {
             AppointmentInfo::Tracker(t) => assert_eq!(t, tracker),
         }

-        // If the user does exists but the requested locator does not belong to any of their associated appointments, NotFound
-        // should be returned.
+        // If the user does exist but the requested locator does not belong to any of their associated appointments,
+        // NotFound should be returned.
         let (user2_sk, user2_pk) = get_random_keypair();
         let user2_id = UserId(user2_pk);
         watcher.register(user2_id).unwrap();
@@ -1312,12 +1039,7 @@ mod tests {
         // If the user subscription has expired, the request will fail
         watcher
             .gatekeeper
-            .get_registered_users()
-            .lock()
-            .unwrap()
-            .get_mut(&user_id)
-            .unwrap()
-            .subscription_expiry = START_HEIGHT as u32;
+            .add_outdated_user(user_id, START_HEIGHT as u32);

         assert!(matches!(
             watcher.get_appointment(appointment.locator, &signature),
@@ -1328,276 +1050,168 @@ mod tests {

     #[tokio::test]
     async fn test_get_breaches() {
         let mut chain = Blockchain::default().with_height_and_txs(START_HEIGHT, 10);
-        let txs = chain.blocks.last().unwrap().txdata.clone();
         let (watcher, _s) = init_watcher(&mut chain).await;

         // Let's create some locators based on the transactions in the last block
-        let mut locator_tx_map = HashMap::new();
-        for tx in txs {
-            locator_tx_map.insert(Locator::new(tx.txid()), tx.clone());
-        }
+        let locator_tx_map: HashMap<_, _> = (0..10)
+            .map(|_| get_random_tx())
+            .map(|tx| (Locator::new(tx.txid()), tx))
+            .collect();
+
+        let (user_sk, user_pk) = get_random_keypair();
+        let user_id = UserId(user_pk);
+        watcher.register(user_id).unwrap();

         // Add some of them to the Watcher
-        for (i, locator) in locator_tx_map.keys().enumerate() {
+        let mut breaches = HashMap::new();
+        for (i, (l, tx)) in locator_tx_map.iter().enumerate() {
+            // Track some of these transactions.
             if i % 2 == 0 {
-                watcher
-                    .locator_uuid_map
-                    .lock()
-                    .unwrap()
-                    .insert(*locator, HashSet::from_iter(vec![generate_uuid()]));
+                let appointment = generate_dummy_appointment(Some(&tx.txid())).inner;
+                let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+                watcher.add_appointment(appointment, signature).unwrap();
+                breaches.insert(*l, tx.clone());
             }
         }

         // Check that breaches are correctly detected
-        let breaches = watcher.get_breaches(locator_tx_map);
-        let locator_uuid_map = watcher.locator_uuid_map.lock().unwrap();
-        assert!(
-            breaches.len() == locator_uuid_map.len()
-                && breaches.keys().all(|k| locator_uuid_map.contains_key(k))
-        );
+        assert_eq!(watcher.get_breaches(locator_tx_map), breaches);
     }

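With the in-memory locator map gone, `get_breaches` can now be asserted by value: it keeps exactly the candidate locators the tower holds an appointment for. A minimal sketch of that filtering, assuming some `watched` set of locators is available to the Watcher (the lookup source is an assumption):

```rust
use std::collections::{HashMap, HashSet};

use bitcoin::Transaction;
use teos_common::appointment::Locator;

/// Sketch: given (locator, tx) candidates derived from a new block, keep
/// only the ones matching a stored appointment.
fn get_breaches_sketch(
    candidates: HashMap<Locator, Transaction>,
    watched: &HashSet<Locator>,
) -> HashMap<Locator, Transaction> {
    candidates
        .into_iter()
        .filter(|(locator, _)| watched.contains(locator))
        .collect()
}
```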
     #[tokio::test]
-    async fn test_filter_breaches() {
-        let mut chain = Blockchain::default().with_height_and_txs(START_HEIGHT, 12);
-        let txs = chain.blocks.last().unwrap().txdata.clone();
+    async fn test_handle_breaches_accepted() {
+        let mut chain = Blockchain::default().with_height_and_txs(START_HEIGHT, 10);
         let (watcher, _s) = init_watcher(&mut chain).await;

         // Let's create some locators based on the transactions in the last block
-        let mut locator_tx_map = HashMap::new();
-        for tx in txs {
-            locator_tx_map.insert(Locator::new(tx.txid()), tx.clone());
-        }
+        let breaches: HashMap<_, _> = (0..10)
+            .map(|_| get_random_tx())
+            .map(|tx| (Locator::new(tx.txid()), tx))
+            .collect();

-        // Add some of them to the Watcher
-        let mut local_valid = Vec::new();
-        let mut local_invalid = Vec::new();
-
-        for (i, (locator, tx)) in locator_tx_map.iter().enumerate() {
-            let uuid = generate_uuid();
-            let tx_id = tx.txid();
-            let mut dispute_txid = None;
-
-            // Add 1/3 as valid breaches, 1/3 as invalid, leave 1/3 out
-            if i % 3 < 2 {
-                match i % 3 {
-                    0 => {
-                        dispute_txid = Some(&tx_id);
-                        local_valid.push(uuid);
-                    }
-                    _ => local_invalid.push(uuid),
-                }
+        let (user_sk, user_pk) = get_random_keypair();
+        let user_id = UserId(user_pk);
+        watcher.register(user_id).unwrap();

-                let appointment = generate_dummy_appointment(dispute_txid);
+        // Let the watcher track these breaches.
+        for (_, tx) in breaches.iter() {
+            let appointment = generate_dummy_appointment(Some(&tx.txid())).inner;
+            let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+            watcher.add_appointment(appointment, signature).unwrap();
+        }
-                watcher
-                    .appointments
-                    .lock()
-                    .unwrap()
-                    .insert(uuid, appointment.get_summary());
-                watcher
-                    .locator_uuid_map
-                    .lock()
-                    .unwrap()
-                    .insert(*locator, HashSet::from_iter(vec![uuid]));
+        assert!(watcher.handle_breaches(breaches).is_none())
     }

-                // Store data in the database (the user needs to be there as well since it is a FK for appointments)
-                store_appointment_and_fks_to_db(&watcher.dbm.lock().unwrap(), uuid, &appointment);
-            }
-        }
+    #[tokio::test]
+    async fn test_handle_breaches_rejected_decryption() {
+        let mut chain = Blockchain::default().with_height_and_txs(START_HEIGHT, 10);
+        let (watcher, _s) = init_watcher(&mut chain).await;

-        let breaches = watcher.get_breaches(locator_tx_map.clone());
-        let (valid, invalid) = watcher.filter_breaches(breaches);
+        // Let's create some locators based on the transactions in the last block
+        let breaches: HashMap<_, _> = (0..10)
+            .map(|_| get_random_tx())
+            .map(|tx| (Locator::new(tx.txid()), tx))
+            .collect();

-        // Check valid + invalid add up to 2/3
-        assert_eq!(2 * locator_tx_map.len() / 3, valid.len() + invalid.len());
+        let (user_sk, user_pk) = get_random_keypair();
+        let user_id = UserId(user_pk);
+        watcher.register(user_id).unwrap();

-        // Check valid breaches match
-        assert!(valid.len() == local_valid.len() && valid.keys().all(|k| local_valid.contains(k)));
+        let mut rejected = HashSet::new();
+        // Let the watcher track these breaches.
+        for (i, (_, tx)) in breaches.iter().enumerate() {
+            let (uuid, appointment) =
+                generate_dummy_appointment_with_user(user_id, Some(&tx.txid()));
+            let mut appointment = appointment.inner;
+            if i % 2 == 0 {
+                // Malform some appointments
+                appointment.encrypted_blob.reverse();
+                rejected.insert(uuid);
+            };
+            let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+            watcher.add_appointment(appointment, signature).unwrap();
+        }

-        // Check invalid breaches match
-        assert!(
-            invalid.len() == local_invalid.len()
-                && invalid.keys().all(|k| local_invalid.contains(k))
+        assert_eq!(
+            rejected,
+            HashSet::from_iter(watcher.handle_breaches(breaches).unwrap())
         );
-
-        // All invalid breaches should be AED errors (the decryption key was invalid)
-        invalid
-            .values()
-            .all(|v| matches!(v, cryptography::DecryptingError::AED { .. }));
     }

     #[tokio::test]
-    async fn test_delete_appointments_from_memory() {
-        let mut chain = Blockchain::default().with_height(START_HEIGHT);
+    async fn test_handle_breaches_rejected_by_responder_backend() {
+        let mut chain = Blockchain::default().with_height_and_txs(START_HEIGHT, 10);
         let (watcher, _s) = init_watcher(&mut chain).await;

-        // Add some appointments both to memory and to the database
-        let mut to_be_deleted = HashMap::new();
+        // Replace the carrier with an erroneous one
+        let (carrier, _s) = create_carrier(
+            MockedServerQuery::Error(rpc_errors::RPC_VERIFY_ERROR as i64),
+            chain.tip().deref().height,
+        );
+        *watcher.responder.get_carrier().lock().unwrap() = carrier;

-        for _ in 0..10 {
-            let uuid = generate_uuid();
-            let appointment = generate_dummy_appointment(None);
-            watcher
-                .appointments
-                .lock()
-                .unwrap()
-                .insert(uuid, appointment.get_summary());
-            watcher
-                .locator_uuid_map
-                .lock()
-                .unwrap()
-                .insert(appointment.locator(), HashSet::from_iter([uuid]));
+        // Let's create some locators based on the transactions in the last block
+        let breaches: HashMap<_, _> = (0..10)
+            .map(|_| get_random_tx())
+            .map(|tx| (Locator::new(tx.txid()), tx))
+            .collect();

-            store_appointment_and_fks_to_db(&watcher.dbm.lock().unwrap(), uuid, &appointment);
-            to_be_deleted.insert(uuid, appointment.locator());
+        let (user_sk, user_pk) = get_random_keypair();
+        let user_id = UserId(user_pk);
+        watcher.register(user_id).unwrap();
+
+        let mut uuids = HashSet::new();
+        // Let the watcher track these breaches.
+        for (_, (_, tx)) in breaches.iter().enumerate() {
+            let (uuid, appointment) =
+                generate_dummy_appointment_with_user(user_id, Some(&tx.txid()));
+            let appointment = appointment.inner;
+            let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+            watcher.add_appointment(appointment, signature).unwrap();
+            uuids.insert(uuid);
         }

-        // Delete and check data is not in memory (the reason does not matter for the test)
-        watcher.delete_appointments_from_memory(
-            &to_be_deleted.keys().cloned().collect(),
-            DeletionReason::Outdated,
+        assert_eq!(
+            uuids,
+            HashSet::from_iter(watcher.handle_breaches(breaches).unwrap())
         );
-
-        for (uuid, locator) in to_be_deleted {
-            // Data is not in memory
-            assert!(!watcher.appointments.lock().unwrap().contains_key(&uuid));
-            assert!(!watcher
-                .locator_uuid_map
-                .lock()
-                .unwrap()
-                .contains_key(&locator));
-
-            // But it can be found in the database
-            assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_some());
-        }
     }

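Taken together, these tests pin down a contract for `handle_breaches`: `None` when every triggered appointment decrypts and is accepted by the Responder, and the offending UUIDs otherwise, whether the blob was malformed or the backend rejected the penalty. A control-flow sketch consistent with that contract; `appointments_for` and the `Accepted` variant are assumptions for illustration, not the crate's API:

```rust
use std::collections::HashMap;

/// Sketch: trigger every appointment matching a breached locator and
/// collect the ones that fail, for whatever reason.
fn handle_breaches_sketch(
    watcher: &Watcher,
    breaches: HashMap<Locator, Transaction>,
) -> Option<Vec<UUID>> {
    let mut rejected = Vec::new();
    for (locator, dispute_tx) in breaches {
        for (uuid, appointment, user_id) in watcher.appointments_for(locator) {
            match watcher.store_triggered_appointment(uuid, &appointment, user_id, &dispute_tx) {
                TriggeredAppointment::Accepted => (),
                _ => rejected.push(uuid),
            }
        }
    }
    if rejected.is_empty() {
        None
    } else {
        Some(rejected)
    }
}
```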
     #[tokio::test]
-    async fn test_delete_appointments() {
-        // TODO: This is an adaptation of Responder::test_delete_trackers, merge together once the method
-        // is implemented using generics.
-        let mut chain = Blockchain::default().with_height(START_HEIGHT);
+    async fn test_handle_breaches_rejected_by_responder_malformed() {
+        let mut chain = Blockchain::default().with_height_and_txs(START_HEIGHT, 10);
         let (watcher, _s) = init_watcher(&mut chain).await;

-        // Delete appointments removes data from the appointments and locator_uuid_map
-        // Add data to the map first
-        let mut all_appointments = HashSet::new();
-        let mut target_appointments = HashSet::new();
-        let mut uuid_locator_map = HashMap::new();
-        let mut locator_with_multiple_uuids = HashSet::new();
-        let mut updated_users = HashMap::new();
-
-        for i in 0..10 {
-            let uuid = generate_uuid();
-            let appointment = generate_dummy_appointment(None);
-            watcher
-                .appointments
-                .lock()
-                .unwrap()
-                .insert(uuid, appointment.clone().get_summary());
-            watcher
-                .locator_uuid_map
-                .lock()
-                .unwrap()
-                .insert(appointment.locator(), HashSet::from_iter([uuid]));
-
-            // Add data to the database to check data deletion
-            store_appointment_and_fks_to_db(&watcher.dbm.lock().unwrap(), uuid, &appointment);
-
-            // Make it so some of the locators have multiple associated uuids
-            if i % 3 == 0 {
-                // We don't need to store this properly since they will not be targeted
-                let uuid2 = generate_uuid();
-                watcher
-                    .locator_uuid_map
-                    .lock()
-                    .unwrap()
-                    .get_mut(&appointment.locator())
-                    .unwrap()
-                    .insert(uuid2);
-                locator_with_multiple_uuids.insert(appointment.locator());
-            }
+        // Let's create some locators based on the transactions in the last block
+        let breaches: HashMap<_, _> = (0..10)
+            .map(|_| get_random_tx())
+            .map(|tx| (Locator::new(tx.txid()), tx))
+            .collect();

-            all_appointments.insert(uuid);
-            uuid_locator_map.insert(uuid, appointment.locator());
+        let (user_sk, user_pk) = get_random_keypair();
+        let user_id = UserId(user_pk);
+        watcher.register(user_id).unwrap();

-            // Add some appointments to be deleted
+        let mut rejected_breaches = HashSet::new();
+        // Let the watcher track these breaches.
+        for (i, (_, tx)) in breaches.iter().enumerate() {
+            let (uuid, appointment) =
+                generate_dummy_appointment_with_user(user_id, Some(&tx.txid()));
+            let mut appointment = appointment.inner;
             if i % 2 == 0 {
-                // Users will also be updated once the data is deleted.
-                // We can made up the numbers here just to check they are updated.
-                target_appointments.insert(uuid);
-                updated_users.insert(
-                    appointment.user_id,
-                    UserInfo::new(
-                        AVAILABLE_SLOTS + i,
-                        SUBSCRIPTION_START + i,
-                        SUBSCRIPTION_EXPIRY + i,
-                    ),
-                );
-            }
+                // Malform some appointments; they should be returned as rejected.
+                appointment.encrypted_blob.reverse();
+                rejected_breaches.insert(uuid);
+            };
+            let signature = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
+            watcher.add_appointment(appointment, signature).unwrap();
         }

-        // The deletion reason does not matter here, it only changes the logged message when deleting data
-        watcher.delete_appointments(
-            &target_appointments,
-            &updated_users,
-            DeletionReason::Accepted,
+        assert_eq!(
+            rejected_breaches,
+            HashSet::from_iter(watcher.handle_breaches(breaches).unwrap())
         );
-
-        // Only appointments in the target_appointments map should have been removed from
-        // the Watcher's data structures.
-        for uuid in all_appointments {
-            if target_appointments.contains(&uuid) {
-                assert!(!watcher.appointments.lock().unwrap().contains_key(&uuid));
-                assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_none());
-
-                let locator = &uuid_locator_map[&uuid];
-                // If the penalty had more than one associated uuid, only one has been deleted
-                // (because that's how the test has been designed)
-                if locator_with_multiple_uuids.contains(locator) {
-                    assert_eq!(
-                        watcher
-                            .locator_uuid_map
-                            .lock()
-                            .unwrap()
-                            .get(locator)
-                            .unwrap()
-                            .len(),
-                        1
-                    );
-                } else {
-                    // Otherwise the whole structure is removed, given it is now empty
-                    assert!(!watcher
-                        .locator_uuid_map
-                        .lock()
-                        .unwrap()
-                        .contains_key(locator));
-                }
-            } else {
-                assert!(watcher.appointments.lock().unwrap().contains_key(&uuid));
-                assert!(watcher
-                    .locator_uuid_map
-                    .lock()
-                    .unwrap()
-                    .contains_key(&uuid_locator_map[&uuid]));
-                assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_some());
-            }
-        }
-
-        // The users that needed to be updated in the database have been (just checking the slot count)
-        for (id, info) in updated_users {
-            assert_eq!(
-                watcher
-                    .dbm
-                    .lock()
-                    .unwrap()
-                    .load_user(id)
-                    .unwrap()
-                    .available_slots,
-                info.available_slots
-            );
-        }
     }

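The reworked test_block_connected hunk that follows repeats one pattern for every mined block: the Gatekeeper consumes the block before the Watcher, so outdated users (and their appointments) are purged before the Watcher reacts. Extracted as a hypothetical helper for emphasis (the helper and its name are assumptions; the calls themselves come from the hunk below):

```rust
/// Sketch of the per-block sequence the test uses: Gatekeeper first,
/// then the Watcher, both at the same height.
fn connect_block(watcher: &Watcher, chain: &mut Blockchain, txs: Option<Vec<Transaction>>) {
    let block = chain.generate(txs);
    watcher
        .gatekeeper
        .block_connected(&block, chain.get_block_count());
    watcher.block_connected(&block, chain.get_block_count());
}
```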
     #[tokio::test]
@@ -1623,9 +1237,11 @@ mod tests {
         // If there are appointments to watch, the Watcher will:
         // - Check if any new transaction is a trigger
         // - Check if a trigger is valid, if so pass the data to the Responder
-        // - Delete invalid appointments.
+        // - Delete invalid appointments (decryption error or rejection by responder).
         // - Delete appointments that have been outdated (i.e. have expired without a trigger)
-        // - Delete invalid appointments also from the Gatekeeper (not outdated tough, the GK will take care of those via it's own Listen)
+        // - Delete invalid appointments also from the Gatekeeper
+        //
+        // We will also test that appointments for outdated users are removed by the GK.

         // Let's first check how data gets outdated (create two users, add an appointment to both and outdate only one)
         let (user_sk, user_pk) = get_random_keypair();
         let (user2_sk, user2_pk) = get_random_keypair();
         let user_id = UserId(user_pk);
         let user2_id = UserId(user2_pk);

         watcher.register(user_id).unwrap();
         watcher.register(user2_id).unwrap();

-        let appointment = generate_dummy_appointment(None);
-        let uuid1 = UUID::new(appointment.locator(), user_id);
-        let uuid2 = UUID::new(appointment.locator(), user2_id);
+        let appointment = generate_dummy_appointment(None).inner;
+        let uuid1 = UUID::new(appointment.locator, user_id);
+        let uuid2 = UUID::new(appointment.locator, user2_id);

-        let user_sig = cryptography::sign(&appointment.inner.to_vec(), &user_sk).unwrap();
-        watcher
-            .add_appointment(appointment.inner.clone(), user_sig)
-            .unwrap();
-        let user2_sig = cryptography::sign(&appointment.inner.to_vec(), &user2_sk).unwrap();
+        let user_sig = cryptography::sign(&appointment.to_vec(), &user_sk).unwrap();
         watcher
-            .add_appointment(appointment.inner.clone(), user2_sig)
+            .add_appointment(appointment.clone(), user_sig)
             .unwrap();
+        let user2_sig = cryptography::sign(&appointment.to_vec(), &user2_sk).unwrap();
+        watcher.add_appointment(appointment, user2_sig).unwrap();

+        // Outdate the first user's registration.
         watcher
             .gatekeeper
-            .get_registered_users()
-            .lock()
-            .unwrap()
-            .get_mut(&user_id)
-            .unwrap()
-            .subscription_expiry = chain.get_block_count() - EXPIRY_DELTA + 1;
+            .add_outdated_user(user_id, chain.get_block_count());

         // Both appointments can be found before mining a block, only user2's can be found afterwards
-        for uuid in &[uuid1, uuid2] {
-            assert!(watcher.appointments.lock().unwrap().contains_key(uuid));
-            assert!(
-                watcher.locator_uuid_map.lock().unwrap()[&appointment.locator()].contains(uuid)
-            );
+        for &uuid in &[uuid1, uuid2] {
+            assert!(watcher.dbm.lock().unwrap().appointment_exists(uuid));
         }
-        assert!(
-            watcher.gatekeeper.get_registered_users().lock().unwrap()[&user_id]
-                .appointments
-                .contains_key(&uuid1)
-        );
-        assert!(
-            watcher.gatekeeper.get_registered_users().lock().unwrap()[&user2_id]
-                .appointments
-                .contains_key(&uuid2)
-        );

-        watcher.block_connected(&chain.generate(None), chain.get_block_count());
+        // We always need to connect the gatekeeper first so it cleans up outdated users and their data.
+        let block = chain.generate(None);
+        watcher
+            .gatekeeper
+            .block_connected(&block, chain.get_block_count());
+        watcher.block_connected(&block, chain.get_block_count());

-        assert!(!watcher.appointments.lock().unwrap().contains_key(&uuid1));
-        assert!(!watcher.locator_uuid_map.lock().unwrap()[&appointment.locator()].contains(&uuid1));
-        // Data is still in the Gatekeeper and in the database, since it'll be deleted in cascade by the
-        // Gatekeeper on user's deletion (given the user was outdated in the test).
-        assert!(
-            watcher.gatekeeper.get_registered_users().lock().unwrap()[&user_id]
-                .appointments
-                .contains_key(&uuid1)
-        );
-        assert!(watcher
-            .dbm
+        // uuid1 and user1 should have been deleted while uuid2 and user2 still exist.
+        assert!(!watcher.dbm.lock().unwrap().appointment_exists(uuid1));
+        assert!(!watcher
+            .gatekeeper
+            .get_registered_users()
             .lock()
             .unwrap()
-            .load_appointment(uuid1)
-            .is_some());
-
-        assert!(watcher.appointments.lock().unwrap().contains_key(&uuid2));
-        assert!(watcher.locator_uuid_map.lock().unwrap()[&appointment.locator()].contains(&uuid2));
-        assert!(
-            watcher.gatekeeper.get_registered_users().lock().unwrap()[&user2_id]
-                .appointments
-                .contains_key(&uuid2)
-        );
+            .contains_key(&user_id));
+        assert!(watcher.dbm.lock().unwrap().appointment_exists(uuid2));
         assert!(watcher
-            .dbm
+            .gatekeeper
+            .get_registered_users()
             .lock()
             .unwrap()
-            .load_appointment(uuid2)
-            .is_some());
+            .contains_key(&user2_id));

         // Check triggers. Add a new appointment and trigger it with valid data.
         let dispute_tx = get_random_tx();
-        let appointment = generate_dummy_appointment(Some(&dispute_tx.txid()));
+        let (uuid, appointment) =
+            generate_dummy_appointment_with_user(user2_id, Some(&dispute_tx.txid()));
         let sig = cryptography::sign(&appointment.inner.to_vec(), &user2_sk).unwrap();
-        let uuid = UUID::new(appointment.locator(), user2_id);
         watcher.add_appointment(appointment.inner, sig).unwrap();
-        assert!(watcher.appointments.lock().unwrap().contains_key(&uuid));
-
-        watcher.block_connected(
-            &chain.generate(Some(vec![dispute_tx])),
-            chain.get_block_count(),
-        );
+        assert!(watcher.dbm.lock().unwrap().appointment_exists(uuid));

-        // Data should have been moved to the Responder and kept in the Gatekeeper, since it is still part of the system.
-        assert!(!watcher.appointments.lock().unwrap().contains_key(&uuid));
-        assert!(watcher
-            .responder
-            .get_trackers()
-            .lock()
-            .unwrap()
-            .contains_key(&uuid));
-        assert!(
-            watcher.gatekeeper.get_registered_users().lock().unwrap()[&user2_id]
-                .appointments
-                .contains_key(&uuid)
-        );
+        let block = chain.generate(Some(vec![dispute_tx]));
+        watcher
+            .gatekeeper
+            .block_connected(&block, chain.get_block_count());
+        watcher.block_connected(&block, chain.get_block_count());

         // Data should have been kept in the database
-        assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_some());
-        assert!(watcher.dbm.lock().unwrap().load_tracker(uuid).is_some());
+        assert!(watcher.responder.has_tracker(uuid));
+
+        // Check invalid triggers. Add a new appointment and trigger it with invalid data.
+        let dispute_tx = get_random_tx();
+        let (uuid, mut appointment) =
+            generate_dummy_appointment_with_user(user2_id, Some(&dispute_tx.txid()));
+        // Modify the encrypted blob so the data is invalid.
+        appointment.inner.encrypted_blob.reverse();
+        let sig = cryptography::sign(&appointment.inner.to_vec(), &user2_sk).unwrap();
+        watcher.add_appointment(appointment.inner, sig).unwrap();
+
+        let block = chain.generate(Some(vec![dispute_tx]));
+        watcher
+            .gatekeeper
+            .block_connected(&block, chain.get_block_count());
+        watcher.block_connected(&block, chain.get_block_count());
+
+        // Data should have been wiped from the database
+        assert!(!watcher.responder.has_tracker(uuid));
+        assert!(!watcher.dbm.lock().unwrap().appointment_exists(uuid));

         // Check triggering with a valid formatted transaction but that is rejected by the Responder.
         let dispute_tx = get_random_tx();
-        let appointment = generate_dummy_appointment(Some(&dispute_tx.txid()));
+        let (uuid, appointment) =
+            generate_dummy_appointment_with_user(user2_id, Some(&dispute_tx.txid()));
         let sig = cryptography::sign(&appointment.inner.to_vec(), &user2_sk).unwrap();
-        let uuid = UUID::new(appointment.locator(), user2_id);
         watcher.add_appointment(appointment.inner, sig).unwrap();

         // Set the carrier response
-        let (carrier, _as) = create_carrier(
+        // Both non-decryptable blobs and blobs with invalid transactions will yield an invalid trigger.
+        let (carrier, _s) = create_carrier(
             MockedServerQuery::Error(rpc_errors::RPC_VERIFY_ERROR as i64),
             chain.tip().deref().height,
         );
         *watcher.responder.get_carrier().lock().unwrap() = carrier;

-        watcher.block_connected(
-            &chain.generate(Some(vec![dispute_tx])),
-            chain.get_block_count(),
-        );
-
-        // Data should not be in the Responder, in the Watcher nor in the Gatekeeper
-        assert!(!watcher.appointments.lock().unwrap().contains_key(&uuid));
-        assert!(!watcher
-            .responder
-            .get_trackers()
-            .lock()
-            .unwrap()
-            .contains_key(&uuid));
-        assert!(
-            !watcher.gatekeeper.get_registered_users().lock().unwrap()[&user2_id]
-                .appointments
-                .contains_key(&uuid)
-        );
-        // Data should also have been deleted from the database
-        assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_none());
-        assert!(watcher.dbm.lock().unwrap().load_tracker(uuid).is_none());
-
-        // Checks invalid triggers. Add a new appointment and trigger it with invalid data.
-        let dispute_tx = get_random_tx();
-        let mut appointment = generate_dummy_appointment(Some(&dispute_tx.txid()));
-        // Modify the encrypted blob so the data is invalid.
-        //Both non-decryptable blobs and blobs with invalid transactions will yield an invalid trigger
-        appointment.inner.encrypted_blob.reverse();
-        let sig = cryptography::sign(&appointment.inner.to_vec(), &user2_sk).unwrap();
-        let uuid = UUID::new(appointment.locator(), user2_id);
+        let block = chain.generate(Some(vec![dispute_tx]));
         watcher
-            .add_appointment(appointment.inner.clone(), sig)
-            .unwrap();
-
-        watcher.block_connected(
-            &chain.generate(Some(vec![dispute_tx])),
-            chain.get_block_count(),
-        );
+            .gatekeeper
+            .block_connected(&block, chain.get_block_count());
+        watcher.block_connected(&block, chain.get_block_count());

-        // Data has been wiped since it was invalid
-        assert!(!watcher.appointments.lock().unwrap().contains_key(&uuid));
-        assert!(!watcher
-            .responder
-            .get_trackers()
-            .lock()
-            .unwrap()
-            .contains_key(&uuid));
-        assert!(
-            !watcher.gatekeeper.get_registered_users().lock().unwrap()[&user2_id]
-                .appointments
-                .contains_key(&uuid)
-        );
-        assert!(watcher.dbm.lock().unwrap().load_appointment(uuid).is_none());
+        // Data should have been wiped from the database
+        assert!(!watcher.responder.has_tracker(uuid));
+        assert!(!watcher.dbm.lock().unwrap().appointment_exists(uuid));
     }

     #[tokio::test]