From f0a81f314adc60db753e3e6050c8fe46bfb96f43 Mon Sep 17 00:00:00 2001
From: Amirreza Amirfatahi <amir.r.amirfatahi@gmail.com>
Date: Wed, 22 Jan 2025 23:09:28 -0500
Subject: [PATCH] refactor how mock data loading is done

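Mock data loading for the integration tests previously lived in
tests/utils.rs and was gated behind the SYNC_DB environment variable.
Move it into a dedicated `mockdb` binary (src/mock_db.rs) that can sync
the graph, Redis, or both, with the Neo4j container name configurable
through GRAPH_CONTAINER_NAME. The free `setup` function becomes
`StackManager::setup`, CI loads the mock data with `cargo run --bin mockdb`
before `cargo test`, and the affected service tests switch to
`tokio_shared_rt::test(shared)` so they can share one runtime.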
---
 .github/workflows/test.yml                  |  5 +-
 Cargo.toml                                  |  4 ++
 README.md                                   | 33 ++++-----
 benches/setup.rs                            |  4 +-
 examples/from_file.rs                       |  4 +-
 src/lib.rs                                  |  2 +-
 src/mock_db.rs                              | 82 +++++++++++++++++++++
 src/models/post/details.rs                  |  6 +-
 src/models/user/details.rs                  |  4 +-
 src/service.rs                              |  4 +-
 src/setup.rs                                | 78 ++++++++++----------
 src/watcher.rs                              |  5 +-
 tests/service/post/view.rs                  | 36 +++++-----
 tests/service/stream/post/reach/timeline.rs | 12 ++--
 tests/utils.rs                              | 63 +---------------
 tests/watcher/utils/watcher.rs              |  6 +-
 16 files changed, 188 insertions(+), 160 deletions(-)
 create mode 100644 src/mock_db.rs
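
A quick sketch of the resulting local workflow, derived from the README and CI
changes below (it assumes the Docker Compose stack is already running; the
`my-neo4j` container name is only an illustration of the override):

```bash
# Reload both databases with the mock data set,
# or pass `graph` / `redis` to sync just one of them.
cargo run --bin mockdb

# Point the graph sync at a differently named Neo4j container (illustrative name).
GRAPH_CONTAINER_NAME=my-neo4j cargo run --bin mockdb graph

# Integration tests no longer need SYNC_DB.
cargo test
```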

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index de5cb12d..b2273ea1 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -56,9 +56,12 @@ jobs:
 
       # - name: Install dependencies
       #   run: cargo build --release
+
+      - name: Load mock data
+        run: cargo run --bin mockdb
 
       - name: Run integration tests
-        run: SYNC_DB=true cargo test
+        run: cargo test
 
       - name: Tear down Docker Compose
         if: always()
diff --git a/Cargo.toml b/Cargo.toml
index b8700d12..130b136f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -63,6 +63,10 @@ path = "src/service.rs"
 name = "watcher"
 path = "src/watcher.rs"
 
+[[bin]]
+name = "mockdb"
+path = "src/mock_db.rs"
+
 [[bench]]
 name = "user"
 harness = false
diff --git a/README.md b/README.md
index ba2a2098..0af23bfc 100644
--- a/README.md
+++ b/README.md
@@ -90,18 +90,22 @@ To contribute to Nexus, follow these steps:
 
 ### Running Tests
 
-To run all tests:
+Running tests requires loading mock data into Neo4j and Redis.
 
+Use the `mockdb` binary to load the mock data:
 ```bash
-cargo test
+cargo run --bin mockdb [database]
 ```
 
-You can optionally pass the `SYNC_DB` env var to control setting up the testing data in mocks folder.
-You can pass:
+`database` is optional and can be either `graph` or `redis`. If omitted, both databases are synced.
+
+You can optionally set the `GRAPH_CONTAINER_NAME` env var if your Neo4j Docker container uses a different name; it defaults to `neo4j`.
 
-- `true`: To set up the data in graph and reindex.
-- `false`: To skip setting up database for test. (when you've already done so)
-- `graph`: Only run the graph database mocks.
+Then, to run all tests:
+
+```bash
+cargo test
+```
 
 To test specific modules or features:
 
@@ -118,20 +122,7 @@ cargo bench --bench user get_user_view_by_id
 
 ## ⚠️ Troubleshooting
 
-If tests or the development environment seem out of sync, follow these steps to reset:
-
-1. **Reset Neo4j**:
-
-   ```bash
-   docker exec neo4j bash -c "cypher-shell -u neo4j -p 12345678 'MATCH (n) DETACH DELETE n;'"
-   docker exec neo4j bash /db-graph/run-queries.sh
-   ```
-
-2. **Re-index Redis Cache**:
-
-   ```bash
-   REINDEX=true cargo run
-   ```
+If tests or the development environment seem out of sync, follow the [Running Tests](#running-tests) steps to reload the mock data.
 
 ## 🌐 Useful Links
 
diff --git a/benches/setup.rs b/benches/setup.rs
index 99dc687b..9701945b 100644
--- a/benches/setup.rs
+++ b/benches/setup.rs
@@ -1,4 +1,4 @@
-use pubky_nexus::{setup, Config};
+use pubky_nexus::{Config, StackManager};
 use std::{env, sync::Once};
 use tokio::runtime::Runtime;
 
@@ -10,7 +10,7 @@ pub fn run_setup() {
         env::set_var("RUST_LOG", "error");
         rt.block_on(async {
             let config = Config::from_env();
-            setup(&config).await;
+            StackManager::setup(&config).await;
         });
     });
 }
diff --git a/examples/from_file.rs b/examples/from_file.rs
index 8171b70b..fa6466c4 100644
--- a/examples/from_file.rs
+++ b/examples/from_file.rs
@@ -1,5 +1,5 @@
 use anyhow::Result;
-use pubky_nexus::{setup, types::DynError, Config, EventProcessor};
+use pubky_nexus::{types::DynError, Config, EventProcessor, StackManager};
 use std::fs::File;
 use std::io::{self, BufRead};
 use std::path::Path;
@@ -12,7 +12,7 @@ const FILE_PATH: &str = "examples/events.txt";
 #[tokio::main]
 async fn main() -> Result<(), DynError> {
     let config = Config::from_env();
-    setup(&config).await;
+    StackManager::setup(&config).await;
 
     let mut event_processor = EventProcessor::from_config(&config).await?;
 
diff --git a/src/lib.rs b/src/lib.rs
index da242e7c..6afbd96a 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -18,6 +18,6 @@ pub use db::kv::traits::RedisOps;
 pub use error::{Error, Result};
 pub use events::processor::EventProcessor;
 pub use reindex::reindex;
-pub use setup::setup;
+pub use setup::StackManager;
 
 extern crate const_format;
diff --git a/src/mock_db.rs b/src/mock_db.rs
new file mode 100644
index 00000000..1e447222
--- /dev/null
+++ b/src/mock_db.rs
@@ -0,0 +1,82 @@
+use log::info;
+
+use std::process::Stdio;
+
+use neo4rs::query;
+
+use pubky_nexus::{
+    db::connectors::redis::get_redis_conn, get_neo4j_graph, reindex, Config, StackManager,
+};
+
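+/// Entry point for the `mockdb` binary: wipes Neo4j and/or Redis and reloads the mock data used by the integration tests.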
+#[tokio::main]
+async fn main() {
+    let config = Config::from_env();
+    StackManager::setup(&config).await;
+    info!("Running mock db sync");
+    let args: Vec<String> = std::env::args().collect();
+    match args.get(1).map(String::as_str) {
+        Some("graph") => {
+            MockDB::sync_graph().await;
+        }
+        Some("redis") => {
+            MockDB::sync_redis().await;
+        }
+        None => {
+            MockDB::sync_graph().await;
+            MockDB::sync_redis().await;
+        }
+        Some(_) => {
+            panic!("Invalid argument. Use 'graph' or 'redis'");
+        }
+    }
+}
+
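+/// Re-seeds the Neo4j graph and re-indexes the Redis cache from the mock data set.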
+pub struct MockDB {}
+
+impl MockDB {
+    async fn sync_graph() {
+        let graph = get_neo4j_graph().expect("Failed to get Neo4j graph connection");
+
+        // Drop all existing nodes, then re-run the mock data queries via run-queries.sh
+        let drop_all_query = query("MATCH (n) DETACH DELETE n;");
+        graph
+            .lock()
+            .await
+            .run(drop_all_query)
+            .await
+            .expect("Could not drop graph nodes.");
+
+        let graph_env = std::env::var("GRAPH_CONTAINER_NAME").unwrap_or("neo4j".to_string());
+        // Run the run-queries.sh script on the Docker host using docker exec
+        tokio::process::Command::new("docker")
+            .args(&[
+                "exec",
+                graph_env.as_str(),
+                "bash",
+                "/db-graph/run-queries.sh",
+            ])
+            .stdout(Stdio::inherit())
+            .stderr(Stdio::inherit())
+            .status()
+            .await
+            .expect("Failed to run run-queries.sh");
+    }
+
+    async fn sync_redis() {
+        // Drop all keys in Redis
+        let mut redis_conn = get_redis_conn()
+            .await
+            .expect("Could not get the redis connection");
+
+        redis::cmd("FLUSHALL")
+            .exec_async(&mut redis_conn)
+            .await
+            .expect("Failed to flush Redis");
+
+        // Reindex
+        info!("Starting reindexing process.");
+        reindex().await;
+    }
+}
diff --git a/src/models/post/details.rs b/src/models/post/details.rs
index 4638020a..710e4e25 100644
--- a/src/models/post/details.rs
+++ b/src/models/post/details.rs
@@ -183,7 +183,7 @@ impl PostDetails {
 
 #[cfg(test)]
 mod tests {
-    use crate::{setup, Config};
+    use crate::{Config, StackManager};
 
     use super::*;
 
@@ -193,9 +193,9 @@ mod tests {
 
     #[tokio_shared_rt::test(shared)]
     async fn test_post_details_get_from_graph() {
-        // Open connections against ddbb
         let config = Config::from_env();
-        setup(&config).await;
+        StackManager::setup(&config).await;
+
         let _res = PostDetails::get_by_id(AUTHOR_A_ID, REPLY_ID).await.unwrap();
         let replies = PostStream::get_post_replies(AUTHOR_A_ID, POST_ID, None, None, None)
             .await
diff --git a/src/models/user/details.rs b/src/models/user/details.rs
index 0760d1f5..1e59a512 100644
--- a/src/models/user/details.rs
+++ b/src/models/user/details.rs
@@ -110,7 +110,7 @@ impl UserDetails {
 
 #[cfg(test)]
 mod tests {
-    use crate::{setup, Config};
+    use crate::{Config, StackManager};
 
     use super::*;
 
@@ -128,7 +128,7 @@ mod tests {
     #[tokio_shared_rt::test(shared)]
     async fn test_get_by_ids_from_redis() {
         let config = Config::from_env();
-        setup(&config).await;
+        StackManager::setup(&config).await;
 
         let user_details = UserDetails::get_by_ids(&USER_IDS).await.unwrap();
         assert_eq!(user_details.len(), USER_IDS.len());
diff --git a/src/service.rs b/src/service.rs
index d506a5a9..46384374 100644
--- a/src/service.rs
+++ b/src/service.rs
@@ -1,11 +1,11 @@
 use log::info;
-use pubky_nexus::{redis_is_empty, reindex, routes, setup, Config};
+use pubky_nexus::{redis_is_empty, reindex, routes, Config, StackManager};
 use tokio::net::TcpListener;
 
 #[tokio::main]
 async fn main() {
     let config = Config::from_env();
-    setup(&config).await;
+    StackManager::setup(&config).await;
 
     // Reindex if REINDEX is set to true or Redis is empty
     let should_reindex = config.reindex || redis_is_empty().await.unwrap_or(false);
diff --git a/src/setup.rs b/src/setup.rs
index ba0e0f20..37b318a7 100644
--- a/src/setup.rs
+++ b/src/setup.rs
@@ -1,47 +1,53 @@
-use crate::config::Config;
-use crate::db::connectors::{
-    neo4j::{Neo4jConnector, NEO4J_CONNECTOR},
-    redis::{RedisConnector, REDIS_CONNECTOR},
-};
 use crate::db::graph::setup::setup_graph;
+use crate::{
+    db::connectors::{
+        neo4j::{Neo4jConnector, NEO4J_CONNECTOR},
+        redis::{RedisConnector, REDIS_CONNECTOR},
+    },
+    Config,
+};
 use log::{debug, info};
 
-async fn setup_redis(config: &Config) {
-    let redis_connector = RedisConnector::new_connection(&config.redis_uri())
-        .await
-        .expect("Failed to connect to Redis");
+pub struct StackManager {}
 
-    match REDIS_CONNECTOR.set(redis_connector) {
-        Err(e) => debug!("RedisConnector was already set: {:?}", e),
-        Ok(()) => info!("RedisConnector successfully set"),
-    }
-}
+impl StackManager {
+    async fn setup_redis(config: &Config) {
+        let redis_connector = RedisConnector::new_connection(&config.redis_uri())
+            .await
+            .expect("Failed to connect to Redis");
 
-async fn setup_neo4j(config: &Config) {
-    let neo4j_connector = Neo4jConnector::new_connection(
-        &config.neo4j_uri(),
-        &config.neo4j_username,
-        &config.neo4j_password,
-    )
-    .await
-    .expect("Failed to connect to Neo4j");
-
-    match NEO4J_CONNECTOR.set(neo4j_connector) {
-        Err(e) => debug!("Neo4jConnector was already set: {:?}", e),
-        Ok(()) => info!("Neo4jConnector successfully set"),
+        match REDIS_CONNECTOR.set(redis_connector) {
+            Err(e) => debug!("RedisConnector was already set: {:?}", e),
+            Ok(()) => info!("RedisConnector successfully set"),
+        }
     }
 
-    // Set Neo4J graph data constraints
-    setup_graph().await.unwrap_or_default();
-}
+    async fn setup_neo4j(config: &Config) {
+        let neo4j_connector = Neo4jConnector::new_connection(
+            &config.neo4j_uri(),
+            &config.neo4j_username,
+            &config.neo4j_password,
+        )
+        .await
+        .expect("Failed to connect to Neo4j");
+
+        match NEO4J_CONNECTOR.set(neo4j_connector) {
+            Err(e) => debug!("Neo4jConnector was already set: {:?}", e),
+            Ok(()) => info!("Neo4jConnector successfully set"),
+        }
 
-pub async fn setup(config: &Config) {
-    match env_logger::try_init() {
-        Ok(_) => info!("Env logger initiated"),
-        Err(err) => debug!("Env logger was already set: {}", err),
+        // Set Neo4J graph data constraints
+        setup_graph().await.unwrap_or_default();
     }
 
-    // Initialize Redis and Neo4j
-    setup_redis(config).await;
-    setup_neo4j(config).await;
+    pub async fn setup(config: &Config) {
+        match env_logger::try_init() {
+            Ok(_) => info!("Env logger initiated"),
+            Err(err) => debug!("Env logger was already set: {}", err),
+        }
+
+        // Initialize Redis and Neo4j
+        Self::setup_redis(config).await;
+        Self::setup_neo4j(config).await;
+    }
 }
diff --git a/src/watcher.rs b/src/watcher.rs
index e3d64e29..df6e0f0f 100644
--- a/src/watcher.rs
+++ b/src/watcher.rs
@@ -1,14 +1,15 @@
 use log::error;
 use log::info;
 use pubky_nexus::PubkyConnector;
-use pubky_nexus::{setup, Config, EventProcessor};
+use pubky_nexus::{Config, EventProcessor, StackManager};
 use tokio::time::{sleep, Duration};
 
 /// Watches over a homeserver `/events` and writes into the Nexus databases
 #[tokio::main]
 async fn main() -> Result<(), Box<dyn std::error::Error + Sync + Send>> {
     let config = Config::from_env();
-    setup(&config).await;
+    StackManager::setup(&config).await;
+
     PubkyConnector::initialise(&config, None)?;
     let mut event_processor = EventProcessor::from_config(&config).await?;
 
diff --git a/tests/service/post/view.rs b/tests/service/post/view.rs
index 9cb932c9..efce5aff 100644
--- a/tests/service/post/view.rs
+++ b/tests/service/post/view.rs
@@ -1,27 +1,22 @@
 use crate::service::{
     post::{CAIRO_USER, ENCRYPTION_TAG, ROOT_PATH},
     stream::post::{POST_H, TAG_LABEL_2},
-    utils::{get_request, HOST_URL},
+    utils::{get_request, invalid_get_request},
 };
 use anyhow::Result;
+use axum::http::StatusCode;
 use pubky_nexus::models::tag::TagDetails;
 
-#[tokio::test]
+#[tokio_shared_rt::test(shared)]
 async fn test_get_post_view() -> Result<()> {
-    let client = httpc_test::new_client(HOST_URL)?;
-
     let author_id = "y4euc58gnmxun9wo87gwmanu6kztt9pgw1zz1yp1azp7trrsjamy";
     let post_id = "2ZCW1TGR5BKG0";
 
-    let res = client
-        .do_get(&format!(
-            "/v0/post/{}/{}?viewer_id={}",
-            author_id, post_id, author_id
-        ))
-        .await?;
-    assert_eq!(res.status(), 200);
-
-    let body = res.json_body()?;
+    let body = get_request(&format!(
+        "/v0/post/{}/{}?viewer_id={}",
+        author_id, post_id, author_id
+    ))
+    .await?;
 
     assert_eq!(body["details"]["content"], "I am told we can reply now!");
     assert_eq!(body["details"]["indexed_at"].as_u64(), Some(1718616844478));
@@ -48,15 +43,16 @@ async fn test_get_post_view() -> Result<()> {
     assert_eq!(post_tag.label, "pubky");
 
     // Test non-existing post
-    let res = client
-        .do_get(&format!("/v0/post/{}/{}", author_id, "no_post"))
-        .await?;
-    assert_eq!(res.status(), 404);
+    invalid_get_request(
+        &format!("/v0/post/{}/{}", author_id, "no_post"),
+        StatusCode::NOT_FOUND,
+    )
+    .await?;
 
     Ok(())
 }
 
-#[tokio::test]
+#[tokio_shared_rt::test(shared)]
 async fn test_get_post_view_with_limit_tags() -> Result<()> {
     let path = format!("{}/{}/{}?limit_tags=1", ROOT_PATH, CAIRO_USER, POST_H);
 
@@ -82,7 +78,7 @@ async fn test_get_post_view_with_limit_tags() -> Result<()> {
     Ok(())
 }
 
-#[tokio::test]
+#[tokio_shared_rt::test(shared)]
 async fn test_get_post_view_with_limit_taggers() -> Result<()> {
     let path = format!("{}/{}/{}?limit_taggers=2", ROOT_PATH, CAIRO_USER, POST_H);
 
@@ -114,7 +110,7 @@ async fn test_get_post_view_with_limit_taggers() -> Result<()> {
     Ok(())
 }
 
-#[tokio::test]
+#[tokio_shared_rt::test(shared)]
 async fn test_get_post_view_with_limit_tags_and_taggers() -> Result<()> {
     let path = format!(
         "{}/{}/{}?limit_tags=1&limit_taggers=2",
diff --git a/tests/service/stream/post/reach/timeline.rs b/tests/service/stream/post/reach/timeline.rs
index f39f4945..a611684e 100644
--- a/tests/service/stream/post/reach/timeline.rs
+++ b/tests/service/stream/post/reach/timeline.rs
@@ -46,7 +46,7 @@ async fn test_stream_posts_followers() -> Result<()> {
 const START_TIME: usize = 1980477299321;
 const END_TIME: usize = 1980477299312;
 
-#[tokio::test]
+#[tokio_shared_rt::test(shared)]
 async fn test_stream_posts_following_with_start() -> Result<()> {
     let path = format!(
         "{ROOT_PATH}?observer_id={}&source=following&viewer_id={}&start={}&limit=5",
@@ -84,7 +84,7 @@ async fn test_stream_posts_following_with_start() -> Result<()> {
     Ok(())
 }
 
-#[tokio::test]
+#[tokio_shared_rt::test(shared)]
 async fn test_stream_posts_following_with_start_and_end() -> Result<()> {
     let path = format!(
         "{ROOT_PATH}?observer_id={}&source=following&viewer_id={}&start={}&end={}",
@@ -119,7 +119,7 @@ async fn test_stream_posts_following_with_start_and_end() -> Result<()> {
 const START_TIME_ERS: usize = 1719308316919;
 const END_TIME_ERS: usize = 1693823567880;
 
-#[tokio::test]
+#[tokio_shared_rt::test(shared)]
 async fn test_stream_posts_followers_with_start() -> Result<()> {
     let path = format!(
         "{ROOT_PATH}?observer_id={}&source=followers&viewer_id={}&start={}&limit=5",
@@ -151,7 +151,7 @@ async fn test_stream_posts_followers_with_start() -> Result<()> {
     Ok(())
 }
 
-#[tokio::test]
+#[tokio_shared_rt::test(shared)]
 async fn test_stream_posts_followers_with_start_and_end() -> Result<()> {
     let path = format!(
         "{ROOT_PATH}?observer_id={}&source=followers&viewer_id={}&start={}&end={}",
@@ -183,7 +183,7 @@ async fn test_stream_posts_followers_with_start_and_end() -> Result<()> {
     Ok(())
 }
 
-#[tokio::test]
+#[tokio_shared_rt::test(shared)]
 async fn test_stream_posts_friend_with_start() -> Result<()> {
     let path = format!(
         "{ROOT_PATH}?observer_id={}&source=friends&viewer_id={}&start={}&limit=5",
@@ -215,7 +215,7 @@ async fn test_stream_posts_friend_with_start() -> Result<()> {
     Ok(())
 }
 
-#[tokio::test]
+#[tokio_shared_rt::test(shared)]
 async fn test_stream_posts_friend_with_start_and_end() -> Result<()> {
     let path = format!(
         "{ROOT_PATH}?observer_id={}&source=friends&viewer_id={}&start={}&end={}",
diff --git a/tests/utils.rs b/tests/utils.rs
index af6e462e..3af28c46 100644
--- a/tests/utils.rs
+++ b/tests/utils.rs
@@ -1,11 +1,8 @@
-use std::{process::Stdio, sync::Arc};
+use std::sync::Arc;
 
 use anyhow::Result;
 use log::info;
-use neo4rs::query;
-use pubky_nexus::{
-    db::connectors::redis::get_redis_conn, get_neo4j_graph, reindex, routes, setup, Config,
-};
+use pubky_nexus::{routes, Config, StackManager};
 use tokio::{
     net::TcpListener,
     sync::{Mutex, OnceCell},
@@ -33,63 +30,9 @@ impl TestServiceServer {
             .to_owned()
     }
 
-    async fn sync_graph() {
-        let graph = get_neo4j_graph().expect("Failed to get Neo4j graph connection");
-
-        // drop and run the queries again
-        let drop_all_query = query("MATCH (n) DETACH DELETE n;");
-        graph
-            .lock()
-            .await
-            .run(drop_all_query)
-            .await
-            .expect("Could not drop graph nodes.");
-
-        // Run the run-queries.sh script on the Docker host using docker exec
-        tokio::process::Command::new("docker")
-            .args(&["exec", "neo4j", "bash", "/db-graph/run-queries.sh"])
-            .stdout(Stdio::inherit())
-            .stderr(Stdio::inherit())
-            .status()
-            .await
-            .expect("Failed to run run-queries.sh");
-    }
-
-    async fn sync_redis() {
-        // Drop all keys in Redis
-        let mut redis_conn = get_redis_conn()
-            .await
-            .expect("Could not get the redis connection");
-
-        redis::cmd("FLUSHALL")
-            .exec_async(&mut redis_conn)
-            .await
-            .expect("Failed to flush Redis");
-
-        // Reindex
-        info!("Starting reindexing process.");
-        reindex().await;
-    }
-
     async fn start_server() -> Result<()> {
         let config = Config::from_env();
-        setup(&config).await;
-
-        // make sure DBs are in sync with mock data
-        let sync_db_env = std::env::var("SYNC_DB").unwrap_or("false".to_string());
-        match sync_db_env.as_str() {
-            "true" => {
-                Self::sync_graph().await;
-                Self::sync_redis().await;
-            }
-            "graph" => {
-                Self::sync_graph().await;
-            }
-            "false" => {}
-            _ => {
-                panic!("Invalid value for SYNC_DB");
-            }
-        }
+        StackManager::setup(&config).await;
 
         // App router
         let app = routes::routes();
diff --git a/tests/watcher/utils/watcher.rs b/tests/watcher/utils/watcher.rs
index 5125d985..8a46bff4 100644
--- a/tests/watcher/utils/watcher.rs
+++ b/tests/watcher/utils/watcher.rs
@@ -7,7 +7,9 @@ use pubky_app_specs::{
 };
 use pubky_common::crypto::Keypair;
 use pubky_homeserver::Homeserver;
-use pubky_nexus::{events::Event, setup, types::DynError, Config, EventProcessor, PubkyConnector};
+use pubky_nexus::{
+    events::Event, types::DynError, Config, EventProcessor, PubkyConnector, StackManager,
+};
 
 /// Struct to hold the setup environment for tests
 pub struct WatcherTest {
@@ -33,7 +35,7 @@ impl WatcherTest {
     /// event processor, and other test setup details.
     pub async fn setup() -> Result<Self> {
         let config = Config::from_env();
-        setup(&config).await;
+        StackManager::setup(&config).await;
 
         TestnetDHTNetwork::initialise(10)?;
         let testnet = TestnetDHTNetwork::get_testnet_dht_nodes()?;