add some info for debug
jackzhhuang committed Jan 13, 2025
1 parent e3c6150 commit b00c0ae
Showing 1 changed file with 32 additions and 4 deletions.
36 changes: 32 additions & 4 deletions flexidag/src/blockdag.rs
@@ -103,8 +103,14 @@ impl BlockDAG {
     }
 
     pub fn has_block_connected(&self, block_header: &BlockHeader) -> anyhow::Result<bool> {
-        let _ghostdata = match self.storage.ghost_dag_store.get_data(block_header.id()) {
-            std::result::Result::Ok(data) => data,
+        match self.storage.ghost_dag_store.has(block_header.id()) {
+            std::result::Result::Ok(true) => (),
+            std::result::Result::Ok(false) =>{
+                warn!(
+                    "failed to get ghostdata by hash, the block should be re-executed",
+                );
+                return anyhow::Result::Ok(false);
+            }
             Err(e) => {
                 warn!(
                     "failed to get ghostdata by hash: {:?}, the block should be re-executed",
@@ -114,8 +120,14 @@ impl BlockDAG {
             }
         };
 
-        let _dag_header = match self.storage.header_store.get_header(block_header.id()) {
-            std::result::Result::Ok(header) => header,
+        match self.storage.header_store.has(block_header.id()) {
+            std::result::Result::Ok(true) => (),
+            std::result::Result::Ok(false) => {
+                warn!(
+                    "failed to get header by hash, the block should be re-executed",
+                );
+                return anyhow::Result::Ok(false)
+            }
             Err(e) => {
                 warn!(
                     "failed to get header by hash: {:?}, the block should be re-executed",
@@ -359,6 +371,8 @@ impl BlockDAG {
             self.storage.reachability_store.upgradable_read(),
         );
 
+        info!("start to commit via batch1, header id: {:?}", header.id());
+
         // Store ghostdata
         process_key_already_error(self.storage.ghost_dag_store.insert_batch(
             &mut batch,
@@ -367,6 +381,8 @@
         ))
         .expect("failed to ghostdata in batch");
 
+        info!("start to commit via batch2, header id: {:?}", header.id());
+
         // Update reachability store
         debug!(
             "start to update reachability data for block: {:?}, number: {:?}",
@@ -380,6 +396,8 @@
             .collect::<Vec<_>>()
             .into_iter();
 
+        info!("start to commit via batch3, header id: {:?}, count of mergeset: {:?}, ", header.id(), merge_set.len());
+
         match inquirer::add_block(
             &mut stage,
             header.id(),
@@ -401,13 +419,17 @@
             },
         }
 
+        info!("start to commit via batch4, header id: {:?}", header.id());
+
         process_key_already_error(self.storage.relations_store.write().insert_batch(
             &mut batch,
             header.id(),
             BlockHashes::new(parents),
         ))
         .expect("failed to insert relations in batch");
 
+        info!("start to commit via batch5, header id: {:?}", header.id());
+
         // Store header store
         process_key_already_error(self.storage.header_store.insert(
             header.id(),
@@ -416,18 +438,24 @@
         ))
         .expect("failed to insert header in batch");
 
+        info!("start to commit via batch6, header id: {:?}", header.id());
+
         // the read lock will be updated to the write lock
         // and then write the batch
         // and then release the lock
         stage
             .commit(&mut batch)
             .expect("failed to write the stage reachability in batch");
 
+        info!("start to commit via batch7, header id: {:?}", header.id());
+
         // write the data just one time
         self.storage
             .write_batch(batch)
             .expect("failed to write dag data in batch");
 
+        info!("start to commit via batch8, header id: {:?}", header.id());
+
         drop(lock_guard);
         info!("finish writing the batch, head id: {:?}", header.id());
 
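The commit-path changes above follow a single pattern: an info! checkpoint is written before each batched step, numbered batch1 through batch8, so the last checkpoint that appears in the log pinpoints the step that stalled or panicked. A hypothetical helper (not part of the commit) expressing the same pattern with less repetition could look like this:

    // Hypothetical helper, not in the commit: emits the same numbered checkpoint
    // that the patch writes inline before every batch step.
    fn commit_checkpoint(step: u32, header_id: impl std::fmt::Debug) {
        info!("start to commit via batch{}, header id: {:?}", step, header_id);
    }

    // Usage at each step, e.g. before storing ghostdata:
    // commit_checkpoint(1, header.id());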
