address PR feedback
jcnelson committed Nov 25, 2020
1 parent 14b1fc1 commit ee12847
Showing 15 changed files with 275 additions and 215 deletions.
3 changes: 1 addition & 2 deletions docs/event-dispatcher.md
@@ -73,8 +73,7 @@ Example:
{
"recipient": "ST31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZZ239N96",
"coinbase_amount": "1000",
"tx_fees_anchored_shared": "800",
"tx_fees_anchored_exclusive": "0",
"tx_fees_anchored": "800",
"tx_fees_streamed_confirmed": "0",
"from_stacks_block_hash": "0xf5d4ce0efe1d42c963d615ce57f0d014f263a985175e4ece766eceff10e0a358",
"from_index_block_hash": "0x329efcbcc6daf5ac3f264522e0df50eddb5be85df6ee8a9fc2384c54274d7afc",
29 changes: 9 additions & 20 deletions src/chainstate/stacks/db/blocks.rs
@@ -990,9 +990,7 @@ impl StacksChainState {
) -> Result<Option<StagingBlock>, Error> {
let sql = "SELECT * FROM staging_blocks WHERE index_block_hash = ?1 AND orphaned = 0";
let args: &[&dyn ToSql] = &[&index_block_hash];
let mut rows =
query_rows::<StagingBlock, _>(block_conn, sql, args).map_err(Error::DBError)?;
Ok(rows.pop())
query_row::<StagingBlock, _>(block_conn, sql, args).map_err(Error::DBError)
}

#[cfg(test)]
@@ -1082,7 +1080,7 @@ impl StacksChainState {
return Ok(None);
}
};
header.microblock_pubkey_hash.clone()
header.microblock_pubkey_hash
}
};
Ok(Some(pubkey_hash))
@@ -1097,12 +1095,9 @@
parent_index_block_hash: &StacksBlockId,
microblock_hash: &BlockHeaderHash,
) -> Result<Option<StagingMicroblock>, Error> {
let sql = "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 AND orphaned = 0 LIMIT 1".to_string();
let sql = "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 AND orphaned = 0 LIMIT 1";
let args: &[&dyn ToSql] = &[&parent_index_block_hash, &microblock_hash];
let mut rows =
query_rows::<StagingMicroblock, _>(blocks_conn, &sql, args).map_err(Error::DBError)?;

Ok(rows.pop())
query_row::<StagingMicroblock, _>(blocks_conn, sql, args).map_err(Error::DBError)
}

/// Load up a preprocessed microblock's staging info (processed or not), via its index
@@ -1113,12 +1108,9 @@
blocks_conn: &DBConn,
index_microblock_hash: &StacksBlockId,
) -> Result<Option<StagingMicroblock>, Error> {
let sql = "SELECT * FROM staging_microblocks WHERE index_microblock_hash = ?1 AND orphaned = 0 LIMIT 1".to_string();
let sql = "SELECT * FROM staging_microblocks WHERE index_microblock_hash = ?1 AND orphaned = 0 LIMIT 1";
let args: &[&dyn ToSql] = &[&index_microblock_hash];
let mut rows =
query_rows::<StagingMicroblock, _>(blocks_conn, &sql, args).map_err(Error::DBError)?;

Ok(rows.pop())
query_row::<StagingMicroblock, _>(blocks_conn, sql, args).map_err(Error::DBError)
}

/// Load up a preprocessed microblock (processed or not)
@@ -1208,10 +1200,7 @@ impl StacksChainState {
last_seq.saturating_sub(1)
);

if last_seq < u16::MAX
&& microblock.header.sequence < u16::MAX
&& microblock.header.sequence > 0
{
if last_seq < u16::MAX && microblock.header.sequence < u16::MAX {
// should always decrease by 1
assert_eq!(
microblock.header.sequence + 1,
@@ -4278,8 +4267,8 @@ impl StacksChainState {
/// The header info will be pulled from the headers DB, so this method only succeeds if the
/// parent block has been processed.
/// If it's not known, return None.
fn get_parent_header_info<'a>(
chainstate_tx: &mut ChainstateTx<'a>,
fn get_parent_header_info(
chainstate_tx: &mut ChainstateTx,
next_staging_block: &StagingBlock,
) -> Result<Option<StacksHeaderInfo>, Error> {
let parent_block_header_info = match StacksChainState::get_anchored_block_header_info(
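The blocks.rs hunks above replace manual query_rows(...).pop() calls with the single-row query_row helper, expressing the at-most-one-row intent directly instead of collecting a Vec and popping its last element. As a minimal sketch of what such a helper does — assuming a rusqlite-backed connection and a FromRow-style trait; the type and error names below are illustrative stand-ins, not the project's actual util::db definitions:

use rusqlite::{Connection, Row, ToSql};

// Illustrative stand-ins for the project's util::db types; names and
// signatures are assumptions made for this sketch only.
#[derive(Debug)]
pub enum DbError {
    Sqlite(rusqlite::Error),
    Overflow, // more than one row matched a query expected to return at most one
}

pub trait FromRow: Sized {
    fn from_row(row: &Row) -> Result<Self, DbError>;
}

/// Run a query expected to yield at most one row: Ok(None) if nothing matched,
/// Ok(Some(..)) for exactly one match, and an error if multiple rows came back.
pub fn query_row<T: FromRow>(
    conn: &Connection,
    sql: &str,
    args: &[&dyn ToSql],
) -> Result<Option<T>, DbError> {
    let mut stmt = conn.prepare(sql).map_err(DbError::Sqlite)?;
    let mut rows = stmt.query(args).map_err(DbError::Sqlite)?;
    let mut result = None;
    while let Some(row) = rows.next().map_err(DbError::Sqlite)? {
        if result.is_some() {
            return Err(DbError::Overflow);
        }
        result = Some(T::from_row(row)?);
    }
    Ok(result)
}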
203 changes: 195 additions & 8 deletions src/chainstate/stacks/db/transactions.rs
@@ -39,15 +39,19 @@ use chainstate::burn::db::sortdb::*;

use net::Error as net_error;

use vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData};
use vm::types::{
AssetIdentifier, BuffData, PrincipalData, QualifiedContractIdentifier, SequenceData,
StandardPrincipalData, TupleData, TypeSignature, Value,
};

use vm::contexts::{AssetMap, AssetMapEntry};
use vm::contexts::{AssetMap, AssetMapEntry, Environment};

use vm::analysis::run_analysis;
use vm::ast::build_ast;
use vm::costs::ExecutionCost;

use vm::types::{AssetIdentifier, Value};
use vm::costs::cost_functions;
use vm::costs::CostTracker;
use vm::costs::ExecutionCost;

use vm::clarity::{
ClarityBlockConnection, ClarityConnection, ClarityInstance, ClarityTransactionConnection,
@@ -63,6 +67,9 @@ use vm::database::ClarityDatabase;

use vm::contracts::Contract;

use vm::representations::ClarityName;
use vm::representations::ContractName;

// make it possible to have a set of Values
impl std::hash::Hash for Value {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
@@ -579,6 +586,188 @@ impl StacksChainState {
return true;
}

/// Given two microblock headers, were they signed by the same key?
/// Return the pubkey hash if so; return Err otherwise
fn check_microblock_header_signer(
mblock_hdr_1: &StacksMicroblockHeader,
mblock_hdr_2: &StacksMicroblockHeader,
) -> Result<Hash160, Error> {
let pkh1 = mblock_hdr_1.check_recover_pubkey().map_err(|e| {
Error::InvalidStacksTransaction(
format!("Failed to recover public key: {:?}", &e),
false,
)
})?;

let pkh2 = mblock_hdr_2.check_recover_pubkey().map_err(|e| {
Error::InvalidStacksTransaction(
format!("Failed to recover public key: {:?}", &e),
false,
)
})?;

if pkh1 != pkh2 {
let msg = format!(
"Invalid PoisonMicroblock transaction -- signature pubkey hash {} != {}",
&pkh1, &pkh2
);
warn!("{}", &msg);
return Err(Error::InvalidStacksTransaction(msg, false));
}
Ok(pkh1)
}

/// Process a poison-microblock transaction within a Clarity environment.
/// The code in vm::contexts will call this, via a similarly-named method.
/// Returns a Value that describes the slashed miner:
/// * contains the block height of the block with the slashed microblock public key hash
/// * contains the microblock public key hash
/// * contains the sender that reported the poison-microblock
/// * contains the sequence number at which the fork occurred
pub fn handle_poison_microblock(
env: &mut Environment,
mblock_header_1: &StacksMicroblockHeader,
mblock_header_2: &StacksMicroblockHeader,
) -> Result<Value, Error> {
let cost_before = env.global_context.cost_track.get_total();

// encodes MARF reads for loading microblock height and current height, and loading and storing a
// poison-microblock report
runtime_cost!(cost_functions::POISON_MICROBLOCK, env, 0)
.map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?;

let sender_principal = match &env.sender {
Some(ref sender) => {
let sender_principal = sender.clone().expect_principal();
if let PrincipalData::Standard(sender_principal) = sender_principal {
sender_principal
} else {
panic!(
"BUG: tried to handle poison microblock without a standard principal sender"
);
}
}
None => {
panic!("BUG: tried to handle poison microblock without a sender");
}
};

// is this valid -- were both headers signed by the same key?
let pubkh =
StacksChainState::check_microblock_header_signer(mblock_header_1, mblock_header_2)?;

let microblock_height_opt = env
.global_context
.database
.get_microblock_pubkey_hash_height(&pubkh);
let current_height = env.global_context.database.get_current_block_height();

// for the microblock public key hash we had to process
env.add_memory(20)
.map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?;

// for the block height we had to load
env.add_memory(4)
.map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?;

// was the referenced public key hash used anytime in the past
// MINER_REWARD_MATURITY blocks?
let mblock_pubk_height = match microblock_height_opt {
None => {
// public key has never been seen before
let msg = format!(
"Invalid Stacks transaction: microblock public key hash {} never seen in this fork",
&pubkh
);
warn!("{}", &msg);

return Err(Error::InvalidStacksTransaction(msg, false));
}
Some(height) => {
if height
.checked_add(MINER_REWARD_MATURITY as u32)
.expect("BUG: too many blocks")
< current_height
{
let msg = format!("Invalid Stacks transaction: microblock public key hash from height {} has matured relative to current height {}", height, current_height);
warn!("{}", &msg);

return Err(Error::InvalidStacksTransaction(msg, false));
}
height
}
};

// add punishment / commission record, if one does not already exist at a lower sequence
let (reporter_principal, reported_seq) = if let Some((reporter, seq)) = env
.global_context
.database
.get_microblock_poison_report(mblock_pubk_height)
{
// account for report loaded
env.add_memory(TypeSignature::PrincipalType.size() as u64)
.map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?;

// u128 sequence
env.add_memory(16)
.map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?;

if mblock_header_1.sequence < seq {
// this sender reports a point lower in the stream where a fork occurred, and is now
// entitled to a commission of the punished miner's coinbase
debug!("Sender {} reports a better poison-miroblock record (at {}) for key {} at height {} than {} (at {})", &sender_principal, mblock_header_1.sequence, &pubkh, mblock_pubk_height, &reporter, seq);
env.global_context.database.insert_microblock_poison(
mblock_pubk_height,
&sender_principal,
mblock_header_1.sequence,
)?;
(sender_principal.clone(), mblock_header_1.sequence)
} else {
// someone else beat the sender to this report
debug!("Sender {} reports an equal or worse poison-microblock record (at {}, but already have one for {}); dropping...", &sender_principal, mblock_header_1.sequence, seq);
(reporter, seq)
}
} else {
// first-ever report of a fork
debug!(
"Sender {} reports a poison-microblock record at seq {} for key {} at height {}",
&sender_principal, mblock_header_1.sequence, &pubkh, &mblock_pubk_height
);
env.global_context.database.insert_microblock_poison(
mblock_pubk_height,
&sender_principal,
mblock_header_1.sequence,
)?;
(sender_principal.clone(), mblock_header_1.sequence)
};

let hash_data = BuffData {
data: pubkh.as_bytes().to_vec(),
};
let tuple_data = TupleData::from_data(vec![
(
ClarityName::try_from("block_height").expect("BUG: valid string representation"),
Value::UInt(mblock_pubk_height as u128),
),
(
ClarityName::try_from("microblock_pubkey_hash")
.expect("BUG: valid string representation"),
Value::Sequence(SequenceData::Buffer(hash_data)),
),
(
ClarityName::try_from("reporter").expect("BUG: valid string representation"),
Value::Principal(PrincipalData::Standard(reporter_principal)),
),
(
ClarityName::try_from("sequence").expect("BUG: valid string representation"),
Value::UInt(reported_seq as u128),
),
])
.expect("BUG: valid tuple representation");

Ok(Value::Tuple(tuple_data))
}

/// Process the transaction's payload, and run the post-conditions against the resulting state.
/// Returns the number of STX burned.
pub fn process_transaction_payload(
@@ -6948,10 +7137,8 @@ pub mod test {
let err =
StacksChainState::process_transaction(&mut conn, &signed_tx_poison_microblock, false)
.unwrap_err();
if let Error::ClarityError(clarity_error::Interpreter(InterpreterError::Interpreter(
::vm::errors::InterpreterError::InvalidPoisonMicroblockTransaction(_),
))) = err
{
if let Error::ClarityError(clarity_error::BadTransaction(msg)) = err {
assert!(msg.find("never seen in this fork").is_some());
} else {
assert!(false);
}
2 changes: 0 additions & 2 deletions src/chainstate/stacks/miner.rs
@@ -8034,8 +8034,6 @@ pub mod test {
}
}

//

// TODO: invalid block with duplicate microblock public key hash (okay between forks, but not
// within the same fork)
// TODO: (BLOCKED) build off of different points in the same microblock stream
27 changes: 27 additions & 0 deletions src/chainstate/stacks/mod.rs
@@ -67,9 +67,12 @@ use vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalDat
use vm::errors::Error as clarity_interpreter_error;

use vm::clarity::Error as clarity_error;
use vm::costs::CostErrors;
use vm::costs::ExecutionCost;
use vm::representations::{ClarityName, ContractName};

use vm::contexts::GlobalContext;

pub type StacksPublicKey = secp256k1::Secp256k1PublicKey;
pub type StacksPrivateKey = secp256k1::Secp256k1PrivateKey;

@@ -318,6 +321,30 @@ impl From<clarity_interpreter_error> for Error {
}
}

impl Error {
pub fn from_cost_error(
err: CostErrors,
cost_before: ExecutionCost,
context: &GlobalContext,
) -> Error {
match err {
CostErrors::CostOverflow => {
let cur_cost = context.cost_track.get_total();
let budget = context.cost_track.get_limit();
Error::CostOverflowError(cost_before, cur_cost, budget)
}
CostErrors::CostBalanceExceeded(used, budget) => {
Error::CostOverflowError(cost_before, used, budget)
}
CostErrors::MemoryBalanceExceeded(_, _) => {
let cur_cost = context.cost_track.get_total();
let budget = context.cost_track.get_limit();
Error::CostOverflowError(cost_before, cur_cost, budget)
}
}
}
}

impl Txid {
/// A Stacks transaction ID is a sha512/256 hash (not a double-sha256 hash)
pub fn from_stacks_tx(txdata: &[u8]) -> Txid {
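The new Error::from_cost_error helper in the mod.rs hunk above is how the poison-microblock code in transactions.rs converts Clarity cost-tracking failures into chainstate errors. A condensed sketch of that call-site pattern, as it might be written inside src/chainstate/stacks/db/transactions.rs (the charge_memory wrapper is hypothetical; the env fields and the from_cost_error call mirror the diff):

// Hypothetical wrapper illustrating the call-site pattern used throughout
// handle_poison_microblock: snapshot the running cost, attempt the memory
// charge, and map any CostErrors into the chainstate Error.
fn charge_memory(env: &mut Environment, bytes: u64) -> Result<(), Error> {
    // snapshot taken before the charge, so the error can report how much of
    // the budget was already consumed when the failure occurred
    let cost_before = env.global_context.cost_track.get_total();
    env.add_memory(bytes)
        .map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))
}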
4 changes: 3 additions & 1 deletion src/lib.rs
@@ -66,14 +66,16 @@ pub mod util;
#[macro_use]
pub mod net;

#[macro_use]
pub mod vm;

#[macro_use]
pub mod chainstate;

pub mod address;
pub mod burnchains;
pub mod core;
pub mod deps;
pub mod vm;

pub mod clarity;
