Skip to content

Commit

Permalink
fix(derive): Doc Touchups and Telemetry (#105)
Browse files Browse the repository at this point in the history
* fix(derive): doc touchups

* fix(derive): readme k256 fall-through docs
  • Loading branch information
refcell authored Apr 15, 2024
1 parent bcffef4 commit 483059b
Show file tree
Hide file tree
Showing 6 changed files with 61 additions and 50 deletions.
25 changes: 23 additions & 2 deletions crates/derive/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,26 @@

> **Notice**: This crate is a WIP.
A `no_std` compatible implementation of the OP Stack's
[derivation pipeline](https://specs.optimism.io/protocol/derivation.html#l2-chain-derivation-specification).
A `no_std` compatible implementation of the OP Stack's [derivation pipeline][derive].

[derive]: https://specs.optimism.io/protocol/derivation.html#l2-chain-derivation-specification

## Features

The most up-to-date feature list will be available on the [docs.rs `Feature Flags` tab][ff] of the `kona-derive` crate.

Some features include the following.
- `serde`: Serialization and Deserialization support for `kona-derive` types.
- `k256`: [secp256k1][k] public key recovery support.
- `online`: Exposes an [alloy-provider][ap] powered data source using "online" HTTP requests.

By default, `kona-derive` enables features `serde` and `k256`.

Key recovery using the [secp256k1][k] curve sits behind a `k256` feature flag so that when compiled in `offline` mode,
secp recovery can fall through to the fpp host, accelerating key recovery. This was necessary since invalid instructions
were found when compiling `k256` recovery down to a bare-metal MIPS target. Since public key recovery requires expensive
elliptic curve operations, `k256` fall-through host recovery should drastically accelerate derivation on the FPVM.

[k]: https://en.bitcoin.it/wiki/Secp256k1
[ap]: https://docs.rs/crate/alloy-providers/latest
[ff]: https://docs.rs/crate/kona-derive/latest/features
13 changes: 10 additions & 3 deletions crates/derive/src/sources/blobs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ use alloy_consensus::{Transaction, TxEip4844Variant, TxEnvelope, TxType};
use alloy_primitives::{Address, Bytes, TxKind};
use anyhow::Result;
use async_trait::async_trait;
use tracing::warn;

/// A data iterator that reads from a blob.
#[derive(Debug, Clone)]
Expand Down Expand Up @@ -94,8 +95,14 @@ where
continue;
}
if !calldata.is_empty() {
// TODO(refcell): Add a warning log here if the blob data is not empty
// https://github.com/ethereum-optimism/optimism/blob/develop/op-node/rollup/derive/blob_data_source.go#L136
let hash = match &tx {
TxEnvelope::Legacy(tx) => Some(tx.hash()),
TxEnvelope::Eip2930(tx) => Some(tx.hash()),
TxEnvelope::Eip1559(tx) => Some(tx.hash()),
TxEnvelope::Eip4844(blob_tx_wrapper) => Some(blob_tx_wrapper.hash()),
_ => None,
};
warn!("Blob tx has calldata, which will be ignored: {hash:?}");
}
let blob_hashes = if let Some(b) = blob_hashes {
b
Expand Down Expand Up @@ -186,7 +193,7 @@ where
match next_data.decode() {
Ok(d) => Some(Ok(d)),
Err(_) => {
// TODO(refcell): Add a warning log here if decoding fails
warn!("Failed to decode blob data, skipping");
self.next().await
}
}
Expand Down
2 changes: 0 additions & 2 deletions crates/derive/src/stages/attributes_queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -157,8 +157,6 @@ where
{
async fn reset(&mut self, _: BlockInfo, _: &SystemConfig) -> StageResult<()> {
info!("resetting attributes queue");
// TODO: metrice the reset using telemetry
// telemetry can provide a method of logging and metricing
self.batch = None;
self.is_last_in_span = false;
Err(StageError::Eof)
Expand Down
14 changes: 6 additions & 8 deletions crates/derive/src/stages/frame_queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use alloy_primitives::Bytes;
use anyhow::anyhow;
use async_trait::async_trait;
use core::fmt::Debug;
use tracing::debug;
use tracing::{debug, error};

/// Provides data frames for the [FrameQueue] stage.
#[async_trait]
Expand Down Expand Up @@ -63,16 +63,14 @@ where
if let Ok(frames) = into_frames(Ok(data)) {
self.queue.extend(frames);
} else {
// TODO: log parsing frame error
// Failed to parse frames, but there may be more frames in the queue for
//
// the pipeline to advance, so don't return an error here.
// There may be more frames in the queue for the
// pipeline to advance, so don't return an error here.
error!("Failed to parse frames from data.");
}
}
Err(e) => {
// TODO: log retrieval error
// The error must be bubbled up without a wrapper in case it's an EOF error.
return Err(e);
error!("Failed to retrieve data: {:?}", e);
return Err(e); // Bubble up potential EOF error without wrapping.
}
}
}
Expand Down
55 changes: 22 additions & 33 deletions crates/derive/src/types/batch/single_batch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ use crate::types::{BlockID, BlockInfo, L2BlockInfo, RawTransaction, RollupConfig
use alloc::vec::Vec;
use alloy_primitives::BlockHash;
use alloy_rlp::{Decodable, Encodable};
use tracing::{info, warn};

/// Represents a single batch: a single encoded L2 block
#[derive(Debug, Default, Clone, PartialEq, Eq)]
Expand Down Expand Up @@ -43,39 +44,39 @@ impl SingleBatch {
) -> BatchValidity {
// Sanity check input consistency
if l1_blocks.is_empty() {
// TODO: log a warning: "missing L1 block input, cannot proceed with batch checking"
warn!("missing L1 block input, cannot proceed with batch checking");
return BatchValidity::Undecided;
}

let epoch = l1_blocks[0];
let next_timestamp = l2_safe_head.block_info.timestamp + cfg.block_time;
if self.timestamp > next_timestamp {
// TODO: trace log: "received out-of-order batch for future processing after next batch"
info!("received out-of-order batch for future processing after next batch");
return BatchValidity::Future;
}
if self.timestamp < next_timestamp {
// TODO: warn log: "dropping batch with old timestamp", "min_timestamp", next_timestamp
warn!("dropping batch with old timestamp, min_timestamp: {next_timestamp}");
return BatchValidity::Drop;
}

// Dependent on the above timestamp check.
// If the timestamp is correct, then it must build on top of the safe head.
if self.parent_hash != l2_safe_head.block_info.hash {
// TODO: warn log: "ignoring batch with mismatching parent hash", "current_safe_head",
// l2_safe_head.info.hash
let h = l2_safe_head.block_info.hash;
warn!("ignoring batch with mismatching parent hash, current_safe_head: {h}");
return BatchValidity::Drop;
}

// Filter out batches that were included too late.
if self.epoch_num + cfg.seq_window_size < inclusion_block.number {
// TODO: warn log: "batch was included too late, sequence window expired"
warn!("batch was included too late, sequence window expired");
return BatchValidity::Drop;
}

// Check the L1 origin of the batch
let mut batch_origin = epoch;
if self.epoch_num < epoch.number {
// TODO: warn log: "dropped batch, epoch is too old", "minimum", epoch.id()
warn!("dropped batch, epoch is too old, minimum: {}", epoch.id());
return BatchValidity::Drop;
} else if self.epoch_num == epoch.number {
// Batch is sticking to the current epoch, continue.
Expand All @@ -86,45 +87,40 @@ impl SingleBatch {
// more information otherwise the eager algorithm may diverge from a non-eager
// algorithm.
if l1_blocks.len() < 2 {
// TODO: info log: "eager batch wants to advance epoch, but could not without more
// L1 blocks", "current_epoch", epoch.id()
info!("eager batch wants to advance epoch, but could not without more L1 blocks at epoch: {}", epoch.id());
return BatchValidity::Undecided;
}
batch_origin = l1_blocks[1];
} else {
// TODO: warn log: "batch is for future epoch too far ahead, while it has the next
// timestamp, so it must be invalid", "current_epoch", epoch.id()
warn!("dropped batch, epoch is too far ahead, maximum: {}", epoch.id());
return BatchValidity::Drop;
}

// Validate the batch epoch hash
if self.epoch_hash != batch_origin.hash {
// TODO: warn log: "batch is for different L1 chain, epoch hash does not match",
// "expected", batch_origin.id()
warn!("dropped batch, epoch hash does not match, expected: {}", batch_origin.id());
return BatchValidity::Drop;
}

if self.timestamp < batch_origin.timestamp {
// TODO: warn log: "batch timestamp is less than L1 origin timestamp", "l2_timestamp",
// self.timestamp, "l1_timestamp", batch_origin.timestamp, "origin", batch_origin.id()
warn!("dropped batch, batch timestamp is less than L1 origin timestamp, l2_timestamp: {}, l1_timestamp: {}, origin: {}", self.timestamp, batch_origin.timestamp, batch_origin.id());
return BatchValidity::Drop;
}

// Check if we ran out of sequencer time drift
let max = if let Some(max) = batch_origin.timestamp.checked_add(cfg.max_sequencer_drift) {
max
} else {
// TODO: log that the batch exceeds time drift.
warn!("dropped batch, timestamp exceeds configured max sequencer drift, origin timestamp: {}, max drift: {}", batch_origin.timestamp, cfg.max_sequencer_drift);
return BatchValidity::Drop;
};

let no_txs = self.transactions.is_empty();
if self.timestamp > max && !no_txs {
// If the sequencer is ignoring the time drift rule, then drop the batch and force an
// empty batch instead, as the sequencer is not allowed to include anything
// past this point without moving to the next epoch. TODO: warn log: "batch
// exceeded sequencer time drift, sequencer must adopt new L1 origin to include
// transactions again", "max_time", max
// past this point without moving to the next epoch.
warn!("batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again, max time: {max}");
return BatchValidity::Drop;
}
if self.timestamp > max && no_txs {
Expand All @@ -134,35 +130,28 @@ impl SingleBatch {
// epoch advancement regardless of time drift is allowed.
if epoch.number == batch_origin.number {
if l1_blocks.len() < 2 {
// TODO: info log: "without the next L1 origin we cannot determine yet if this
// empty batch that exceeds the time drift is still valid"
info!("without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid");
return BatchValidity::Undecided;
}
let next_origin = l1_blocks[1];
// Check if the next L1 Origin could have been adopted
if self.timestamp >= next_origin.timestamp {
// TODO: log that the batch exceeded the time drift without adopting the next
// origin.
info!("empty batch that exceeds the time drift without adopting next L1 origin, dropping");
return BatchValidity::Drop;
} else {
// TODO: log that we are continuing with an empty batch before the late L1 block
// to preserve the L2 time invariant. TODO: metrice empty
// batch continuation
info!("empty batch continuation, preserving L2 time invariant");
}
}
}

// We can do this check earlier, but it's a more intensive one, so we do this last.
// TODO: metrice & allow configurability to measure the time it takes to check the batch.
for tx in self.transactions.iter() {
// We can do this check earlier, but it's intensive so we do it last for the sad-path.
for (i, tx) in self.transactions.iter().enumerate() {
if tx.is_empty() {
// TODO: warn log: "transaction data must not be empty, but found empty tx",
// "tx_index", i
warn!("transaction data must not be empty, but found empty tx at index {i}");
return BatchValidity::Drop;
}
if tx.is_deposit() {
// TODO: warn log: "sequencers may not embed any deposits into batch data, but found
// tx that has one", "tx_index", i
warn!("sequencers may not embed any deposits into batch data, but found tx that has one at index: {i}");
return BatchValidity::Drop;
}
}
Expand Down
2 changes: 0 additions & 2 deletions crates/derive/src/types/batch/span_batch/bits.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@ impl SpanBatchBits {
return Err(SpanBatchError::TooBigSpanBatchSize);
}

// TODO(refcell): This can definitely be optimized.
let bits = if b.len() < buffer_len {
let mut bits = vec![0; buffer_len];
bits[..b.len()].copy_from_slice(b);
Expand Down Expand Up @@ -77,7 +76,6 @@ impl SpanBatchBits {
if buf_len > MAX_SPAN_BATCH_SIZE {
return Err(SpanBatchError::TooBigSpanBatchSize);
}
// TODO(refcell): This can definitely be optimized.
let mut buf = vec![0; buf_len];
buf[buf_len - bits.0.len()..].copy_from_slice(bits.as_ref());
w.extend_from_slice(&buf);
Expand Down

0 comments on commit 483059b

Please sign in to comment.