Add missing docs to sov-db #568

Merged: 3 commits, Jul 26, 2023
19 changes: 14 additions & 5 deletions full-node/db/sov-db/src/ledger_db/mod.rs
@@ -31,14 +31,21 @@ pub struct LedgerDB {
next_item_numbers: Arc<Mutex<ItemNumbers>>,
}

/// A SlotNumber, BatchNumber, TxNumber, and EventNumber grouped together, typically representing
/// the respective heights at the start or end of slot processing.
#[derive(Default, Clone, Debug)]
pub struct ItemNumbers {
/// The slot number
pub slot_number: u64,
/// The batch number
pub batch_number: u64,
/// The transaction number
pub tx_number: u64,
/// The event number
pub event_number: u64,
}

/// All of the data to be committed to the ledger db for a single slot.
#[derive(Debug)]
pub struct SlotCommit<S: SlotData, B, T> {
slot_data: S,
@@ -48,16 +55,17 @@ pub struct SlotCommit<S: SlotData, B, T> {
}

impl<S: SlotData, B, T> SlotCommit<S, B, T> {
/// Returns a reference to the commit's slot_data
pub fn slot_data(&self) -> &S {
&self.slot_data
}

/// Returns a reference to the commit's batch_receipts
pub fn batch_receipts(&self) -> &[BatchReceipt<B, T>] {
&self.batch_receipts
}
}

impl<S: SlotData, B, T> SlotCommit<S, B, T> {
/// Create a new SlotCommit from the given slot data
pub fn new(slot_data: S) -> Self {
Self {
slot_data,
@@ -66,9 +74,7 @@ impl<S: SlotData, B, T> SlotCommit<S, B, T> {
num_events: 0,
}
}
}

impl<S: SlotData, B, T> SlotCommit<S, B, T> {
/// Add a `batch` (of transactions) to the commit
pub fn add_batch(&mut self, batch: BatchReceipt<B, T>) {
self.num_txs += batch.tx_receipts.len();
let events_this_batch: usize = batch.tx_receipts.iter().map(|r| r.events.len()).sum();
@@ -78,6 +84,8 @@ impl<S: SlotData, B, T> SlotCommit<S, B, T> {
}

impl LedgerDB {
/// Open a [`LedgerDB`] (backed by RocksDB) at the specified path.
/// The returned instance will be at the path `{path}/ledger-db`.
pub fn with_path(path: impl AsRef<Path>) -> Result<Self, anyhow::Error> {
let path = path.as_ref().join(LEDGER_DB_PATH_SUFFIX);
let inner = DB::open(
@@ -102,6 +110,7 @@ impl LedgerDB {
})
}

/// Get the next slot, batch, transaction, and event numbers
pub fn get_next_items_numbers(&self) -> ItemNumbers {
self.next_item_numbers.lock().unwrap().clone()
}
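To tie the newly documented ledger APIs together, here is a minimal usage sketch. It only calls items visible in this diff (`with_path`, `SlotCommit::new`, `add_batch`, `get_next_items_numbers`); the import paths for `SlotData` and `BatchReceipt` are assumptions, since those types come from sov-rollup-interface rather than this file.

```rust
// Sketch only: the sov-rollup-interface import paths are assumed, and producing
// a concrete slot or batch is outside the scope of this diff.
use std::path::Path;

use sov_db::ledger_db::{LedgerDB, SlotCommit};
use sov_rollup_interface::stf::BatchReceipt; // assumed path
use sov_rollup_interface::traits::SlotData; // assumed path

fn record_slot<S: SlotData, B, T>(
    path: &Path,
    slot: S,
    batches: Vec<BatchReceipt<B, T>>,
) -> anyhow::Result<()> {
    // Opens (or creates) the RocksDB instance under `{path}/ledger-db`.
    let ledger_db = LedgerDB::with_path(path)?;

    // Accumulate the slot's batches; tx and event counters are updated internally.
    let mut commit = SlotCommit::new(slot);
    for batch in batches {
        commit.add_batch(batch);
    }

    // The slot/batch/tx/event numbers that the next write would be assigned.
    let _next = ledger_db.get_next_items_numbers();
    Ok(())
}
```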
20 changes: 14 additions & 6 deletions full-node/db/sov-db/src/lib.rs
@@ -1,12 +1,20 @@
//! Defines the database used by the Sovereign SDK.
//!
//! - Types and traits for storing and retrieving ledger data can be found in the [`ledger_db`] module
//! - DB "Table" definitions can be found in the [`schema`] module
//! - Types and traits for storing state data can be found in the [`state_db`] module
//! - The default db configuration is generated in the [`rocks_db_config`] module
#![forbid(unsafe_code)]
#![deny(missing_docs)]

use state_db::StateDB;

/// Implements a wrapper around RocksDB meant for storing rollup history ("the ledger").
/// This wrapper implements helper traits for writing blocks to the ledger, and for
/// serving historical data via RPC
pub mod ledger_db;
/// Implements helpers for configuring RocksDB.
pub mod rocks_db_config;
/// Defines the tables used by the Sovereign SDK.
pub mod schema;
/// Implements a wrapper around RocksDB meant for storing rollup state. This is primarily used
/// as the backing store for the JMT.
pub mod state_db;

pub struct SovereignDB {
_state_db: StateDB,
}
16 changes: 6 additions & 10 deletions full-node/db/sov-db/src/rocks_db_config.rs
@@ -9,12 +9,13 @@ use rocksdb::Options;
/// for detailed explanations.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct RocksdbConfig {
/// The maximum number of files that can be open concurrently. Defaults to 5000
pub max_open_files: i32,
/// Once write-ahead logs exceed this size, RocksDB will start forcing the flush of column
/// families whose memtables are backed by the oldest live WAL file. Defaults to 1GB
pub max_total_wal_size: u64,
/// The maximum number of background threads, including threads for flushing and compaction. Defaults to 16.
pub max_background_jobs: i32,
pub block_cache_size: u64,
pub block_size: u64,
pub cache_index_and_filter_blocks: bool,
}

impl Default for RocksdbConfig {
@@ -25,19 +26,14 @@ impl Default for RocksdbConfig {
// For now we set the max total WAL size to be 1G. This config can be useful when column
// families are updated at non-uniform frequencies.
max_total_wal_size: 1u64 << 30,
// This includes threads for flashing and compaction. Rocksdb will decide the # of
// This includes threads for flushing and compaction. Rocksdb will decide the # of
// threads to use internally.
max_background_jobs: 16,
// Default block cache size is 8MB,
block_cache_size: 8 * (1u64 << 20),
// Default block cache size is 4KB,
block_size: 4 * (1u64 << 10),
// Whether cache index and filter blocks into block cache.
cache_index_and_filter_blocks: false,
}
}
}

/// Generate [`rocksdb::Options`] corresponding to the given [`RocksdbConfig`].
pub fn gen_rocksdb_options(config: &RocksdbConfig, readonly: bool) -> Options {
let mut db_opts = Options::default();
db_opts.set_max_open_files(config.max_open_files);
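Since the config struct loses three fields in this PR, a quick sketch of what remains may help: build a `RocksdbConfig` (here via `Default`), tweak a field, and turn it into `rocksdb::Options` with `gen_rocksdb_options`. Only items shown in this diff are used.

```rust
use sov_db::rocks_db_config::{gen_rocksdb_options, RocksdbConfig};

fn main() {
    // Documented defaults: 5000 open files, 1 GiB WAL cap, 16 background jobs.
    let mut config = RocksdbConfig::default();
    config.max_background_jobs = 8; // example tweak for a smaller machine

    // `readonly = false` requests options suitable for a writable instance.
    let _db_opts = gen_rocksdb_options(&config, false);
}
```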
3 changes: 3 additions & 0 deletions full-node/db/sov-db/src/schema/mod.rs
@@ -1,2 +1,5 @@
pub mod tables;
/// Defines the on-disk representation of all types which are stored by the SDK in a format other than
/// their native format. Notable examples include slots, blocks, transactions, and events, which
/// are split into their constituent parts and stored in separate tables for easy retrieval.
pub mod types;
9 changes: 9 additions & 0 deletions full-node/db/sov-db/src/schema/tables.rs
@@ -15,6 +15,11 @@
//! Event Tables:
//! - (EventKey, TxNumber) -> EventNumber
//! - EventNumber -> (EventKey, EventValue)
//!
//! JMT Tables:
//! - KeyHash -> Key
//! - (Key, Version) -> JmtValue
//! - NodeKey -> Node

use borsh::{maybestd, BorshDeserialize, BorshSerialize};
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
@@ -29,12 +34,16 @@ use super::types::{
StoredTransaction, TxNumber,
};

/// A list of all tables used by the StateDB. These tables store rollup state - meaning
/// account balances, nonces, etc.
pub const STATE_TABLES: &[&str] = &[
KeyHashToKey::table_name(),
JmtValues::table_name(),
JmtNodes::table_name(),
];

/// A list of all tables used by the LedgerDB. These tables store rollup "history" - meaning
/// transactions, events, receipts, etc.
pub const LEDGER_TABLES: &[&str] = &[
SlotByNumber::table_name(),
SlotByHash::table_name(),
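Both constants are plain lists of column-family names, so they can be handed to any RocksDB open call that accepts column families. The sketch below uses the `rocksdb` crate directly purely for illustration; sov-db itself opens these tables through its own DB wrapper, which this diff does not show in full.

```rust
use rocksdb::{Options, DB};
use sov_db::schema::tables::{LEDGER_TABLES, STATE_TABLES};

// Open a raw RocksDB instance with the given column families, creating any that
// are missing. Illustrative only; not how sov-db opens its databases internally.
fn open_raw(path: &str, tables: &[&str]) -> Result<DB, rocksdb::Error> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    opts.create_missing_column_families(true);
    DB::open_cf(&opts, path, tables)
}

fn main() -> Result<(), rocksdb::Error> {
    let _state = open_raw("/tmp/state-cf-demo", STATE_TABLES)?;
    let _ledger = open_raw("/tmp/ledger-cf-demo", LEDGER_TABLES)?;
    Ok(())
}
```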
32 changes: 20 additions & 12 deletions full-node/db/sov-db/src/schema/types.rs
@@ -19,6 +19,7 @@ use sov_rollup_interface::stf::{Event, EventKey, TransactionReceipt};
pub struct DbBytes(Arc<Vec<u8>>);

impl DbBytes {
/// Create `DbBytes` from a `Vec<u8>`
pub fn new(contents: Vec<u8>) -> Self {
Self(Arc::new(contents))
}
@@ -30,41 +31,40 @@ impl From<Vec<u8>> for DbBytes {
}
}

#[derive(
Debug, Clone, Copy, BorshDeserialize, BorshSerialize, PartialEq, Eq, Serialize, Deserialize,
)]
pub enum Status {
Applied,
Skipped,
Reverted,
}

impl AsRef<[u8]> for DbBytes {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}

/// A hash stored in the database
pub type DbHash = [u8; 32];
/// The "value" half of a key/value pair from the JMT
pub type JmtValue = Option<Vec<u8>>;
pub(crate) type StateKey = Vec<u8>;

/// The on-disk format of a slot. Specifies the batches contained in the slot
/// and the hash of the da block. TODO(@preston-evans98): add any additional data
/// required to reconstruct the da block proof
/// required to reconstruct the da block proof.
#[derive(Debug, PartialEq, BorshDeserialize, BorshSerialize)]
pub struct StoredSlot {
/// The slot's hash, as reported by the DA layer.
pub hash: DbHash,
/// Any extra data which the rollup decides to store relating to this slot.
pub extra_data: DbBytes,
/// The range of batches which occurred in this slot.
pub batches: std::ops::Range<BatchNumber>,
}

/// The on-disk format for a batch. Stores the hash and identifies the range of transactions
/// included in the batch
/// included in the batch.
#[derive(Debug, PartialEq, BorshDeserialize, BorshSerialize)]
pub struct StoredBatch {
/// The hash of the batch, as reported by the DA layer.
pub hash: DbHash,
/// The range of transactions which occurred in this batch.
pub txs: std::ops::Range<TxNumber>,
/// A custom "receipt" for this batch defined by the rollup.
pub custom_receipt: DbBytes,
}

@@ -84,10 +84,13 @@ impl<B: DeserializeOwned, T> TryFrom<StoredBatch> for BatchResponse<B, T> {
/// and identifies the events emitted by this transaction
#[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize, Clone)]
pub struct StoredTransaction {
/// The hash of the transaction.
pub hash: DbHash,
/// The range of event-numbers emitted by this transaction
/// The range of event-numbers emitted by this transaction.
pub events: std::ops::Range<EventNumber>,
/// The serialized transaction data, if the rollup decides to store it.
pub body: Option<Vec<u8>>,
/// A custom "receipt" for this transaction defined by the rollup.
pub custom_receipt: DbBytes,
}

@@ -103,6 +106,7 @@ impl<R: DeserializeOwned> TryFrom<StoredTransaction> for TxResponse<R> {
}
}

/// Split a `TransactionReceipt` into a `StoredTransaction` and a list of `Event`s for storage in the database.
pub fn split_tx_for_storage<R: Serialize>(
tx: TransactionReceipt<R>,
event_offset: u64,
@@ -122,7 +126,9 @@ pub fn split_tx_for_storage<R: Serialize>(
/// An identifier that specifies a single event
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub enum EventIdentifier {
/// A unique identifier for an event consisting of a [`TxIdentifier`] and an offset into that transaction's event list
TxIdAndIndex((TxIdentifier, u64)),
/// A unique identifier for an event consisting of a [`TxIdentifier`] and an event key
TxIdAndKey((TxIdentifier, EventKey)),
/// The monotonically increasing number of the event, ordered by the DA layer. For example, if the first tx
/// contains 7 events, tx 2 contains 11 events, and tx 3 contains 7 events,
@@ -133,7 +139,9 @@ pub enum EventGroupIdentifier {
/// An identifier for a group of related events
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub enum EventGroupIdentifier {
/// All of the events which occurred in a particular transaction
TxId(TxIdentifier),
/// All events with a particular key (typically, these events will have been emitted by several different transactions)
Key(Vec<u8>),
}

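The variant documented with the "monotonically increasing number" comment is collapsed out of this diff, but the numbering scheme itself is simple arithmetic, sketched below. Whether numbering is 0- or 1-based is not visible here, so the 1-based reading is an assumption.

```rust
// An event's global number is the total number of events emitted by all earlier
// transactions plus its (assumed 1-based) position within its own transaction.
fn global_event_number(events_in_earlier_txs: &[u64], position_in_tx: u64) -> u64 {
    events_in_earlier_txs.iter().sum::<u64>() + position_in_tx
}

fn main() {
    // If tx 1 emits 7 events and tx 2 emits 11, the 7th event of tx 3 is
    // number 7 + 11 + 7 = 25 under this reading.
    assert_eq!(global_event_number(&[7, 11], 7), 25);
}
```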
15 changes: 15 additions & 0 deletions full-node/db/sov-db/src/state_db.rs
@@ -9,15 +9,24 @@ use crate::rocks_db_config::gen_rocksdb_options;
use crate::schema::tables::{JmtNodes, JmtValues, KeyHashToKey, STATE_TABLES};
use crate::schema::types::StateKey;

/// A typed wrapper around RocksDB for storing rollup state. Internally,
/// this is roughly just an `Arc<SchemaDB>`.
///
/// StateDB implements several convenience functions for state storage -
/// notably the `TreeReader` and `TreeWriter` traits.
#[derive(Clone)]
pub struct StateDB {
/// The underlying RocksDB instance, wrapped in an [`Arc`] for convenience and [`SchemaDB`] for type safety
db: Arc<DB>,
/// The [`Version`] that will be used for the next batch of writes to the DB.
next_version: Arc<Mutex<Version>>,
}

const STATE_DB_PATH_SUFFIX: &str = "state";

impl StateDB {
/// Open a [`StateDB`] (backed by RocksDB) at the specified path.
/// The returned instance will be at the path `{path}/state`.
pub fn with_path(path: impl AsRef<Path>) -> Result<Self, anyhow::Error> {
let path = path.as_ref().join(STATE_DB_PATH_SUFFIX);
let inner = DB::open(
@@ -35,10 +44,13 @@ impl StateDB {
})
}

/// Put the preimage of a hashed key into the database. Note that the preimage is not checked for correctness,
/// since the DB is unaware of the hash function used by the JMT.
pub fn put_preimage(&self, key_hash: KeyHash, key: &Vec<u8>) -> Result<(), anyhow::Error> {
self.db.put::<KeyHashToKey>(&key_hash.0, key)
}

/// Get an optional value from the database, given a version and a key hash.
pub fn get_value_option_by_key(
&self,
version: Version,
@@ -62,6 +74,7 @@ impl StateDB {
}
}

/// Store an item in the database, given a key, a key hash, a version, and a value
pub fn update_db(
&self,
key: StateKey,
@@ -74,11 +87,13 @@
Ok(())
}

/// Increment the `next_version` counter by 1.
pub fn inc_next_version(&self) {
let mut version = self.next_version.lock().unwrap();
*version += 1;
}

/// Get the current value of the `next_version` counter
pub fn get_next_version(&self) -> Version {
let version = self.next_version.lock().unwrap();
*version
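A rough usage sketch of the StateDB surface documented above, using only the fully visible signatures (`with_path`, `put_preimage`, `get_next_version`, `inc_next_version`). Constructing `KeyHash` from a raw 32-byte array is an assumption based on the `key_hash.0` access in `put_preimage`; in practice the hash comes from the JMT itself.

```rust
use jmt::KeyHash;
use sov_db::state_db::StateDB;

fn main() -> anyhow::Result<()> {
    // Opens (or creates) the RocksDB instance under `{path}/state`.
    let state_db = StateDB::with_path("/tmp/state-db-demo")?;

    // Record the preimage of a hashed key. The DB does not verify the hash,
    // since it does not know which hash function the JMT uses.
    let key = b"some/state/key".to_vec();
    let key_hash = KeyHash([0u8; 32]); // placeholder hash, for illustration only
    state_db.put_preimage(key_hash, &key)?;

    // Writes are versioned; bump the counter once a batch of writes is complete.
    let before = state_db.get_next_version();
    state_db.inc_next_version();
    assert_eq!(state_db.get_next_version(), before + 1);
    Ok(())
}
```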