Commit 1aaa647

IndexNext -> IndexV2
cpubot committed Dec 6, 2024
1 parent 2c6a43e commit 1aaa647
Showing 2 changed files with 26 additions and 26 deletions.

ledger/src/blockstore.rs (2 changes: 1 addition & 1 deletion)

@@ -2501,7 +2501,7 @@ impl Blockstore {
         match index {
             Ok(index) => Ok(index),
             Err(_) => {
-                let index_next: IndexNext = bincode::deserialize(slice)?;
+                let index_next: IndexV2 = bincode::deserialize(slice)?;
                 Ok(index_next.into())
             }
         }
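
This hunk is the read path: the stored bytes are first deserialized as the legacy Index, and only if that fails are they reinterpreted as IndexV2 and converted back with into(). Below is a minimal, self-contained sketch of that fallback pattern; LegacyIndex, IndexV2Like, and read_index are simplified stand-ins invented for illustration, not the actual blockstore types or functions.

// Sketch only (not the real blockstore code): `LegacyIndex` and `IndexV2Like`
// are simplified stand-ins for `Index` and `IndexV2`.
use serde::{Deserialize, Serialize};
use std::collections::BTreeSet;

#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct LegacyIndex {
    slot: u64,
    shreds: BTreeSet<u64>, // legacy: explicit set of shred ids
}

#[derive(Debug, Deserialize, Serialize)]
struct IndexV2Like {
    slot: u64,
    words: [u64; 4], // stand-in for the fixed [u64; MAX_U64S_PER_SLOT] bit set
}

impl From<IndexV2Like> for LegacyIndex {
    fn from(v2: IndexV2Like) -> Self {
        // Expand set bits back into explicit shred ids, mirroring the
        // `From<ShredIndexV2> for ShredIndex` conversion in blockstore_meta.rs.
        let shreds = (0..(64 * v2.words.len()) as u64)
            .filter(|&i| v2.words[(i / 64) as usize] & (1 << (i % 64)) != 0)
            .collect();
        LegacyIndex { slot: v2.slot, shreds }
    }
}

// Same shape as the hunk above: try the legacy layout first; if that fails,
// deserialize as the V2 layout and convert it into the legacy type.
fn read_index(bytes: &[u8]) -> Result<LegacyIndex, bincode::Error> {
    match bincode::deserialize::<LegacyIndex>(bytes) {
        Ok(index) => Ok(index),
        Err(_) => {
            let index_v2: IndexV2Like = bincode::deserialize(bytes)?;
            Ok(index_v2.into())
        }
    }
}

fn main() -> Result<(), bincode::Error> {
    let legacy = LegacyIndex { slot: 7, shreds: [0, 1, 5].into_iter().collect() };
    let bytes = bincode::serialize(&legacy)?;
    assert_eq!(read_index(&bytes)?, legacy);
    Ok(())
}

Only the legacy branch is exercised here; the real code relies on the two encodings rejecting each other (the collision tests in blockstore_meta.rs below assert exactly that), so reading V2-layout bytes falls through to the second branch.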

ledger/src/blockstore_meta.rs (50 changes: 25 additions & 25 deletions)

@@ -116,14 +116,14 @@ pub struct Index {
 }

 #[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
-pub struct IndexNext {
+pub struct IndexV2 {
     pub slot: Slot,
-    data: ShredIndexNext,
-    coding: ShredIndexNext,
+    data: ShredIndexV2,
+    coding: ShredIndexV2,
 }

-impl From<IndexNext> for Index {
-    fn from(index: IndexNext) -> Self {
+impl From<IndexV2> for Index {
+    fn from(index: IndexV2) -> Self {
         Index {
             slot: index.slot,
             data: index.data.into(),
@@ -132,9 +132,9 @@ impl From<IndexNext> for Index {
     }
 }

-impl From<Index> for IndexNext {
+impl From<Index> for IndexV2 {
     fn from(index: Index) -> Self {
-        IndexNext {
+        IndexV2 {
             slot: index.slot,
             data: index.data.into(),
             coding: index.coding.into(),
@@ -330,12 +330,12 @@ const MAX_U64S_PER_SLOT: usize = (MAX_DATA_SHREDS_PER_SLOT + 63) / 64;
 /// - **Simplified Serialization**: The contiguous memory layout allows for efficient
 ///   serialization/deserialization without tree reconstruction.
 #[derive(Clone, Debug, PartialEq, Eq)]
-pub struct ShredIndexNext {
+pub struct ShredIndexV2 {
     index: [u64; MAX_U64S_PER_SLOT],
     num_shreds: usize,
 }

-impl Default for ShredIndexNext {
+impl Default for ShredIndexV2 {
     fn default() -> Self {
         Self {
             index: [0; MAX_U64S_PER_SLOT],
@@ -344,7 +344,7 @@ impl Default for ShredIndexNext {
     }
 }

-impl Serialize for ShredIndexNext {
+impl Serialize for ShredIndexV2 {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
         S: serde::Serializer,
@@ -372,7 +372,7 @@ impl Serialize for ShredIndexNext {
     }
 }

-impl<'de> Deserialize<'de> for ShredIndexNext {
+impl<'de> Deserialize<'de> for ShredIndexV2 {
     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
     where
         D: serde::Deserializer<'de>,
@@ -393,7 +393,7 @@ impl<'de> Deserialize<'de> for ShredIndexNext {
     }
 }

-impl ShredIndexNext {
+impl ShredIndexV2 {
     pub fn num_shreds(&self) -> usize {
         self.num_shreds
     }
@@ -546,24 +546,24 @@ impl ShredIndexNext {
     }
 }

-impl FromIterator<u64> for ShredIndexNext {
+impl FromIterator<u64> for ShredIndexV2 {
     fn from_iter<T: IntoIterator<Item = u64>>(iter: T) -> Self {
-        let mut next_index = ShredIndexNext::default();
+        let mut next_index = ShredIndexV2::default();
         for idx in iter {
             next_index.insert(idx);
         }
         next_index
     }
 }

-impl From<ShredIndex> for ShredIndexNext {
+impl From<ShredIndex> for ShredIndexV2 {
     fn from(value: ShredIndex) -> Self {
         value.index.into_iter().collect()
     }
 }

-impl From<ShredIndexNext> for ShredIndex {
-    fn from(value: ShredIndexNext) -> Self {
+impl From<ShredIndexV2> for ShredIndex {
+    fn from(value: ShredIndexV2) -> Self {
         ShredIndex {
             index: value.iter().collect(),
         }
@@ -957,9 +957,9 @@ mod test {

     #[test]
     fn shred_index_next_serde() {
-        let index: ShredIndexNext = (0..MAX_DATA_SHREDS_PER_SLOT as u64).skip(3).collect();
+        let index: ShredIndexV2 = (0..MAX_DATA_SHREDS_PER_SLOT as u64).skip(3).collect();
         let serialized = bincode::serialize(&index).unwrap();
-        let deserialized = bincode::deserialize::<ShredIndexNext>(&serialized).unwrap();
+        let deserialized = bincode::deserialize::<ShredIndexV2>(&serialized).unwrap();
         assert_eq!(index, deserialized);
     }

@@ -972,18 +972,18 @@ mod test {
         }
         let serialized = bincode::serialize(&index).unwrap();
         // Attempt to deserialize as `ShredIndexNext`
-        let deserialized = bincode::deserialize::<ShredIndexNext>(&serialized);
+        let deserialized = bincode::deserialize::<ShredIndexV2>(&serialized);
         assert!(deserialized.is_err());
     }

     #[test]
     fn shred_index_next_collision() {
-        let index = ShredIndexNext::default();
+        let index = ShredIndexV2::default();
         let serialized = bincode::serialize(&index).unwrap();
         let deserialized = bincode::deserialize::<ShredIndex>(&serialized);
         assert!(deserialized.is_err());

-        let index: ShredIndexNext = (0..MAX_DATA_SHREDS_PER_SLOT as u64).skip(3).collect();
+        let index: ShredIndexV2 = (0..MAX_DATA_SHREDS_PER_SLOT as u64).skip(3).collect();
         let serialized = bincode::serialize(&index).unwrap();
         let deserialized = bincode::deserialize::<ShredIndex>(&serialized);
         assert!(deserialized.is_err());
@@ -993,7 +993,7 @@
     fn shred_index_legacy_compat() {
         use rand::Rng;
         let mut legacy = ShredIndex::default();
-        let mut next_index = ShredIndexNext::default();
+        let mut next_index = ShredIndexV2::default();

         for i in (0..MAX_DATA_SHREDS_PER_SLOT as u64).skip(3) {
             next_index.insert(i);
@@ -1012,13 +1012,13 @@
             legacy.range(0..rand_range).sum::<u64>()
         );

-        assert_eq!(ShredIndexNext::from(legacy.clone()), next_index);
+        assert_eq!(ShredIndexV2::from(legacy.clone()), next_index);
         assert_eq!(ShredIndex::from(next_index), legacy);
     }

     #[test]
     fn test_shred_index_next_boundary_conditions() {
-        let mut index = ShredIndexNext::default();
+        let mut index = ShredIndexV2::default();

         // First possible index
         index.insert(0);
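
The doc comment touched by the @@ -330 hunk ("contiguous memory layout", MAX_U64S_PER_SLOT = (MAX_DATA_SHREDS_PER_SLOT + 63) / 64) describes a flat bit set: each shred id owns one bit in a fixed [u64; MAX_U64S_PER_SLOT] array. A minimal sketch of that idea follows; BitSetIndex and its methods are illustrative stand-ins, not the actual ShredIndexV2 implementation, and the constant value here is only assumed to match the shred code.

// Sketch only: a flat bit-set index like the one the diff renames to
// `ShredIndexV2`. The real MAX_DATA_SHREDS_PER_SLOT constant lives in the
// shred code; 32_768 is used here for illustration.
const MAX_DATA_SHREDS_PER_SLOT: usize = 32_768;
// Round up to whole 64-bit words, as in the hunk header above.
const MAX_U64S_PER_SLOT: usize = (MAX_DATA_SHREDS_PER_SLOT + 63) / 64;

struct BitSetIndex {
    index: [u64; MAX_U64S_PER_SLOT],
    num_shreds: usize,
}

impl Default for BitSetIndex {
    fn default() -> Self {
        Self { index: [0; MAX_U64S_PER_SLOT], num_shreds: 0 }
    }
}

impl BitSetIndex {
    /// Mark shred `idx` as present. One array slot holds 64 shred ids, so the
    /// word is `idx / 64` and the bit within that word is `idx % 64`.
    fn insert(&mut self, idx: u64) {
        let (word, bit) = (idx as usize / 64, idx % 64);
        if word < MAX_U64S_PER_SLOT && self.index[word] & (1 << bit) == 0 {
            self.index[word] |= 1 << bit;
            self.num_shreds += 1;
        }
    }

    fn contains(&self, idx: u64) -> bool {
        let (word, bit) = (idx as usize / 64, idx % 64);
        word < MAX_U64S_PER_SLOT && self.index[word] & (1 << bit) != 0
    }

    /// Iterate set bits in ascending order, reconstructing shred ids.
    fn iter(&self) -> impl Iterator<Item = u64> + '_ {
        self.index.iter().enumerate().flat_map(|(word, &bits)| {
            (0..64u64)
                .filter(move |bit| bits & (1u64 << bit) != 0)
                .map(move |bit| word as u64 * 64 + bit)
        })
    }
}

fn main() {
    let mut index = BitSetIndex::default();
    for i in [0, 3, 64, 1_000] {
        index.insert(i);
    }
    assert!(index.contains(64));
    assert!(!index.contains(65));
    assert_eq!(index.iter().collect::<Vec<_>>(), vec![0, 3, 64, 1_000]);
    assert_eq!(index.num_shreds, 4);
}

Because the whole structure is one fixed array plus a counter, serializing it amounts to copying the words and the count, which is what the "Simplified Serialization" bullet is getting at; the legacy ShredIndex keeps an explicit set of ids (the "tree" in "tree reconstruction"), so the two encodings are shaped very differently on disk, as the collision tests above check.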
