Refactor some tests
mininny committed Sep 13, 2024
1 parent 1dfb0a0 commit b47f409
Showing 2 changed files with 168 additions and 151 deletions.
299 changes: 152 additions & 147 deletions rvgo/fast/memory_test.go
@@ -13,129 +13,6 @@ import (
"github.com/stretchr/testify/require"
)

const (
smallDataset = 1_000
mediumDataset = 100_000
largeDataset = 1_000_000
)

func BenchmarkMemoryOperations(b *testing.B) {
benchmarks := []struct {
name string
fn func(b *testing.B, m *Memory)
}{
{"RandomReadWrite_Small", benchRandomReadWrite(smallDataset)},
{"RandomReadWrite_Medium", benchRandomReadWrite(mediumDataset)},
{"RandomReadWrite_Large", benchRandomReadWrite(largeDataset)},
{"SequentialReadWrite_Small", benchSequentialReadWrite(smallDataset)},
{"SequentialReadWrite_Large", benchSequentialReadWrite(largeDataset)},
{"SparseMemoryUsage", benchSparseMemoryUsage},
{"DenseMemoryUsage", benchDenseMemoryUsage},
{"SmallFrequentUpdates", benchSmallFrequentUpdates},
{"MerkleProofGeneration_Small", benchMerkleProofGeneration(smallDataset)},
{"MerkleProofGeneration_Large", benchMerkleProofGeneration(largeDataset)},
{"MerkleRootCalculation_Small", benchMerkleRootCalculation(smallDataset)},
{"MerkleRootCalculation_Large", benchMerkleRootCalculation(largeDataset)},
}

for _, bm := range benchmarks {
b.Run(bm.name, func(b *testing.B) {
m := NewMemory()
b.ResetTimer()
bm.fn(b, m)
})
}
}

func benchRandomReadWrite(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
addresses := make([]uint64, size)
for i := range addresses {
addresses[i] = mathrand.Uint64()
}
data := make([]byte, 8)

b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := addresses[i%len(addresses)]
if i%2 == 0 {
m.SetUnaligned(addr, data)
} else {
m.GetUnaligned(addr, data)
}
}
}
}

func benchSequentialReadWrite(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
data := make([]byte, 8)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(i % size)
if i%2 == 0 {
m.SetUnaligned(addr, data)
} else {
m.GetUnaligned(addr, data)
}
}
}
}

func benchSparseMemoryUsage(b *testing.B, m *Memory) {
data := make([]byte, 8)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(i) * 1000000 // Large gaps between addresses
m.SetUnaligned(addr, data)
}
}

func benchDenseMemoryUsage(b *testing.B, m *Memory) {
data := make([]byte, 8)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(i) * 8 // Contiguous 8-byte allocations
m.SetUnaligned(addr, data)
}
}

func benchSmallFrequentUpdates(b *testing.B, m *Memory) {
data := make([]byte, 1)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := mathrand.Uint64() % 1000000 // Confined to a smaller range
m.SetUnaligned(addr, data)
}
}

func benchMerkleProofGeneration(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
// Setup: allocate some memory
for i := 0; i < size; i++ {
m.SetUnaligned(uint64(i)*8, []byte{byte(i)})
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(mathrand.Intn(size) * 8)
_ = m.MerkleProof(addr)
}
}
}

func benchMerkleRootCalculation(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
// Setup: allocate some memory
for i := 0; i < size; i++ {
m.SetUnaligned(uint64(i)*8, []byte{byte(i)})
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = m.MerkleRoot()
}
}
}

func TestMemoryMerkleProof(t *testing.T) {
t.Run("nearly empty tree", func(t *testing.T) {
m := NewMemory()
@@ -419,30 +296,35 @@ func TestMemoryMerkleRoot(t *testing.T) {
require.Equal(t, zeroHashes[64-5], root, "zero still")
})

//t.Run("random few pages", func(t *testing.T) {
// m := NewMemory()
// m.SetUnaligned(PageSize*3, []byte{1})
// m.SetUnaligned(PageSize*5, []byte{42})
// m.SetUnaligned(PageSize*6, []byte{123})
// p3 := m.MerkleizeNode(m.radix, (1<<PageKeySize)|3, 0)
// p5 := m.MerkleizeNode(m.radix, (1<<PageKeySize)|5, 0)
// p6 := m.MerkleizeNode(m.radix, (1<<PageKeySize)|6, 0)
// z := zeroHashes[PageAddrSize-5]
// r1 := HashPair(
// HashPair(
// HashPair(z, z), // 0,1
// HashPair(z, p3), // 2,3
// ),
// HashPair(
// HashPair(z, p5), // 4,5
// HashPair(p6, z), // 6,7
// ),
// )
// r2 := m.MerkleizeNode(m.radix, 1<<(PageKeySize-3), 0)
// r3 := m.MerkleizeNode3(m.radix, 1, 0)
// require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func")
// require.Equal(t, r3, r2, "expecting manual page combination to match subtree merkle func")
//})
t.Run("random few pages", func(t *testing.T) {
m := NewMemory()
m.SetUnaligned(PageSize*3, []byte{1})
m.SetUnaligned(PageSize*5, []byte{42})
m.SetUnaligned(PageSize*6, []byte{123})

p0 := m.MerkleizeNodeLevel1(m.radix, 0, 8)
p1 := m.MerkleizeNodeLevel1(m.radix, 0, 9)
p2 := m.MerkleizeNodeLevel1(m.radix, 0, 10)
p3 := m.MerkleizeNodeLevel1(m.radix, 0, 11)
p4 := m.MerkleizeNodeLevel1(m.radix, 0, 12)
p5 := m.MerkleizeNodeLevel1(m.radix, 0, 13)
p6 := m.MerkleizeNodeLevel1(m.radix, 0, 14)
p7 := m.MerkleizeNodeLevel1(m.radix, 0, 15)

r1 := HashPair(
HashPair(
HashPair(p0, p1), // 0,1
HashPair(p2, p3), // 2,3
),
HashPair(
HashPair(p4, p5), // 4,5
HashPair(p6, p7), // 6,7
),
)
r2 := m.MerkleizeNodeLevel1(m.radix, 0, 1)
require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func")
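		// Illustration only, not part of this commit: the manual HashPair
		// nesting above is just a pairwise Merkle fold over the eight
		// level-1 subtree roots. A generic fold, assuming HashPair's
		// ([32]byte, [32]byte) -> [32]byte semantics, reproduces r1:
		fold := func(leaves [][32]byte) [32]byte {
			for len(leaves) > 1 {
				next := make([][32]byte, 0, len(leaves)/2)
				for i := 0; i < len(leaves); i += 2 {
					next = append(next, HashPair(leaves[i], leaves[i+1]))
				}
				leaves = next
			}
			return leaves[0]
		}
		require.Equal(t, r1, fold([][32]byte{p0, p1, p2, p3, p4, p5, p6, p7}), "pairwise fold matches manual nesting")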
})

t.Run("invalidate page", func(t *testing.T) {
m := NewMemory()
m.SetUnaligned(0xF000, []byte{0})
@@ -517,3 +399,126 @@ func TestMemoryJSON(t *testing.T) {
m.GetUnaligned(8, dest[:])
require.Equal(t, uint8(123), dest[0])
}

const (
smallDataset = 1_000
mediumDataset = 100_000
largeDataset = 1_000_000
)

func BenchmarkMemoryOperations(b *testing.B) {
benchmarks := []struct {
name string
fn func(b *testing.B, m *Memory)
}{
{"RandomReadWrite_Small", benchRandomReadWrite(smallDataset)},
{"RandomReadWrite_Medium", benchRandomReadWrite(mediumDataset)},
{"RandomReadWrite_Large", benchRandomReadWrite(largeDataset)},
{"SequentialReadWrite_Small", benchSequentialReadWrite(smallDataset)},
{"SequentialReadWrite_Large", benchSequentialReadWrite(largeDataset)},
{"SparseMemoryUsage", benchSparseMemoryUsage},
{"DenseMemoryUsage", benchDenseMemoryUsage},
{"SmallFrequentUpdates", benchSmallFrequentUpdates},
{"MerkleProofGeneration_Small", benchMerkleProofGeneration(smallDataset)},
{"MerkleProofGeneration_Large", benchMerkleProofGeneration(largeDataset)},
{"MerkleRootCalculation_Small", benchMerkleRootCalculation(smallDataset)},
{"MerkleRootCalculation_Large", benchMerkleRootCalculation(largeDataset)},
}

for _, bm := range benchmarks {
b.Run(bm.name, func(b *testing.B) {
m := NewMemory()
b.ResetTimer()
bm.fn(b, m)
})
}
}

func benchRandomReadWrite(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
addresses := make([]uint64, size)
for i := range addresses {
addresses[i] = mathrand.Uint64()
}
data := make([]byte, 8)

b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := addresses[i%len(addresses)]
if i%2 == 0 {
m.SetUnaligned(addr, data)
} else {
m.GetUnaligned(addr, data)
}
}
}
}

func benchSequentialReadWrite(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
data := make([]byte, 8)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(i % size)
if i%2 == 0 {
m.SetUnaligned(addr, data)
} else {
m.GetUnaligned(addr, data)
}
}
}
}

func benchSparseMemoryUsage(b *testing.B, m *Memory) {
data := make([]byte, 8)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(i) * 10_000_000 // Large gaps between addresses
m.SetUnaligned(addr, data)
}
}

func benchDenseMemoryUsage(b *testing.B, m *Memory) {
data := make([]byte, 8)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(i) * 8 // Contiguous 8-byte allocations
m.SetUnaligned(addr, data)
}
}

func benchSmallFrequentUpdates(b *testing.B, m *Memory) {
data := make([]byte, 1)
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := mathrand.Uint64() % 1000000 // Confined to a smaller range
m.SetUnaligned(addr, data)
}
}

func benchMerkleProofGeneration(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
// Setup: allocate some memory
for i := 0; i < size; i++ {
m.SetUnaligned(uint64(i)*8, []byte{byte(i)})
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
addr := uint64(mathrand.Intn(size) * 8)
_ = m.MerkleProof(addr)
}
}
}

func benchMerkleRootCalculation(size int) func(b *testing.B, m *Memory) {
return func(b *testing.B, m *Memory) {
// Setup: allocate some memory
for i := 0; i < size; i++ {
m.SetUnaligned(uint64(i)*8, []byte{byte(i)})
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = m.MerkleRoot()
}
}
}
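Assuming a standard Go toolchain and that the file above lives at rvgo/fast/memory_test.go, the relocated benchmarks can be run from the repository root with, for example:

	go test ./rvgo/fast -run='^$' -bench=BenchmarkMemoryOperations -benchmem

Here -run='^$' skips the unit tests so only the benchmarks execute, and -benchmem adds allocation counts, which is useful when comparing the sparse and dense memory-usage cases.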
20 changes: 16 additions & 4 deletions rvgo/fast/radix.go
@@ -141,7 +141,6 @@ func (m *Memory) Invalidate(addr uint64) {
}

func (m *Memory) MerkleizeNodeLevel1(node *RadixNodeLevel1, addr, gindex uint64) [32]byte {

depth := uint64(bits.Len64(gindex))

if depth <= BF1 {
@@ -157,15 +156,18 @@ func (m *Memory) MerkleizeNodeLevel1(node *RadixNodeLevel1, addr, gindex uint64)

r := HashPair(left, right)
node.Hashes[gindex] = r
//node.HashExists[hashIndex] |= 1 << hashBit
node.HashValid[hashIndex] |= 1 << hashBit
return r
}
} else {
return zeroHashes[64-5+1-depth]
}
}

	if depth > BF1<<1 {
		panic("gindex too deep")
	}

childIndex := gindex - 1<<BF1
if node.Children[childIndex] == nil {
return zeroHashes[64-5+1-depth]
@@ -200,6 +202,10 @@ func (m *Memory) MerkleizeNodeLevel2(node *RadixNodeLevel2, addr, gindex uint64)
}
}

	if depth > BF2<<1 {
		panic("gindex too deep")
	}

childIndex := gindex - 1<<BF2
if node.Children[childIndex] == nil {
return zeroHashes[64-5+1-(depth+BF1)]
@@ -233,6 +239,10 @@ func (m *Memory) MerkleizeNodeLevel3(node *RadixNodeLevel3, addr, gindex uint64)
}
}

	if depth > BF3<<1 {
		panic("gindex too deep")
	}

childIndex := gindex - 1<<BF3
if node.Children[childIndex] == nil {
return zeroHashes[64-5+1-(depth+BF1+BF2)]
@@ -267,6 +277,10 @@ func (m *Memory) MerkleizeNodeLevel4(node *RadixNodeLevel4, addr, gindex uint64)
}
}

	if depth > BF4<<1 {
		panic("gindex too deep")
	}

childIndex := gindex - 1<<BF4
if node.Children[childIndex] == nil {
return zeroHashes[64-5+1-(depth+BF1+BF2+BF3)]
@@ -382,7 +396,6 @@ func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte {
proofIndex += BF2
levelProofs := m.GenerateProof2(currentLevel2, addr>>(PageAddrSize+BF5+BF4+BF3+BF2), branch2)
copy(proofs[60-proofIndex:60-proofIndex+BF2], levelProofs)

} else {
fillZeroHashes(proofs[:], 0, 60-proofIndex)
return encodeProofs(proofs)
@@ -395,7 +408,6 @@ func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte {
proofIndex += BF3
levelProofs := m.GenerateProof3(currentLevel3, addr>>(PageAddrSize+BF5+BF4+BF3), branch3)
copy(proofs[60-proofIndex:60-proofIndex+BF3], levelProofs)

} else {
fillZeroHashes(proofs[:], 0, 60-proofIndex)
return encodeProofs(proofs)
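The new guards in MerkleizeNodeLevel1 through MerkleizeNodeLevel4 all follow the same generalized-index arithmetic: the bit length of gindex is its depth within one radix level, depths up to the level's branching factor address internal cached hashes, the next level crosses into a child slot, and anything deeper than twice the branching factor is unreachable and now panics. A minimal, self-contained sketch of that bookkeeping, where BF is an assumed stand-in for the package's BF1..BF4 constants and the sample gindices are illustrative only:

	package main

	import (
		"fmt"
		"math/bits"
	)

	const BF = 4 // assumption for the demo, not the fast package's actual constant

	func main() {
		for _, gindex := range []uint64{1, 15, 16, 31, 512} {
			depth := uint64(bits.Len64(gindex)) // root has depth 1
			switch {
			case depth <= BF:
				// Hash cached in this level's own node table.
				fmt.Printf("gindex %3d: internal node at depth %d\n", gindex, depth)
			case depth <= BF<<1:
				// Crosses into a child; at the boundary (depth BF+1) the
				// slot is gindex - 1<<BF, matching childIndex in radix.go.
				fmt.Printf("gindex %3d: child slot %d\n", gindex, gindex-1<<BF)
			default:
				// Deeper than one level can address: the new panic path.
				fmt.Printf("gindex %3d: too deep\n", gindex)
			}
		}
	}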
