diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go
index 51e6086..e4632bf 100644
--- a/rvgo/fast/memory_test.go
+++ b/rvgo/fast/memory_test.go
@@ -167,10 +167,10 @@ func TestMemoryMerkleProof(t *testing.T) {
 	t.Run("large addresses", func(t *testing.T) {
 		m := NewMemory()
 		addresses := []uint64{
-			0x3FFFFFFFFFFFC,
-			0x3FFFFFFFFFFFD,
-			0x3FFFFFFFFFFFE,
-			0x3FFFFFFFFFFF,
+			0x10_00_00_00_00_00_00_00,
+			0x10_00_00_00_00_00_00_02,
+			0x10_00_00_00_00_00_00_04,
+			0x10_00_00_00_00_00_00_06,
 		}
 		for i, addr := range addresses {
 			m.SetUnaligned(addr, []byte{byte(i + 1)})
@@ -182,6 +182,70 @@ func TestMemoryMerkleProof(t *testing.T) {
 		}
 	})
 }
+func TestMerkleProofWithPartialPaths(t *testing.T) {
+	testCases := []struct {
+		name        string
+		setupMemory func(*Memory)
+		proofAddr   uint64
+	}{
+		{
+			name: "Path ends at level 1",
+			setupMemory: func(m *Memory) {
+				m.SetUnaligned(0x10_00_00_00_00_00_00_00, []byte{1})
+			},
+			proofAddr: 0x20_00_00_00_00_00_00_00,
+		},
+		{
+			name: "Path ends at level 2",
+			setupMemory: func(m *Memory) {
+				m.SetUnaligned(0x10_00_00_00_00_00_00_00, []byte{1})
+			},
+			proofAddr: 0x11_00_00_00_00_00_00_00,
+		},
+		{
+			name: "Path ends at level 3",
+			setupMemory: func(m *Memory) {
+				m.SetUnaligned(0x10_10_00_00_00_00_00_00, []byte{1})
+			},
+			proofAddr: 0x10_11_00_00_00_00_00_00,
+		},
+		{
+			name: "Path ends at level 4",
+			setupMemory: func(m *Memory) {
+				m.SetUnaligned(0x10_10_10_00_00_00_00_00, []byte{1})
+			},
+			proofAddr: 0x10_10_11_00_00_00_00_00,
+		},
+		{
+			name: "Full path to level 5, page doesn't exist",
+			setupMemory: func(m *Memory) {
+				m.SetUnaligned(0x10_10_10_10_00_00_00_00, []byte{1})
+			},
+			proofAddr: 0x10_10_10_10_10_00_00_00, // Different page in the same level 5 node
+		},
+		{
+			name: "Path ends at level 3, check different page offsets",
+			setupMemory: func(m *Memory) {
+				m.SetUnaligned(0x10_10_00_00_00_00_00_00, []byte{1})
+				m.SetUnaligned(0x10_10_00_00_00_00_10_00, []byte{2})
+			},
+			proofAddr: 0x10_10_00_00_00_00_20_00, // Different offset in the same page
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			m := NewMemory()
+			tc.setupMemory(m)
+
+			proof := m.MerkleProof(tc.proofAddr)
+
+			// Check that the proof is filled correctly
+			verifyProof(t, m.MerkleRoot(), proof, tc.proofAddr)
+			//checkProof(t, proof, tc.expectedDepth)
+		})
+	}
+}
 
 func verifyProof(t *testing.T, expectedRoot [32]byte, proof [ProofLen * 32]byte, addr uint64) {
 	node := *(*[32]byte)(proof[:32])
diff --git a/rvgo/fast/radix.go b/rvgo/fast/radix.go
index 9bbec1e..e7c3e81 100644
--- a/rvgo/fast/radix.go
+++ b/rvgo/fast/radix.go
@@ -13,20 +13,6 @@ const (
 	BF5 = 12
 )
 
-type RadixNode interface {
-	merkleize(m *Memory, addr, gindex uint64) [32]byte
-	//getChild(index uint64) RadixNode
-	//setChild(index uint64, child RadixNode)
-	invalidateHashes(branch uint64)
-}
-
-//func (n *baseRadixNode) invalidateHashes(branch uint64) {
-//	for index := branch + (1 << 10); index > 0; index /= 2 {
-//		n.HashCache[index] = false
-//		n.Hashes[index] = [32]byte{}
-//	}
-//}
-
 type RadixNodeLevel1 struct {
 	Children [1 << BF1]*RadixNodeLevel2
 	Hashes   [2 * 1 << BF1][32]byte
@@ -171,6 +157,9 @@ func (m *Memory) MerkleizeNodeLevel2(node *RadixNodeLevel2, addr, gindex uint64) {
 	}
 
 	depth := uint64(bits.Len64(gindex))
+	if node == nil {
+		return zeroHashes[64-5+1-depth]
+	}
 
 	if node.HashCache[gindex] {
 		if node.Hashes[gindex] == [32]byte{} {
@@ -350,38 +339,43 @@ func (m *Memory) GenerateProof5(node *RadixNodeLevel5, addr, target uint64) [][3
 	return proofs
 }
+
 func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte {
 	var proofs [60][32]byte
-	proofIndex := 0 // Start from the beginning, as we're building the proof from page to root
 
 	branchPaths := m.addressToBranchPath(addr)
 
-	// Page-level proof
-	pageGindex := PageSize>>5 + (addr&PageAddrMask)>>5
-	pageIndex := addr >> PageAddrSize
+	// Level 1
+	proofIndex := BF1
+	currentLevel1 := m.radix
+	branch1 := branchPaths[0]
+
+	levelProofs := m.GenerateProof1(currentLevel1, 0, branch1)
+	copy(proofs[60-proofIndex:60], levelProofs)
+
+	// Level 2
+	currentLevel2 := m.radix.Children[branchPaths[0]]
+	if currentLevel2 != nil {
+		branch2 := branchPaths[1]
+		proofIndex += BF2
+		levelProofs := m.GenerateProof2(currentLevel2, addr>>(PageAddrSize+BF5+BF4+BF3+BF2), branch2)
+		copy(proofs[60-proofIndex:60-proofIndex+BF2], levelProofs)
 
-	if p, ok := m.pages[pageIndex]; ok {
-		proofs[proofIndex] = p.MerkleizeSubtree(pageGindex)
-		proofIndex++
-		for idx := pageGindex; idx > 1; idx /= 2 {
-			sibling := idx ^ 1
-			proofs[proofIndex] = p.MerkleizeSubtree(uint64(sibling))
-			proofIndex++
-		}
 	} else {
-		fillZeroHashes(proofs[:], proofIndex, proofIndex+7, 12)
-		proofIndex += 8
+		fillZeroHashes(proofs[:], 0, 60-proofIndex)
+		return encodeProofs(proofs)
 	}
 
-	// Level 5
-	currentLevel5 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]].Children[branchPaths[2]].Children[branchPaths[3]]
-	if currentLevel5 != nil {
-		branch5 := branchPaths[4]
-		levelProofs := m.GenerateProof5(currentLevel5, addr>>(pageKeySize-BF1-BF2-BF3-BF4), branch5)
-		copy(proofs[proofIndex:proofIndex+12], levelProofs)
-		proofIndex += 12
+	// Level 3
+	currentLevel3 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]]
+	if currentLevel3 != nil {
+		branch3 := branchPaths[2]
+		proofIndex += BF3
+		levelProofs := m.GenerateProof3(currentLevel3, addr>>(PageAddrSize+BF5+BF4+BF3), branch3)
+		copy(proofs[60-proofIndex:60-proofIndex+BF3], levelProofs)
+
 	} else {
-		fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 22)
+		fillZeroHashes(proofs[:], 0, 60-proofIndex)
 		return encodeProofs(proofs)
 	}
 
@@ -389,50 +383,52 @@ func (m *Memory) MerkleProof(addr uint64) [ProofLen * 32]byte {
 	currentLevel4 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]].Children[branchPaths[2]]
 	if currentLevel4 != nil {
 		branch4 := branchPaths[3]
-		levelProofs := m.GenerateProof4(currentLevel4, addr>>(pageKeySize-BF1-BF2-BF3), branch4)
-		copy(proofs[proofIndex:proofIndex+10], levelProofs)
-		proofIndex += 10
+		levelProofs := m.GenerateProof4(currentLevel4, addr>>(PageAddrSize+BF5+BF4), branch4)
+		proofIndex += BF4
+		copy(proofs[60-proofIndex:60-proofIndex+BF4], levelProofs)
 	} else {
-		fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 32)
+		fillZeroHashes(proofs[:], 0, 60-proofIndex)
 		return encodeProofs(proofs)
 	}
 
-	// Level 3
-	currentLevel3 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]]
-	if currentLevel3 != nil {
-		branch3 := branchPaths[2]
-		levelProofs := m.GenerateProof3(currentLevel3, addr>>(pageKeySize-BF1-BF2), branch3)
-		copy(proofs[proofIndex:proofIndex+10], levelProofs)
-		proofIndex += 10
+	// Level 5
+	currentLevel5 := m.radix.Children[branchPaths[0]].Children[branchPaths[1]].Children[branchPaths[2]].Children[branchPaths[3]]
+	if currentLevel5 != nil {
+		branch5 := branchPaths[4]
+		levelProofs := m.GenerateProof5(currentLevel5, addr>>(PageAddrSize+BF5), branch5)
+		proofIndex += BF5
+		copy(proofs[60-proofIndex:60-proofIndex+BF5], levelProofs)
	} else {
-		fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 42)
+		fillZeroHashes(proofs[:], 0, 60-proofIndex)
 		return encodeProofs(proofs)
 	}
 
-	// Level 2
-	currentLevel2 := m.radix.Children[branchPaths[0]]
-	if currentLevel2 != nil {
-		branch2 := branchPaths[1]
-		levelProofs := m.GenerateProof2(currentLevel2, addr>>(pageKeySize-BF1), branch2)
-		copy(proofs[proofIndex:proofIndex+10], levelProofs)
-		proofIndex += 10
+	// Page-level proof
+	pageGindex := PageSize>>5 + (addr&PageAddrMask)>>5
+	pageIndex := addr >> PageAddrSize
+
+	proofIndex = 0
+	if p, ok := m.pages[pageIndex]; ok {
+		proofs[proofIndex] = p.MerkleizeSubtree(pageGindex)
+		for idx := pageGindex; idx > 1; idx /= 2 {
+			sibling := idx ^ 1
+			proofIndex++
+			proofs[proofIndex] = p.MerkleizeSubtree(uint64(sibling))
+		}
 	} else {
-		fillZeroHashes(proofs[:], proofIndex, proofIndex+9, 52)
-		return encodeProofs(proofs)
+		fillZeroHashes(proofs[:], 0, 7)
 	}
 
-	// Level 1
-	currentLevel1 := m.radix
-	branch1 := branchPaths[0]
-	levelProofs := m.GenerateProof1(currentLevel1, 0, branch1)
-	copy(proofs[proofIndex:proofIndex+10], levelProofs)
-
 	return encodeProofs(proofs)
 }
 
-func fillZeroHashes(proofs [][32]byte, start, end int, startingBitDepth int) {
+func fillZeroHashes(proofs [][32]byte, start, end int) {
+	if start == 0 {
+		proofs[0] = zeroHashes[0]
+		start++
+	}
 	for i := start; i <= end; i++ {
-		proofs[i] = zeroHashes[startingBitDepth-(i-start)]
+		proofs[i] = zeroHashes[i-1]
 	}
 }
 
@@ -472,24 +468,31 @@ func (m *Memory) AllocPage(pageIndex uint64) *CachedPage {
 	if currentLevel1.Children[branch1] == nil {
 		currentLevel1.Children[branch1] = &RadixNodeLevel2{}
 	}
+	currentLevel1.invalidateHashes(branchPaths[0])
 	currentLevel2 := currentLevel1.Children[branch1]
 
 	branch2 := branchPaths[1]
 	if currentLevel2.Children[branch2] == nil {
 		currentLevel2.Children[branch2] = &RadixNodeLevel3{}
 	}
+	currentLevel2.invalidateHashes(branchPaths[1])
 	currentLevel3 := currentLevel2.Children[branch2]
 
 	branch3 := branchPaths[2]
 	if currentLevel3.Children[branch3] == nil {
 		currentLevel3.Children[branch3] = &RadixNodeLevel4{}
 	}
+	currentLevel3.invalidateHashes(branchPaths[2])
 	currentLevel4 := currentLevel3.Children[branch3]
 
 	branch4 := branchPaths[3]
 	if currentLevel4.Children[branch4] == nil {
 		currentLevel4.Children[branch4] = &RadixNodeLevel5{}
 	}
+	currentLevel4.invalidateHashes(branchPaths[3])
+
+	currentLevel5 := currentLevel4.Children[branchPaths[3]]
+	currentLevel5.invalidateHashes(branchPaths[4]) // For Level 5, we don't need to allocate a child node
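
For reference, below is a minimal, self-contained sketch of how a consumer could fold the leaf-first proof layout produced by the new MerkleProof back into a root, in the spirit of verifyProof in memory_test.go. It is illustrative only and not part of the diff: hashPair, computeRoot, and the standalone package are assumed names, and keccak-256 pair hashing is assumed to match the package's zeroHashes construction.

// Sketch (assumption): proof[0:32] is the 32-byte leaf containing addr, followed
// by 59 sibling hashes ordered from the page level up to the radix root.
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

const proofLen = 60 // 1 leaf + 59 siblings for 32-byte leaves over a 64-bit address space

// hashPair hashes a left/right node pair with keccak-256 (assumed to match zeroHashes).
func hashPair(left, right [32]byte) [32]byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(left[:])
	h.Write(right[:])
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

// computeRoot folds a leaf-to-root proof for the 32-byte chunk holding addr.
func computeRoot(proof [proofLen * 32]byte, addr uint64) [32]byte {
	var node [32]byte
	copy(node[:], proof[:32])
	path := addr >> 5 // index of the 32-byte chunk; the low bit picks left/right at each level
	for i := 1; i < proofLen; i++ {
		var sibling [32]byte
		copy(sibling[:], proof[i*32:(i+1)*32])
		if path&1 == 1 {
			node = hashPair(sibling, node) // current node is the right child
		} else {
			node = hashPair(node, sibling) // current node is the left child
		}
		path >>= 1
	}
	return node
}

func main() {
	var proof [proofLen * 32]byte // in the tests this would come from m.MerkleProof(addr)
	fmt.Printf("%x\n", computeRoot(proof, 0x10_00_00_00_00_00_00_00))
}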