Merge branch 'Layr-Labs:main' into main
Inkvi authored Jan 27, 2025
2 parents da53b3d + 5530ac4 commit 5613557
Showing 12 changed files with 349 additions and 189 deletions.
File renamed without changes.
319 changes: 193 additions & 126 deletions README.md

Large diffs are not rendered by default.

19 changes: 19 additions & 0 deletions SECURITY.md
@@ -0,0 +1,19 @@
# Security Policy

## Version Information

Please see [Releases](https://github.com/Layr-Labs/eigenda-proxy/releases)

## Audit reports

Audit reports are published in the `docs/audits` folder: https://github.com/Layr-Labs/eigenda-proxy/tree/main/docs/audits

| Date | Report Link |
| ------- | ----------- |
| 2025-01 | [pdf](https://github.com/Layr-Labs/eigenda-proxy/blob/main/docs/audits/Sigma_Prime_EigenDA_Proxy_Security_Assessment_Report.pdf) |

## Reporting a Vulnerability

**Please do not file a public ticket** mentioning the vulnerability.

Please report security vulnerabilities to [email protected] with all the relevant details included in the email.
8 changes: 8 additions & 0 deletions common/consts/consts.go
@@ -0,0 +1,8 @@
package consts

// EthHappyPathFinalizationDepth is the number of blocks that must be included on top of a block for it to be considered "final",
// under happy-path aka normal network conditions.
//
// See https://www.alchemy.com/overviews/ethereum-commitment-levels for a quick TLDR explanation,
// or https://eth2book.info/capella/part3/transition/epoch/#finalisation for full details.
var EthHappyPathFinalizationDepthBlocks = uint8(64)
11 changes: 8 additions & 3 deletions flags/eigendaflags/cli.go
@@ -2,10 +2,10 @@ package eigendaflags

import (
"fmt"
"log"
"strconv"
"time"

"github.com/Layr-Labs/eigenda-proxy/common/consts"
"github.com/Layr-Labs/eigenda/api/clients"
"github.com/Layr-Labs/eigenda/api/clients/codecs"
"github.com/urfave/cli/v2"
@@ -118,6 +118,7 @@ func CLIFlags(envPrefix, category string) []cli.Flag {
Category: category,
},
&cli.BoolFlag{
// This flag is DEPRECATED. Use ConfirmationDepthFlagName, which accepts "finalized" or a number <64.
Name: WaitForFinalizationFlagName,
Usage: "Wait for blob finalization before returning from PutBlob.",
EnvVars: []string{withEnvPrefix(envPrefix, "WAIT_FOR_FINALIZATION")},
@@ -209,8 +210,12 @@ func validateConfirmationFlag(val string) error {
return fmt.Errorf("confirmation-depth must be either 'finalized' or a number, got: %s", val)
}

if depth >= 64 {
log.Printf("Warning: confirmation depth set to %d, which is > 2 epochs (64). Consider using 'finalized' instead.\n", depth)
if depth >= uint64(consts.EthHappyPathFinalizationDepthBlocks) {
// We keep this low (<128) to avoid requiring an archive node (see how this is used in CertVerifier).
// Note: assuming here that no sane person would ever need to set this to a number >64.
// But perhaps someone testing crazy reorg scenarios where finalization takes >2 epochs might want to set this to a higher number.
// Do keep in mind if you ever change this that it might affect a LOT of validators on your rollup who would now need an archival node.
return fmt.Errorf("confirmation depth set to %d, which is > 2 epochs (64). Use 'finalized' instead", depth)
}

return nil
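Since the first half of `validateConfirmationFlag` is collapsed in this diff, here is a minimal sketch of what the full validator plausibly looks like after this change; the parsing details are assumptions inferred from the visible error message, not part of the diff.

```go
// Sketch only: the strconv parsing shown here is assumed, not taken from the diff.
func validateConfirmationFlag(val string) error {
	if val == "finalized" {
		return nil
	}
	depth, err := strconv.ParseUint(val, 10, 64)
	if err != nil {
		return fmt.Errorf("confirmation-depth must be either 'finalized' or a number, got: %s", val)
	}
	if depth >= uint64(consts.EthHappyPathFinalizationDepthBlocks) {
		// Kept low (<64) so the CertVerifier never needs an archive node.
		return fmt.Errorf("confirmation depth set to %d, which is > 2 epochs (64). Use 'finalized' instead", depth)
	}
	return nil
}
```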
Binary file added resources/payload-blob-poly-lifecycle.png
Binary file added resources/sequence-diagram.png
8 changes: 4 additions & 4 deletions server/handlers.go
@@ -37,7 +37,7 @@ func (svr *Server) handleGetStdCommitment(w http.ResponseWriter, r *http.Request
CertVersion: versionByte,
}

rawCommitmentHex, ok := mux.Vars(r)[routingVarNameRawCommitmentHex]
rawCommitmentHex, ok := mux.Vars(r)[routingVarNamePayloadHex]
if !ok {
return fmt.Errorf("commitment not found in path: %s", r.URL.Path)
}
@@ -63,7 +63,7 @@ func (svr *Server) handleGetOPKeccakCommitment(w http.ResponseWriter, r *http.Re
CertVersion: byte(commitments.CertV0),
}

rawCommitmentHex, ok := mux.Vars(r)[routingVarNameRawCommitmentHex]
rawCommitmentHex, ok := mux.Vars(r)[routingVarNamePayloadHex]
if !ok {
return fmt.Errorf("commitment not found in path: %s", r.URL.Path)
}
@@ -86,7 +86,7 @@ func (svr *Server) handleGetOPGenericCommitment(w http.ResponseWriter, r *http.R
CertVersion: versionByte,
}

rawCommitmentHex, ok := mux.Vars(r)[routingVarNameRawCommitmentHex]
rawCommitmentHex, ok := mux.Vars(r)[routingVarNamePayloadHex]
if !ok {
return fmt.Errorf("commitment not found in path: %s", r.URL.Path)
}
@@ -146,7 +146,7 @@ func (svr *Server) handlePostOPKeccakCommitment(w http.ResponseWriter, r *http.R
CertVersion: byte(commitments.CertV0),
}

rawCommitmentHex, ok := mux.Vars(r)[routingVarNameRawCommitmentHex]
rawCommitmentHex, ok := mux.Vars(r)[routingVarNamePayloadHex]
if !ok {
return fmt.Errorf("commitment not found in path: %s", r.URL.Path)
}
10 changes: 5 additions & 5 deletions server/routing.go
@@ -9,7 +9,7 @@ import (
)

const (
routingVarNameRawCommitmentHex = "raw_commitment_hex"
routingVarNamePayloadHex = "payload_hex"
routingVarNameVersionByteHex = "version_byte_hex"
routingVarNameCommitTypeByteHex = "commit_type_byte_hex"
)
@@ -20,7 +20,7 @@ func (svr *Server) registerRoutes(r *mux.Router) {
subrouterGET.HandleFunc("/"+
"{optional_prefix:(?:0x)?}"+ // commitments can be prefixed with 0x
"{"+routingVarNameVersionByteHex+":[0-9a-fA-F]{2}}"+ // should always be 0x00 for now but we let others through to return a 404
"{"+routingVarNameRawCommitmentHex+":[0-9a-fA-F]*}",
"{"+routingVarNamePayloadHex+":[0-9a-fA-F]*}",
withLogging(withMetrics(svr.handleGetStdCommitment, svr.m, commitments.Standard), svr.log),
).Queries("commitment_mode", "standard")
// op keccak256 commitments (write to S3)
@@ -30,7 +30,7 @@ func (svr *Server) registerRoutes(r *mux.Router) {
// we don't use version_byte for keccak commitments, because not expecting keccak commitments to change,
// but perhaps we should (in case we want a v2 to use another hash for eg?)
// "{version_byte_hex:[0-9a-fA-F]{2}}"+ // should always be 0x00 for now but we let others through to return a 404
"{"+routingVarNameRawCommitmentHex+"}",
"{"+routingVarNamePayloadHex+"}",
withLogging(withMetrics(svr.handleGetOPKeccakCommitment, svr.m, commitments.OptimismKeccak), svr.log),
)
// op generic commitments (write to EigenDA)
@@ -39,7 +39,7 @@ func (svr *Server) registerRoutes(r *mux.Router) {
"{"+routingVarNameCommitTypeByteHex+":01}"+ // 01 for generic commitments
"{da_layer_byte:[0-9a-fA-F]{2}}"+ // should always be 0x00 for eigenDA but we let others through to return a 404
"{"+routingVarNameVersionByteHex+":[0-9a-fA-F]{2}}"+ // should always be 0x00 for now but we let others through to return a 404
"{"+routingVarNameRawCommitmentHex+"}",
"{"+routingVarNamePayloadHex+"}",
withLogging(withMetrics(svr.handleGetOPGenericCommitment, svr.m, commitments.OptimismGeneric), svr.log),
)
// unrecognized op commitment type (not 00 or 01)
@@ -66,7 +66,7 @@ func (svr *Server) registerRoutes(r *mux.Router) {
// we don't use version_byte for keccak commitments, because not expecting keccak commitments to change,
// but perhaps we should (in case we want a v2 to use another hash for eg?)
// "{version_byte_hex:[0-9a-fA-F]{2}}"+ // should always be 0x00 for now but we let others through to return a 404
"{"+routingVarNameRawCommitmentHex+"}",
"{"+routingVarNamePayloadHex+"}",
withLogging(withMetrics(svr.handlePostOPKeccakCommitment, svr.m, commitments.OptimismKeccak), svr.log),
)
// op generic commitments (write to EigenDA)
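To make the route patterns above concrete, here is a hedged example of the GET requests a client might issue. The host/port, the payload hex, and any path prefix in front of the commitment (hidden by the collapsed parts of the registration) are illustrative assumptions; only the byte layout and the `commitment_mode=standard` query parameter come from the routes shown.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Illustrative values only: host/port, payload hex, and path prefix are assumptions.
	base := "http://localhost:3100"

	// Standard commitment: optional "0x" + 2-hex version byte (00) + payload hex,
	// selected with the commitment_mode=standard query parameter.
	stdURL := base + "/0x00d3adb33f?commitment_mode=standard"

	// OP generic commitment: commit-type byte (01) + DA-layer byte (00) +
	// version byte (00) + payload hex.
	opGenericURL := base + "/010000d3adb33f"

	for _, u := range []string{stdURL, opGenericURL} {
		resp, err := http.Get(u)
		if err != nil {
			fmt.Println("request failed:", err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Printf("%s -> %s (%d bytes)\n", u, resp.Status, len(body))
	}
}
```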
111 changes: 86 additions & 25 deletions verify/cert.go
@@ -4,9 +4,11 @@ import (
"bytes"
"context"
"fmt"
"math"
"math/big"
"time"

"github.com/Layr-Labs/eigenda-proxy/common/consts"
"github.com/Layr-Labs/eigenda/api/grpc/disperser"
binding "github.com/Layr-Labs/eigenda/contracts/bindings/EigenDAServiceManager"

@@ -22,14 +24,27 @@ import (
// CertVerifier verifies the DA certificate against on-chain EigenDA contracts
// to ensure disperser returned fields haven't been tampered with
type CertVerifier struct {
l log.Logger
l log.Logger
// ethConfirmationDepth is used to verify that a blob's batch commitment has been bridged to the EigenDAServiceManager contract at least
// this many blocks in the past. To do so we make an eth_call to the contract at the current block_number - ethConfirmationDepth.
// Hence in order to not require an archive node, this value should be kept low. We force it to be < 64 (consts.EthHappyPathFinalizationDepthBlocks).
// waitForFinalization should be used instead of ethConfirmationDepth if the user wants to wait for finality (typically 64 blocks in happy case).
ethConfirmationDepth uint64
waitForFinalization bool
manager *binding.ContractEigenDAServiceManagerCaller
ethClient *ethclient.Client
// The two fields below are fetched from the EigenDAServiceManager contract in the constructor.
// They are used to verify the quorums in the received certificates.
// See getQuorumParametersAtLatestBlock for more details.
quorumsRequired []uint8
quorumAdversaryThresholds map[uint8]uint8
}

func NewCertVerifier(cfg *Config, l log.Logger) (*CertVerifier, error) {
if cfg.EthConfirmationDepth >= uint64(consts.EthHappyPathFinalizationDepthBlocks) {
// We keep this low (<128) to avoid requiring an archive node.
return nil, fmt.Errorf("confirmation depth must be less than 64; consider using cfg.WaitForFinalization=true instead")
}
log.Info("Enabling certificate verification", "confirmation_depth", cfg.EthConfirmationDepth)

client, err := ethclient.Dial(cfg.RPCURL)
@@ -43,11 +58,18 @@ func NewCertVerifier(cfg *Config, l log.Logger) (*CertVerifier, error) {
return nil, err
}

quorumsRequired, quorumAdversaryThresholds, err := getQuorumParametersAtLatestBlock(m)
if err != nil {
return nil, fmt.Errorf("failed to fetch quorum parameters from EigenDAServiceManager: %w", err)
}

return &CertVerifier{
l: l,
manager: m,
ethConfirmationDepth: cfg.EthConfirmationDepth,
ethClient: client,
l: l,
manager: m,
ethConfirmationDepth: cfg.EthConfirmationDepth,
ethClient: client,
quorumsRequired: quorumsRequired,
quorumAdversaryThresholds: quorumAdversaryThresholds,
}, nil
}
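As a rough usage sketch of the two confirmation modes described in the struct comment: the field names below (`RPCURL`, `EthConfirmationDepth`, `WaitForFinalization`) are taken from the `cfg` accesses visible in this diff, but any other required `Config` fields (e.g. the service manager address) are omitted, and the values and logger are placeholders.

```go
// Sketch only: not part of the diff; other required Config fields are omitted.
cfg := &verify.Config{
	RPCURL:               "http://localhost:8545", // assumed local eth node
	EthConfirmationDepth: 6,                       // must stay < 64 so an archive node is never needed
	WaitForFinalization:  false,                   // set true instead to wait ~2 epochs for finality
}
verifier, err := verify.NewCertVerifier(cfg, logger)
if err != nil {
	return fmt.Errorf("creating cert verifier: %w", err)
}
_ = verifier
```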

@@ -56,27 +78,22 @@ func NewCertVerifier(cfg *Config, l log.Logger) (*CertVerifier, error) {
func (cv *CertVerifier) verifyBatchConfirmedOnChain(
ctx context.Context, batchID uint32, batchMetadata *disperser.BatchMetadata,
) error {
// 1. Verify batch is actually onchain at the batchMetadata's state confirmedBlockNumber.
// This is super unlikely if the disperser is honest, but it could technically happen that a confirmed batch's block gets reorged out,
// yet the tx is included in an earlier or later block, making the batchMetadata received from the disperser
// no longer valid. The eigenda batcher does check for these reorgs and updates the batch's confirmation block number:
// https://github.com/Layr-Labs/eigenda/blob/bee55ed9207f16153c3fd8ebf73c219e68685def/disperser/batcher/finalizer.go#L198
// TODO: We could require the disperser for the new batch, or try to reconstruct it ourselves by querying the chain,
// but for now we opt to simply fail the verification, which will force the batcher to resubmit the batch to eigenda.
confirmationBlockNumber := batchMetadata.GetConfirmationBlockNumber()
confirmationBlockNumberBigInt := big.NewInt(0).SetInt64(int64(confirmationBlockNumber))
_, err := cv.retrieveBatchMetadataHash(ctx, batchID, confirmationBlockNumberBigInt)
if err != nil {
return fmt.Errorf("batch not found onchain at supposedly confirmed block %d: %w", confirmationBlockNumber, err)
}

// 2. Verify that the confirmation status has been reached.
// 1. Verify that the confirmation status has been reached.
// The eigenda-client already checks for this, but it is possible for either
// 1. a reorg to happen, causing the batch to be confirmed by fewer number of blocks than required
// 2. proxy's node is behind the eigenda_client's node that deemed the batch confirmed, or
// even if we use the same url, that the connection drops and we get load-balanced to a different eth node.
// We retry up to 60 seconds (allowing for reorgs up to 5 blocks deep), but we only wait 3 seconds between each retry,
// in case (2) is the case and the node simply needs to resync, which could happen fast.
//
// Note that we don't verify that the batch is actually onchain at the batchMetadata's state confirmedBlockNumber, because that would require an archive node.
// This is super unlikely if the disperser is honest, but it could technically happen that a confirmed batch's block gets reorged out,
// yet the tx is included in an earlier or later block, making the batchMetadata received from the disperser
// no longer valid. The eigenda batcher does check for these reorgs and updates the batch's confirmation block number:
// https://github.com/Layr-Labs/eigenda/blob/bee55ed9207f16153c3fd8ebf73c219e68685def/disperser/batcher/finalizer.go#L198
// confirmedBlockNum currentBlock-confirmationDepth currentBlock
// | (don't verify here, need archive node) | (verify here) |
// +-----------------------------------------------------------+-----------------------------+
onchainHash, err := retry.Do(ctx, 20, retry.Fixed(3*time.Second), func() ([32]byte, error) {
blockNumber, err := cv.getConfDeepBlockNumber(ctx)
if err != nil {
@@ -85,23 +102,23 @@ func (cv *CertVerifier) verifyBatchConfirmedOnChain(
return cv.retrieveBatchMetadataHash(ctx, batchID, blockNumber)
})
if err != nil {
return fmt.Errorf("retrieving batch that was confirmed at block %v: %w", confirmationBlockNumber, err)
return fmt.Errorf("retrieving batch that was confirmed at block %v: %w", batchMetadata.GetConfirmationBlockNumber(), err)
}

// 3. Compute the hash of the batch metadata received as argument.
// 2. Compute the hash of the batch metadata received as argument.
header := &binding.IEigenDAServiceManagerBatchHeader{
BlobHeadersRoot: [32]byte(batchMetadata.GetBatchHeader().GetBatchRoot()),
QuorumNumbers: batchMetadata.GetBatchHeader().GetQuorumNumbers(),
ReferenceBlockNumber: batchMetadata.GetBatchHeader().GetReferenceBlockNumber(),
SignedStakeForQuorums: batchMetadata.GetBatchHeader().GetQuorumSignedPercentages(),
}
recordHash := [32]byte(batchMetadata.GetSignatoryRecordHash())
computedHash, err := HashBatchMetadata(header, recordHash, confirmationBlockNumber)
computedHash, err := HashBatchMetadata(header, recordHash, batchMetadata.GetConfirmationBlockNumber())
if err != nil {
return fmt.Errorf("failed to hash batch metadata: %w", err)
}

// 4. Ensure that hash generated from local cert matches one stored on-chain.
// 3. Ensure that hash generated from local cert matches one stored on-chain.
equal := slices.Equal(onchainHash[:], computedHash[:])
if !equal {
return fmt.Errorf("batch hash mismatch, onchain: %x, computed: %x", onchainHash, computedHash)
@@ -155,7 +172,10 @@ func (cv *CertVerifier) getConfDeepBlockNumber(ctx context.Context) (*big.Int, e
}

// retrieveBatchMetadataHash retrieves the batch metadata hash stored on-chain at a specific blockNumber for a given batchID
// returns an error if some problem calling the contract happens, or the hash is not found
// returns an error if some problem calling the contract happens, or the hash is not found.
// We make an eth_call to the EigenDAServiceManager at the given blockNumber to retrieve the hash.
// Therefore, make sure that blockNumber is <128 blocks behind the latest block, to avoid requiring an archive node.
// This is currently enforced by having EthConfirmationDepth be <64.
func (cv *CertVerifier) retrieveBatchMetadataHash(ctx context.Context, batchID uint32, blockNumber *big.Int) ([32]byte, error) {
onchainHash, err := cv.manager.BatchIdToBatchMetadataHash(&bind.CallOpts{Context: ctx, BlockNumber: blockNumber}, batchID)
if err != nil {
@@ -166,3 +186,44 @@ func (cv *CertVerifier) retrieveBatchMetadataHash(ctx context.Context, batchID u
}
return onchainHash, nil
}

// getQuorumParametersAtLatestBlock fetches the required quorums and quorum adversary thresholds
// from the EigenDAServiceManager contract at the latest block.
// We then cache these parameters and use them in the Verifier to verify the certificates.
//
// Note: this strategy (fetching once and caching) only works because these parameters are immutable.
// They might be different in different environments (e.g. on a devnet or testnet), but they are fixed on a given network.
// We used to allow these parameters to change (via a setter function on the contract), but that then forced us here in the proxy
// to query for these parameters on every request, at the batch's reference block number (RBN).
// This in turn required rollup validators running this proxy to have an archive node, in case the RBN was >128 blocks in the past,
// which was not ideal. So we decided to make these parameters immutable, and cache them here.
func getQuorumParametersAtLatestBlock(
manager *binding.ContractEigenDAServiceManagerCaller,
) ([]uint8, map[uint8]uint8, error) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
requiredQuorums, err := manager.QuorumNumbersRequired(&bind.CallOpts{Context: ctx})
if err != nil {
return nil, nil, fmt.Errorf("failed to fetch QuorumNumbersRequired from EigenDAServiceManager: %w", err)
}
ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
thresholds, err := manager.QuorumAdversaryThresholdPercentages(&bind.CallOpts{Context: ctx})
if err != nil {
return nil, nil, fmt.Errorf("failed to fetch QuorumAdversaryThresholdPercentages from EigenDAServiceManager: %w", err)
}
if len(thresholds) > math.MaxUint8 {
return nil, nil, fmt.Errorf("thresholds received from EigenDAServiceManager contains %d > 256 quorums, which isn't possible", len(thresholds))
}
var quorumAdversaryThresholds = make(map[uint8]uint8)
for quorumNum, threshold := range thresholds {
quorumAdversaryThresholds[uint8(quorumNum)] = threshold //nolint:gosec // disable G115 // We checked the length of thresholds above
}
// Sanity check: ensure that the required quorums are a subset of the quorums for which we have adversary thresholds
for _, quorum := range requiredQuorums {
if _, ok := quorumAdversaryThresholds[quorum]; !ok {
return nil, nil, fmt.Errorf("required quorum %d does not have an adversary threshold. Was the EigenDAServiceManager properly deployed?", quorum)
}
}
return requiredQuorums, quorumAdversaryThresholds, nil
}
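A hedged sketch of how the cached parameters might then be consulted when a certificate is verified; `checkCertQuorums` and the shape of the certificate's quorum list are assumptions for illustration, not part of this diff.

```go
// Sketch only: certQuorums stands in for whatever quorum numbers the received certificate carries.
func checkCertQuorums(
	requiredQuorums []uint8,
	adversaryThresholds map[uint8]uint8,
	certQuorums []uint8,
) error {
	present := make(map[uint8]bool, len(certQuorums))
	for _, q := range certQuorums {
		present[q] = true
	}
	for _, q := range requiredQuorums {
		if !present[q] {
			return fmt.Errorf("certificate is missing required quorum %d", q)
		}
		if _, ok := adversaryThresholds[q]; !ok {
			return fmt.Errorf("no cached adversary threshold for quorum %d", q)
		}
	}
	return nil
}
```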