From 8c5a62cd74542fd5400b38c452bcfc50c1ec1bc8 Mon Sep 17 00:00:00 2001 From: Andrew Gouin Date: Thu, 4 Jan 2024 22:16:41 -0700 Subject: [PATCH 1/7] Add ics20 memo limit --- cmd/config.go | 1 + cmd/start.go | 1 + cmd/tx.go | 4 +- .../chains/mock/mock_chain_processor_test.go | 2 +- relayer/channel.go | 3 + relayer/connection.go | 1 + relayer/processor/path_end_runtime.go | 62 +++++++++++++++++-- relayer/processor/path_processor.go | 9 ++- relayer/processor/path_processor_internal.go | 4 +- relayer/strategies.go | 4 ++ 10 files changed, 78 insertions(+), 13 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index a6337ed94..2c7c17330 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -493,6 +493,7 @@ type GlobalConfig struct { Memo string `yaml:"memo" json:"memo"` LightCacheSize int `yaml:"light-cache-size" json:"light-cache-size"` LogLevel string `yaml:"log-level" json:"log-level"` + ICS20MemoLimit int `yaml:"ics20-memo-limit" json:"ics20-memo-limit"` } // newDefaultGlobalConfig returns a global config with defaults set diff --git a/cmd/start.go b/cmd/start.go index 3527de993..16375dbfa 100644 --- a/cmd/start.go +++ b/cmd/start.go @@ -160,6 +160,7 @@ $ %s start demo-path2 --max-tx-size 10`, appName, appName, appName, appName)), chains, paths, maxMsgLength, + a.config.Global.ICS20MemoLimit, a.config.memo(cmd), clientUpdateThresholdTime, flushInterval, diff --git a/cmd/tx.go b/cmd/tx.go index fba474708..4012e4ac5 100644 --- a/cmd/tx.go +++ b/cmd/tx.go @@ -4,10 +4,11 @@ import ( "context" "errors" "fmt" - "github.com/avast/retry-go/v4" "strings" "time" + "github.com/avast/retry-go/v4" + sdk "github.com/cosmos/cosmos-sdk/types" chantypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" "github.com/cosmos/relayer/v2/relayer" @@ -933,6 +934,7 @@ $ %s tx flush demo-path channel-0`, chains, paths, maxMsgLength, + a.config.Global.ICS20MemoLimit, a.config.memo(cmd), 0, 0, diff --git a/relayer/chains/mock/mock_chain_processor_test.go b/relayer/chains/mock/mock_chain_processor_test.go index 99440eee7..fa6099cac 100644 --- a/relayer/chains/mock/mock_chain_processor_test.go +++ b/relayer/chains/mock/mock_chain_processor_test.go @@ -63,7 +63,7 @@ func TestMockChainAndPathProcessors(t *testing.T) { flushInterval := 6 * time.Hour pathProcessor := processor.NewPathProcessor(log, pathEnd1, pathEnd2, metrics, "", - clientUpdateThresholdTime, flushInterval, relayer.DefaultMaxMsgLength) + clientUpdateThresholdTime, flushInterval, relayer.DefaultMaxMsgLength, 0) eventProcessor := processor.NewEventProcessor(). WithChainProcessors( diff --git a/relayer/channel.go b/relayer/channel.go index f8d0ba8f7..5f1a16fa8 100644 --- a/relayer/channel.go +++ b/relayer/channel.go @@ -60,6 +60,7 @@ func (c *Chain) CreateOpenChannels( DefaultClientUpdateThreshold, DefaultFlushInterval, DefaultMaxMsgLength, + 0, ) c.log.Info("Starting event processor for channel handshake", @@ -133,6 +134,7 @@ func (c *Chain) CloseChannel( DefaultClientUpdateThreshold, DefaultFlushInterval, DefaultMaxMsgLength, + 0, )). WithInitialBlockHistory(0). WithMessageLifecycle(&processor.FlushLifecycle{}). @@ -171,6 +173,7 @@ func (c *Chain) CloseChannel( DefaultClientUpdateThreshold, DefaultFlushInterval, DefaultMaxMsgLength, + 0, )). WithInitialBlockHistory(0). 
WithMessageLifecycle(&processor.ChannelCloseLifecycle{ diff --git a/relayer/connection.go b/relayer/connection.go index ba51d3939..823d60053 100644 --- a/relayer/connection.go +++ b/relayer/connection.go @@ -41,6 +41,7 @@ func (c *Chain) CreateOpenConnections( DefaultClientUpdateThreshold, DefaultFlushInterval, DefaultMaxMsgLength, + 0, ) var connectionSrc, connectionDst string diff --git a/relayer/processor/path_end_runtime.go b/relayer/processor/path_end_runtime.go index c6c21ee2d..762e8834c 100644 --- a/relayer/processor/path_end_runtime.go +++ b/relayer/processor/path_end_runtime.go @@ -2,9 +2,11 @@ package processor import ( "context" + "fmt" "sync" "time" + transfertypes "github.com/cosmos/ibc-go/v8/modules/apps/transfer/types" clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" conntypes "github.com/cosmos/ibc-go/v8/modules/core/03-connection/types" chantypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" @@ -98,10 +100,35 @@ func (pathEnd *pathEndRuntime) isRelevantChannel(channelID string) bool { return false } +// checkMemoLimit returns an error if the packet memo exceeds the configured limit. +func checkMemoLimit(packetData []byte, memoLimit int) error { + if memoLimit == 0 { + // no limit + return nil + } + + var packet transfertypes.FungibleTokenPacketData + if err := transfertypes.ModuleCdc.Unmarshal(packetData, &packet); err != nil { + // not an ICS-20 packet + return nil + } + + if len(packet.Memo) > int(memoLimit) { + return fmt.Errorf("packet memo size: %d exceeds limit: %d", len(packet.Memo), memoLimit) + } + + return nil +} + // mergeMessageCache merges relevant IBC messages for packet flows, connection handshakes, and channel handshakes. // inSync indicates whether both involved ChainProcessors are in sync or not. When true, the observed packets // metrics will be counted so that observed vs relayed packets can be compared. 
-func (pathEnd *pathEndRuntime) mergeMessageCache(messageCache IBCMessagesCache, counterpartyChainID string, inSync bool) { +func (pathEnd *pathEndRuntime) mergeMessageCache( + messageCache IBCMessagesCache, + counterpartyChainID string, + inSync bool, + memoLimit int, +) { packetMessages := make(ChannelPacketMessagesCache) connectionHandshakeMessages := make(ConnectionMessagesCache) channelHandshakeMessages := make(ChannelMessagesCache) @@ -109,14 +136,28 @@ func (pathEnd *pathEndRuntime) mergeMessageCache(messageCache IBCMessagesCache, for ch, pmc := range messageCache.PacketFlow { if pathEnd.ShouldRelayChannel(ChainChannelKey{ChainID: pathEnd.info.ChainID, CounterpartyChainID: counterpartyChainID, ChannelKey: ch}) { - if inSync && pathEnd.metrics != nil { - for eventType, pCache := range pmc { + newPmc := make(PacketMessagesCache) + for eventType, pCache := range pmc { + if inSync && pathEnd.metrics != nil { pathEnd.metrics.AddPacketsObserved(pathEnd.info.PathName, pathEnd.info.ChainID, ch.ChannelID, ch.PortID, eventType, len(pCache)) } + newPc := make(PacketSequenceCache) + for seq, p := range pCache { + if err := checkMemoLimit(p.Data, memoLimit); err != nil { + pathEnd.log.Warn("Ignoring packet", zap.Error(err)) + continue + } + + newPc[seq] = p + } + if len(newPc) > 0 { + newPmc[eventType] = newPc + } } - packetMessages[ch] = pmc + packetMessages[ch] = newPmc } } + pathEnd.messageCache.PacketFlow.Merge(packetMessages) for eventType, cmc := range messageCache.ConnectionHandshake { @@ -370,7 +411,16 @@ func (pathEnd *pathEndRuntime) checkForMisbehaviour( return true, nil } -func (pathEnd *pathEndRuntime) mergeCacheData(ctx context.Context, cancel func(), d ChainProcessorCacheData, counterpartyChainID string, counterpartyInSync bool, messageLifecycle MessageLifecycle, counterParty *pathEndRuntime) { +func (pathEnd *pathEndRuntime) mergeCacheData( + ctx context.Context, + cancel func(), + d ChainProcessorCacheData, + counterpartyChainID string, + counterpartyInSync bool, + messageLifecycle MessageLifecycle, + counterParty *pathEndRuntime, + memoLimit int, +) { pathEnd.lastClientUpdateHeightMu.Lock() pathEnd.latestBlock = d.LatestBlock pathEnd.lastClientUpdateHeightMu.Unlock() @@ -409,7 +459,7 @@ func (pathEnd *pathEndRuntime) mergeCacheData(ctx context.Context, cancel func() pathEnd.channelStateCache = d.ChannelStateCache // Update latest channel open state for chain pathEnd.channelStateCacheMu.Unlock() - pathEnd.mergeMessageCache(d.IBCMessagesCache, counterpartyChainID, pathEnd.inSync && counterpartyInSync) // Merge incoming packet IBC messages into the backlog + pathEnd.mergeMessageCache(d.IBCMessagesCache, counterpartyChainID, pathEnd.inSync && counterpartyInSync, memoLimit) // Merge incoming packet IBC messages into the backlog pathEnd.ibcHeaderCache.Merge(d.IBCHeaderCache) // Update latest IBC header state pathEnd.ibcHeaderCache.Prune(ibcHeadersToCache) // Only keep most recent IBC headers diff --git a/relayer/processor/path_processor.go b/relayer/processor/path_processor.go index c6a284bf7..0d43511be 100644 --- a/relayer/processor/path_processor.go +++ b/relayer/processor/path_processor.go @@ -79,7 +79,8 @@ type PathProcessor struct { // true if this is a localhost IBC connection isLocalhost bool - maxMsgs uint64 + maxMsgs uint64 + memoLimit int metrics *PrometheusMetrics } @@ -105,6 +106,7 @@ func NewPathProcessor( clientUpdateThresholdTime time.Duration, flushInterval time.Duration, maxMsgs uint64, + memoLimit int, ) *PathProcessor { isLocalhost := pathEnd1.ClientID == 
ibcexported.LocalhostClientID

@@ -119,6 +121,7 @@ func NewPathProcessor(
 		metrics:     metrics,
 		isLocalhost: isLocalhost,
 		maxMsgs:     maxMsgs,
+		memoLimit:   memoLimit,
 	}
 	if flushInterval == 0 {
 		pp.disablePeriodicFlush()
 	}
@@ -319,11 +322,11 @@ func (pp *PathProcessor) processAvailableSignals(ctx context.Context, cancel fun
 		return true
 	case d := <-pp.pathEnd1.incomingCacheData:
 		// we have new data from ChainProcessor for pathEnd1
-		pp.pathEnd1.mergeCacheData(ctx, cancel, d, pp.pathEnd2.info.ChainID, pp.pathEnd2.inSync, pp.messageLifecycle, pp.pathEnd2)
+		pp.pathEnd1.mergeCacheData(ctx, cancel, d, pp.pathEnd2.info.ChainID, pp.pathEnd2.inSync, pp.messageLifecycle, pp.pathEnd2, pp.memoLimit)

 	case d := <-pp.pathEnd2.incomingCacheData:
 		// we have new data from ChainProcessor for pathEnd2
-		pp.pathEnd2.mergeCacheData(ctx, cancel, d, pp.pathEnd1.info.ChainID, pp.pathEnd1.inSync, pp.messageLifecycle, pp.pathEnd1)
+		pp.pathEnd2.mergeCacheData(ctx, cancel, d, pp.pathEnd1.info.ChainID, pp.pathEnd1.inSync, pp.messageLifecycle, pp.pathEnd1, pp.memoLimit)

 	case <-pp.retryProcess:
 		// No new data to merge in, just retry handling.
diff --git a/relayer/processor/path_processor_internal.go b/relayer/processor/path_processor_internal.go
index 1f2c627bb..4ec0dcf94 100644
--- a/relayer/processor/path_processor_internal.go
+++ b/relayer/processor/path_processor_internal.go
@@ -1493,8 +1493,8 @@ func (pp *PathProcessor) flush(ctx context.Context) error {
 		return fmt.Errorf("failed to enqueue pending messages for flush: %w", err)
 	}

-	pp.pathEnd1.mergeMessageCache(pathEnd1Cache, pp.pathEnd2.info.ChainID, pp.pathEnd2.inSync)
-	pp.pathEnd2.mergeMessageCache(pathEnd2Cache, pp.pathEnd1.info.ChainID, pp.pathEnd1.inSync)
+	pp.pathEnd1.mergeMessageCache(pathEnd1Cache, pp.pathEnd2.info.ChainID, pp.pathEnd2.inSync, pp.memoLimit)
+	pp.pathEnd2.mergeMessageCache(pathEnd2Cache, pp.pathEnd1.info.ChainID, pp.pathEnd1.inSync, pp.memoLimit)

 	if len(skipped) > 0 {
 		skippedPacketsString := ""
diff --git a/relayer/strategies.go b/relayer/strategies.go
index 704e2aa25..9363f2997 100644
--- a/relayer/strategies.go
+++ b/relayer/strategies.go
@@ -39,6 +39,7 @@ func StartRelayer(
 	chains map[string]*Chain,
 	paths []NamedPath,
 	maxMsgLength uint64,
+	memoLimit int,
 	memo string,
 	clientUpdateThresholdTime time.Duration,
 	flushInterval time.Duration,
@@ -99,6 +100,7 @@ func StartRelayer(
 		ePaths,
 		initialBlockHistory,
 		maxMsgLength,
+		memoLimit,
 		memo,
 		messageLifecycle,
 		clientUpdateThresholdTime,
@@ -154,6 +156,7 @@ func relayerStartEventProcessor(
 	paths []path,
 	initialBlockHistory uint64,
 	maxMsgLength uint64,
+	memoLimit int,
 	memo string,
 	messageLifecycle processor.MessageLifecycle,
 	clientUpdateThresholdTime time.Duration,
@@ -179,6 +182,7 @@ func relayerStartEventProcessor(
 			clientUpdateThresholdTime,
 			flushInterval,
 			maxMsgLength,
+			memoLimit,
 		))
 	}

From 983b16868eae2d68fbc54049bc34fe8a8bbd5b5c Mon Sep 17 00:00:00 2001
From: Justin Tieri <37750742+jtieri@users.noreply.github.com>
Date: Mon, 8 Jan 2024 20:56:34 -0600
Subject: [PATCH 2/7] Add max receiver size and ics-20 memo limit configuration

This change introduces two new configuration properties, MaxReceiverSize and
ICS20MemoLimit, in the relayer, the path processor, and the global config. It
updates the existing functions and methods to use these properties when
validating receiver and memo sizes in IBC-related tasks.
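For reference, a sketch of how these settings surface in the relayer's `config.yaml` global section. The key names are the yaml tags from the GlobalConfig struct above; the surrounding values are the defaults from newDefaultGlobalConfig, `128` is the default MaxReceiverSize, and `10` is an illustrative memo limit (`0` means no limit for either knob):

```yaml
global:
    api-listen-addr: :5183
    timeout: 10s
    memo: ""
    light-cache-size: 20
    log-level: info
    # ICS-20 packets whose memo is longer than this many bytes are ignored (0 = no limit).
    ics20-memo-limit: 10
    # ICS-20 packets whose receiver field is longer than this many bytes are ignored (0 = no limit).
    max-receiver-size: 128
```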
--- cmd/config.go | 24 ++++--- cmd/start.go | 1 + cmd/tx.go | 1 + .../chains/mock/mock_chain_processor_test.go | 9 ++- relayer/channel.go | 3 + relayer/connection.go | 1 + relayer/processor/path_end_runtime.go | 68 +++++++++++++++++-- relayer/processor/path_processor.go | 31 +++++++-- relayer/processor/path_processor_internal.go | 21 +++++- relayer/strategies.go | 14 +++- 10 files changed, 145 insertions(+), 28 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 2c7c17330..a4b91416a 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -488,22 +488,24 @@ func DefaultConfig(memo string) *Config { // GlobalConfig describes any global relayer settings type GlobalConfig struct { - APIListenPort string `yaml:"api-listen-addr" json:"api-listen-addr"` - Timeout string `yaml:"timeout" json:"timeout"` - Memo string `yaml:"memo" json:"memo"` - LightCacheSize int `yaml:"light-cache-size" json:"light-cache-size"` - LogLevel string `yaml:"log-level" json:"log-level"` - ICS20MemoLimit int `yaml:"ics20-memo-limit" json:"ics20-memo-limit"` + APIListenPort string `yaml:"api-listen-addr" json:"api-listen-addr"` + Timeout string `yaml:"timeout" json:"timeout"` + Memo string `yaml:"memo" json:"memo"` + LightCacheSize int `yaml:"light-cache-size" json:"light-cache-size"` + LogLevel string `yaml:"log-level" json:"log-level"` + ICS20MemoLimit int `yaml:"ics20-memo-limit" json:"ics20-memo-limit"` + MaxReceiverSize int `yaml:"max-receiver-size" json:"max-receiver-size"` } // newDefaultGlobalConfig returns a global config with defaults set func newDefaultGlobalConfig(memo string) GlobalConfig { return GlobalConfig{ - APIListenPort: ":5183", - Timeout: "10s", - LightCacheSize: 20, - Memo: memo, - LogLevel: "info", + APIListenPort: ":5183", + Timeout: "10s", + LightCacheSize: 20, + Memo: memo, + LogLevel: "info", + MaxReceiverSize: 128, } } diff --git a/cmd/start.go b/cmd/start.go index 16375dbfa..c508f8732 100644 --- a/cmd/start.go +++ b/cmd/start.go @@ -160,6 +160,7 @@ $ %s start demo-path2 --max-tx-size 10`, appName, appName, appName, appName)), chains, paths, maxMsgLength, + a.config.Global.MaxReceiverSize, a.config.Global.ICS20MemoLimit, a.config.memo(cmd), clientUpdateThresholdTime, diff --git a/cmd/tx.go b/cmd/tx.go index 4012e4ac5..904a252ff 100644 --- a/cmd/tx.go +++ b/cmd/tx.go @@ -934,6 +934,7 @@ $ %s tx flush demo-path channel-0`, chains, paths, maxMsgLength, + a.config.Global.MaxReceiverSize, a.config.Global.ICS20MemoLimit, a.config.memo(cmd), 0, diff --git a/relayer/chains/mock/mock_chain_processor_test.go b/relayer/chains/mock/mock_chain_processor_test.go index fa6099cac..5b370ac80 100644 --- a/relayer/chains/mock/mock_chain_processor_test.go +++ b/relayer/chains/mock/mock_chain_processor_test.go @@ -63,7 +63,7 @@ func TestMockChainAndPathProcessors(t *testing.T) { flushInterval := 6 * time.Hour pathProcessor := processor.NewPathProcessor(log, pathEnd1, pathEnd2, metrics, "", - clientUpdateThresholdTime, flushInterval, relayer.DefaultMaxMsgLength, 0) + clientUpdateThresholdTime, flushInterval, relayer.DefaultMaxMsgLength, 0, 1) eventProcessor := processor.NewEventProcessor(). 
WithChainProcessors( @@ -96,8 +96,10 @@ func TestMockChainAndPathProcessors(t *testing.T) { // at most 3 msg transfer could still be stuck in queue since chain processor was shut down, so msgrecvpacket would never be "received" by counterparty require.LessOrEqual(t, len(pathEnd1LeftoverMsgTransfer), 3) + // at most 2 msgrecvpacket could still be stuck in the queue require.LessOrEqual(t, len(pathEnd1LeftoverMsgRecvPacket), 2) + // at most 1 msgAcknowledgement could still be stuck in the queue require.LessOrEqual(t, len(pathEnd1LeftoverMsgAcknowledgement), 1) @@ -142,7 +144,9 @@ func getMockMessages(channelKey processor.ChannelKey, mockSequence, mockSequence if int64(*mockSequence)-int64(*mockSequenceCounterparty) > 0 { return []mock.TransactionMessage{} } + *mockSequence++ + mockMessages := []mock.TransactionMessage{ { EventType: chantypes.EventTypeSendPacket, @@ -159,6 +163,7 @@ func getMockMessages(channelKey processor.ChannelKey, mockSequence, mockSequence }, }, } + if *mockSequenceCounterparty > 1 && *lastSentMockMsgRecvCounterparty != *mockSequenceCounterparty { *lastSentMockMsgRecvCounterparty = *mockSequenceCounterparty mockMessages = append(mockMessages, mock.TransactionMessage{ @@ -176,6 +181,7 @@ func getMockMessages(channelKey processor.ChannelKey, mockSequence, mockSequence }, }) } + if *mockSequence > 2 { mockMessages = append(mockMessages, mock.TransactionMessage{ EventType: chantypes.EventTypeAcknowledgePacket, @@ -189,5 +195,6 @@ func getMockMessages(channelKey processor.ChannelKey, mockSequence, mockSequence }, }) } + return mockMessages } diff --git a/relayer/channel.go b/relayer/channel.go index 5f1a16fa8..6a9c9e62d 100644 --- a/relayer/channel.go +++ b/relayer/channel.go @@ -61,6 +61,7 @@ func (c *Chain) CreateOpenChannels( DefaultFlushInterval, DefaultMaxMsgLength, 0, + 0, ) c.log.Info("Starting event processor for channel handshake", @@ -135,6 +136,7 @@ func (c *Chain) CloseChannel( DefaultFlushInterval, DefaultMaxMsgLength, 0, + 0, )). WithInitialBlockHistory(0). WithMessageLifecycle(&processor.FlushLifecycle{}). @@ -174,6 +176,7 @@ func (c *Chain) CloseChannel( DefaultFlushInterval, DefaultMaxMsgLength, 0, + 0, )). WithInitialBlockHistory(0). WithMessageLifecycle(&processor.ChannelCloseLifecycle{ diff --git a/relayer/connection.go b/relayer/connection.go index 823d60053..cb7aa3875 100644 --- a/relayer/connection.go +++ b/relayer/connection.go @@ -42,6 +42,7 @@ func (c *Chain) CreateOpenConnections( DefaultFlushInterval, DefaultMaxMsgLength, 0, + 0, ) var connectionSrc, connectionDst string diff --git a/relayer/processor/path_end_runtime.go b/relayer/processor/path_end_runtime.go index 762e8834c..cdb5d67e0 100644 --- a/relayer/processor/path_end_runtime.go +++ b/relayer/processor/path_end_runtime.go @@ -102,21 +102,46 @@ func (pathEnd *pathEndRuntime) isRelevantChannel(channelID string) bool { // checkMemoLimit returns an error if the packet memo exceeds the configured limit. 
func checkMemoLimit(packetData []byte, memoLimit int) error { - if memoLimit == 0 { + fmt.Printf("In checkMemoLimit limit: %d \n", memoLimit) + if memoLimit <= 0 { // no limit return nil } var packet transfertypes.FungibleTokenPacketData if err := transfertypes.ModuleCdc.Unmarshal(packetData, &packet); err != nil { + fmt.Printf("Not an ics-20 packet err: %s \n", err) + fmt.Printf("Packet Data: %s \n", string(packetData)) // not an ICS-20 packet return nil } - if len(packet.Memo) > int(memoLimit) { + if len(packet.Memo) > memoLimit { return fmt.Errorf("packet memo size: %d exceeds limit: %d", len(packet.Memo), memoLimit) } + fmt.Printf("Memo length is within limit, len: %d \n", len(packet.Memo)) + + return nil +} + +// checkMaxReceiverSize returns an error if the receiver field size exceeds the configured limit. +func checkMaxReceiverSize(packetData []byte, maxReceiverSize int) error { + if maxReceiverSize <= 0 { + // no limit + return nil + } + + var packet transfertypes.FungibleTokenPacketData + if err := transfertypes.ModuleCdc.Unmarshal(packetData, &packet); err != nil { + // not an ICS-20 packet + return nil + } + + if len(packet.Receiver) > maxReceiverSize { + return fmt.Errorf("packet receiver size: %d exceeds limit: %d", len(packet.Receiver), maxReceiverSize) + } + return nil } @@ -127,7 +152,7 @@ func (pathEnd *pathEndRuntime) mergeMessageCache( messageCache IBCMessagesCache, counterpartyChainID string, inSync bool, - memoLimit int, + memoLimit, maxReceiverSize int, ) { packetMessages := make(ChannelPacketMessagesCache) connectionHandshakeMessages := make(ConnectionMessagesCache) @@ -135,25 +160,47 @@ func (pathEnd *pathEndRuntime) mergeMessageCache( clientICQMessages := make(ClientICQMessagesCache) for ch, pmc := range messageCache.PacketFlow { - if pathEnd.ShouldRelayChannel(ChainChannelKey{ChainID: pathEnd.info.ChainID, CounterpartyChainID: counterpartyChainID, ChannelKey: ch}) { + if pathEnd.ShouldRelayChannel(ChainChannelKey{ + ChainID: pathEnd.info.ChainID, + CounterpartyChainID: counterpartyChainID, + ChannelKey: ch, + }) { newPmc := make(PacketMessagesCache) for eventType, pCache := range pmc { if inSync && pathEnd.metrics != nil { - pathEnd.metrics.AddPacketsObserved(pathEnd.info.PathName, pathEnd.info.ChainID, ch.ChannelID, ch.PortID, eventType, len(pCache)) + pathEnd.metrics.AddPacketsObserved( + pathEnd.info.PathName, + pathEnd.info.ChainID, + ch.ChannelID, + ch.PortID, + eventType, + len(pCache), + ) } + newPc := make(PacketSequenceCache) for seq, p := range pCache { + fmt.Println("About to check memo limit") + if err := checkMemoLimit(p.Data, memoLimit); err != nil { + fmt.Printf("Ignoring packet err: %s \n", err) + pathEnd.log.Warn("Ignoring packet", zap.Error(err)) + continue + } + + if err := checkMaxReceiverSize(p.Data, maxReceiverSize); err != nil { pathEnd.log.Warn("Ignoring packet", zap.Error(err)) continue } newPc[seq] = p } + if len(newPc) > 0 { newPmc[eventType] = newPc } } + packetMessages[ch] = newPmc } } @@ -419,7 +466,7 @@ func (pathEnd *pathEndRuntime) mergeCacheData( counterpartyInSync bool, messageLifecycle MessageLifecycle, counterParty *pathEndRuntime, - memoLimit int, + memoLimit, maxReceiverSize int, ) { pathEnd.lastClientUpdateHeightMu.Lock() pathEnd.latestBlock = d.LatestBlock @@ -459,7 +506,14 @@ func (pathEnd *pathEndRuntime) mergeCacheData( pathEnd.channelStateCache = d.ChannelStateCache // Update latest channel open state for chain pathEnd.channelStateCacheMu.Unlock() - pathEnd.mergeMessageCache(d.IBCMessagesCache, counterpartyChainID, 
pathEnd.inSync && counterpartyInSync, memoLimit) // Merge incoming packet IBC messages into the backlog + // Merge incoming packet IBC messages into the backlog + pathEnd.mergeMessageCache( + d.IBCMessagesCache, + counterpartyChainID, + pathEnd.inSync && counterpartyInSync, + memoLimit, + maxReceiverSize, + ) pathEnd.ibcHeaderCache.Merge(d.IBCHeaderCache) // Update latest IBC header state pathEnd.ibcHeaderCache.Prune(ibcHeadersToCache) // Only keep most recent IBC headers diff --git a/relayer/processor/path_processor.go b/relayer/processor/path_processor.go index 0d43511be..9a5f0ad7c 100644 --- a/relayer/processor/path_processor.go +++ b/relayer/processor/path_processor.go @@ -79,8 +79,8 @@ type PathProcessor struct { // true if this is a localhost IBC connection isLocalhost bool - maxMsgs uint64 - memoLimit int + maxMsgs uint64 + memoLimit, maxReceiverSize int metrics *PrometheusMetrics } @@ -106,7 +106,7 @@ func NewPathProcessor( clientUpdateThresholdTime time.Duration, flushInterval time.Duration, maxMsgs uint64, - memoLimit int, + memoLimit, maxReceiverSize int, ) *PathProcessor { isLocalhost := pathEnd1.ClientID == ibcexported.LocalhostClientID @@ -122,6 +122,7 @@ func NewPathProcessor( isLocalhost: isLocalhost, maxMsgs: maxMsgs, memoLimit: memoLimit, + maxReceiverSize: maxReceiverSize, } if flushInterval == 0 { pp.disablePeriodicFlush() @@ -322,11 +323,31 @@ func (pp *PathProcessor) processAvailableSignals(ctx context.Context, cancel fun return true case d := <-pp.pathEnd1.incomingCacheData: // we have new data from ChainProcessor for pathEnd1 - pp.pathEnd1.mergeCacheData(ctx, cancel, d, pp.pathEnd2.info.ChainID, pp.pathEnd2.inSync, pp.messageLifecycle, pp.pathEnd2, pp.memoLimit) + pp.pathEnd1.mergeCacheData( + ctx, + cancel, + d, + pp.pathEnd2.info.ChainID, + pp.pathEnd2.inSync, + pp.messageLifecycle, + pp.pathEnd2, + pp.memoLimit, + pp.maxReceiverSize, + ) case d := <-pp.pathEnd2.incomingCacheData: // we have new data from ChainProcessor for pathEnd2 - pp.pathEnd2.mergeCacheData(ctx, cancel, d, pp.pathEnd1.info.ChainID, pp.pathEnd1.inSync, pp.messageLifecycle, pp.pathEnd1, pp.memoLimit) + pp.pathEnd2.mergeCacheData( + ctx, + cancel, + d, + pp.pathEnd1.info.ChainID, + pp.pathEnd1.inSync, + pp.messageLifecycle, + pp.pathEnd1, + pp.memoLimit, + pp.maxReceiverSize, + ) case <-pp.retryProcess: // No new data to merge in, just retry handling. 
diff --git a/relayer/processor/path_processor_internal.go b/relayer/processor/path_processor_internal.go
index 4ec0dcf94..a85b19870 100644
--- a/relayer/processor/path_processor_internal.go
+++ b/relayer/processor/path_processor_internal.go
@@ -1474,17 +1474,32 @@ func (pp *PathProcessor) flush(ctx context.Context) error {
 	for k, seqs := range commitments2 {
 		k := k
 		seqs := seqs
+
 		eg.Go(func() error {
-			s, err := pp.queuePendingRecvAndAcks(ctx, pp.pathEnd2, pp.pathEnd1, k, seqs, pathEnd2Cache.PacketFlow, pathEnd1Cache.PacketFlow, &pathEnd2CacheMu, &pathEnd1CacheMu)
+			s, err := pp.queuePendingRecvAndAcks(
+				ctx,
+				pp.pathEnd2,
+				pp.pathEnd1,
+				k,
+				seqs,
+				pathEnd2Cache.PacketFlow,
+				pathEnd1Cache.PacketFlow,
+				&pathEnd2CacheMu,
+				&pathEnd1CacheMu,
+			)
+
 			if err != nil {
 				return err
 			}
+
 			if s != nil {
 				if _, ok := skipped[pp.pathEnd2.info.ChainID]; !ok {
 					skipped[pp.pathEnd2.info.ChainID] = make(map[ChannelKey]skippedPackets)
 				}
+
 				skipped[pp.pathEnd2.info.ChainID][k] = *s
 			}
+
 			return nil
 		})
 	}
@@ -1493,8 +1508,8 @@ func (pp *PathProcessor) flush(ctx context.Context) error {
 		return fmt.Errorf("failed to enqueue pending messages for flush: %w", err)
 	}

-	pp.pathEnd1.mergeMessageCache(pathEnd1Cache, pp.pathEnd2.info.ChainID, pp.pathEnd2.inSync, pp.memoLimit)
-	pp.pathEnd2.mergeMessageCache(pathEnd2Cache, pp.pathEnd1.info.ChainID, pp.pathEnd1.inSync, pp.memoLimit)
+	pp.pathEnd1.mergeMessageCache(pathEnd1Cache, pp.pathEnd2.info.ChainID, pp.pathEnd2.inSync, pp.memoLimit, pp.maxReceiverSize)
+	pp.pathEnd2.mergeMessageCache(pathEnd2Cache, pp.pathEnd1.info.ChainID, pp.pathEnd1.inSync, pp.memoLimit, pp.maxReceiverSize)

 	if len(skipped) > 0 {
 		skippedPacketsString := ""
diff --git a/relayer/strategies.go b/relayer/strategies.go
index 9363f2997..274e38a2b 100644
--- a/relayer/strategies.go
+++ b/relayer/strategies.go
@@ -39,6 +39,7 @@ func StartRelayer(
 	chains map[string]*Chain,
 	paths []NamedPath,
 	maxMsgLength uint64,
+	maxReceiverSize,
 	memoLimit int,
 	memo string,
 	clientUpdateThresholdTime time.Duration,
 	flushInterval time.Duration,
@@ -100,6 +101,7 @@ func StartRelayer(
 		ePaths,
 		initialBlockHistory,
 		maxMsgLength,
+		maxReceiverSize,
 		memoLimit,
 		memo,
 		messageLifecycle,
 		clientUpdateThresholdTime,
@@ -156,6 +158,7 @@ func relayerStartEventProcessor(
 	paths []path,
 	initialBlockHistory uint64,
 	maxMsgLength uint64,
+	maxReceiverSize,
 	memoLimit int,
 	memo string,
 	messageLifecycle processor.MessageLifecycle,
 	clientUpdateThresholdTime time.Duration,
@@ -183,6 +186,7 @@ func relayerStartEventProcessor(
 			clientUpdateThresholdTime,
 			flushInterval,
 			maxMsgLength,
 			memoLimit,
+			maxReceiverSize,
 		))
 	}
@@ -198,7 +202,15 @@ func relayerStartEventProcessor(
 }

 // relayerStartLegacy is the main loop of the relayer.
-func relayerStartLegacy(ctx context.Context, log *zap.Logger, src, dst *Chain, filter ChannelFilter, maxTxSize, maxMsgLength uint64, memo string, errCh chan<- error) {
+func relayerStartLegacy(
+	ctx context.Context,
+	log *zap.Logger,
+	src, dst *Chain,
+	filter ChannelFilter,
+	maxTxSize, maxMsgLength uint64,
+	memo string,
+	errCh chan<- error,
+) {
 	defer close(errCh)

 	// Query the list of channels on the src connection.

From 3b414c20f087eb39419aa95df25c4835c2861880 Mon Sep 17 00:00:00 2001
From: Justin Tieri <37750742+jtieri@users.noreply.github.com>
Date: Mon, 8 Jan 2024 20:56:59 -0600
Subject: [PATCH 3/7] Add WriteConfig function and update method names

Added a new WriteConfig function in relayertest/system.go for testing. Also
updated method names in relayer.go to start with an uppercase letter for
public visibility, and upgraded the Docker image version in
localhost_client_test.go from v8.0.0-beta.1 to v8.0.0.
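Taken together, the newly exported Sys method, the existing MustGetConfig helper, and the new WriteConfig function let a test round-trip the relayer's on-disk config. A minimal sketch of that flow, mirroring how the memo/receiver-limit test added in the next patch uses it (`relayer` is the *Relayer under test; error handling is abbreviated):

```go
cfg := relayer.Sys().MustGetConfig(t) // decode config/config.yaml from the test home dir
cfg.Global.ICS20MemoLimit = 10        // ICS-20 packets with larger memos will be ignored

// Convert the decoded wrapper back into its writable form.
cfgOutput := &cmd.ConfigOutputWrapper{
	Global:          cfg.Global,
	Paths:           cfg.Paths,
	ProviderConfigs: cmd.ProviderConfigs{},
}
for _, c := range cfg.ProviderConfigs {
	pcfg := c.Value.(*cosmos.CosmosProviderConfig)
	cfgOutput.ProviderConfigs[pcfg.ChainID] = &cmd.ProviderConfigWrapper{Type: "cosmos", Value: pcfg}
}

cfgBz, err := yaml.Marshal(cfgOutput)
require.NoError(t, err)
require.NoError(t, relayer.Sys().WriteConfig(t, cfgBz)) // overwrite config/config.yaml
```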
--- interchaintest/feegrant_test.go | 2 +- interchaintest/localhost_client_test.go | 2 +- interchaintest/relayer.go | 38 ++++++++++++------------- internal/relayertest/system.go | 5 ++++ 4 files changed, 26 insertions(+), 21 deletions(-) diff --git a/interchaintest/feegrant_test.go b/interchaintest/feegrant_test.go index 2a56b03af..fc78b2a41 100644 --- a/interchaintest/feegrant_test.go +++ b/interchaintest/feegrant_test.go @@ -269,7 +269,7 @@ func TestRelayerFeeGrant(t *testing.T) { //You MUST run the configure feegrant command prior to starting the relayer, otherwise it'd be like you never set it up at all (within this test) //Note that Gaia supports feegrants, but Osmosis does not (x/feegrant module, or any compatible module, is not included in Osmosis SDK app modules) localRelayer := r.(*Relayer) - res := localRelayer.sys().Run(logger, "chains", "configure", "feegrant", "basicallowance", gaia.Config().ChainID, gaiaGranterWallet.KeyName(), "--grantees", granteeCsv, "--overwrite-granter") + res := localRelayer.Sys().Run(logger, "chains", "configure", "feegrant", "basicallowance", gaia.Config().ChainID, gaiaGranterWallet.KeyName(), "--grantees", granteeCsv, "--overwrite-granter") if res.Err != nil { fmt.Printf("configure feegrant results: %s\n", res.Stdout.String()) t.Fatalf("failed to rly config feegrants: %v", res.Err) diff --git a/interchaintest/localhost_client_test.go b/interchaintest/localhost_client_test.go index 827e62c1c..845a46763 100644 --- a/interchaintest/localhost_client_test.go +++ b/interchaintest/localhost_client_test.go @@ -78,7 +78,7 @@ func TestLocalhost_TokenTransfers(t *testing.T) { numFullNodes := 0 image := ibc.DockerImage{ Repository: "ghcr.io/cosmos/ibc-go-simd", - Version: "v8.0.0-beta.1", + Version: "v8.0.0", UidGid: "100:1000", } cdc := DefaultEncoding() diff --git a/interchaintest/relayer.go b/interchaintest/relayer.go index 3b1a8cfa9..025221516 100644 --- a/interchaintest/relayer.go +++ b/interchaintest/relayer.go @@ -47,7 +47,7 @@ func NewRelayer( config: config, } - res := r.sys().Run(zaptest.NewLogger(t), "config", "init", "--memo", config.Memo) + res := r.Sys().Run(zaptest.NewLogger(t), "config", "init", "--memo", config.Memo) if res.Err != nil { t.Fatalf("failed to rly config init: %v", res.Err) } @@ -55,7 +55,7 @@ func NewRelayer( return r } -func (r *Relayer) sys() *relayertest.System { +func (r *Relayer) Sys() *relayertest.System { return &relayertest.System{HomeDir: r.home} } @@ -89,7 +89,7 @@ func (r *Relayer) AddChainConfiguration(_ context.Context, _ ibc.RelayerExecRepo } func (r *Relayer) AddKey(ctx context.Context, _ ibc.RelayerExecReporter, chainID, keyName, coinType string) (ibc.Wallet, error) { - res := r.sys().RunC(ctx, r.log(), "keys", "add", chainID, keyName, "--coin-type", coinType) + res := r.Sys().RunC(ctx, r.log(), "keys", "add", chainID, keyName, "--coin-type", coinType) if res.Err != nil { return nil, res.Err } @@ -103,7 +103,7 @@ func (r *Relayer) AddKey(ctx context.Context, _ ibc.RelayerExecReporter, chainID } func (r *Relayer) RestoreKey(ctx context.Context, _ ibc.RelayerExecReporter, cfg ibc.ChainConfig, keyName, mnemonic string) error { - res := r.sys().RunC(ctx, r.log(), "keys", "restore", cfg.ChainID, keyName, mnemonic, "--coin-type", cfg.CoinType) + res := r.Sys().RunC(ctx, r.log(), "keys", "restore", cfg.ChainID, keyName, mnemonic, "--coin-type", cfg.CoinType) if res.Err != nil { return res.Err } @@ -111,7 +111,7 @@ func (r *Relayer) RestoreKey(ctx context.Context, _ ibc.RelayerExecReporter, cfg } func (r *Relayer) 
GeneratePath(ctx context.Context, _ ibc.RelayerExecReporter, srcChainID, dstChainID, pathName string) error { - res := r.sys().RunC(ctx, r.log(), "paths", "new", srcChainID, dstChainID, pathName) + res := r.Sys().RunC(ctx, r.log(), "paths", "new", srcChainID, dstChainID, pathName) if res.Err != nil { return res.Err } @@ -119,7 +119,7 @@ func (r *Relayer) GeneratePath(ctx context.Context, _ ibc.RelayerExecReporter, s } func (r *Relayer) UpdatePath(ctx context.Context, _ ibc.RelayerExecReporter, pathName string, filter ibc.ChannelFilter) error { - res := r.sys().RunC(ctx, r.log(), "paths", "update", pathName, + res := r.Sys().RunC(ctx, r.log(), "paths", "update", pathName, "--filter-rule", filter.Rule, "--filter-channels", strings.Join(filter.ChannelList, ","), ) @@ -130,7 +130,7 @@ func (r *Relayer) UpdatePath(ctx context.Context, _ ibc.RelayerExecReporter, pat } func (r *Relayer) GetChannels(ctx context.Context, _ ibc.RelayerExecReporter, chainID string) ([]ibc.ChannelOutput, error) { - res := r.sys().RunC(ctx, r.log(), "q", "channels", chainID) + res := r.Sys().RunC(ctx, r.log(), "q", "channels", chainID) if res.Err != nil { return nil, res.Err } @@ -151,7 +151,7 @@ func (r *Relayer) GetChannels(ctx context.Context, _ ibc.RelayerExecReporter, ch } func (r *Relayer) GetClients(ctx context.Context, _ ibc.RelayerExecReporter, chainID string) (ibc.ClientOutputs, error) { - res := r.sys().RunC(ctx, r.log(), "q", "clients", chainID) + res := r.Sys().RunC(ctx, r.log(), "q", "clients", chainID) if res.Err != nil { return nil, res.Err } @@ -172,7 +172,7 @@ func (r *Relayer) GetClients(ctx context.Context, _ ibc.RelayerExecReporter, cha } func (r *Relayer) LinkPath(ctx context.Context, _ ibc.RelayerExecReporter, pathName string, chanOpts ibc.CreateChannelOptions, clientOpts ibc.CreateClientOptions) error { - res := r.sys().RunC(ctx, r.log(), "tx", "link", pathName, + res := r.Sys().RunC(ctx, r.log(), "tx", "link", pathName, "--src-port", chanOpts.SourcePortName, "--dst-port", chanOpts.DestPortName, "--order", chanOpts.Order.String(), @@ -186,7 +186,7 @@ func (r *Relayer) LinkPath(ctx context.Context, _ ibc.RelayerExecReporter, pathN } func (r *Relayer) GetConnections(ctx context.Context, _ ibc.RelayerExecReporter, chainID string) (ibc.ConnectionOutputs, error) { - res := r.sys().RunC(ctx, r.log(), "q", "connections", chainID) + res := r.Sys().RunC(ctx, r.log(), "q", "connections", chainID) if res.Err != nil { return nil, res.Err } @@ -214,7 +214,7 @@ func (r *Relayer) GetConnections(ctx context.Context, _ ibc.RelayerExecReporter, } func (r *Relayer) CreateChannel(ctx context.Context, _ ibc.RelayerExecReporter, pathName string, opts ibc.CreateChannelOptions) error { - res := r.sys().RunC( + res := r.Sys().RunC( ctx, r.log(), "tx", "channel", pathName, "--src-port", opts.SourcePortName, @@ -229,7 +229,7 @@ func (r *Relayer) CreateChannel(ctx context.Context, _ ibc.RelayerExecReporter, } func (r *Relayer) CreateConnections(ctx context.Context, _ ibc.RelayerExecReporter, pathName string) error { - res := r.sys().RunC(ctx, r.log(), "tx", "connection", pathName) + res := r.Sys().RunC(ctx, r.log(), "tx", "connection", pathName) if res.Err != nil { return res.Err } @@ -237,7 +237,7 @@ func (r *Relayer) CreateConnections(ctx context.Context, _ ibc.RelayerExecReport } func (r *Relayer) CreateClients(ctx context.Context, _ ibc.RelayerExecReporter, pathName string, clientOpts ibc.CreateClientOptions) error { - res := r.sys().RunC(ctx, r.log(), "tx", "clients", pathName, "--client-tp", 
clientOpts.TrustingPeriod) + res := r.Sys().RunC(ctx, r.log(), "tx", "clients", pathName, "--client-tp", clientOpts.TrustingPeriod) if res.Err != nil { return res.Err } @@ -245,7 +245,7 @@ func (r *Relayer) CreateClients(ctx context.Context, _ ibc.RelayerExecReporter, } func (r *Relayer) UpdateClients(ctx context.Context, _ ibc.RelayerExecReporter, pathName string) error { - res := r.sys().RunC(ctx, r.log(), "tx", "update-clients", pathName) + res := r.Sys().RunC(ctx, r.log(), "tx", "update-clients", pathName) if res.Err != nil { return res.Err } @@ -290,7 +290,7 @@ func (r *Relayer) start(ctx context.Context, remainingArgs ...string) { // It won't be reachable without introspecting the output, // but this will allow catching any possible data races around the debug server. args := append([]string{"start", "--debug-addr", "localhost:0"}, remainingArgs...) - res := r.sys().RunC(ctx, r.log(), args...) + res := r.Sys().RunC(ctx, r.log(), args...) if res.Err != nil { r.errCh <- res.Err return @@ -304,7 +304,7 @@ func (r *Relayer) Exec(ctx context.Context, _ ibc.RelayerExecReporter, cmd, _ [] // TODO: env would be ignored for now. // We may want to modify the call to sys() to accept environment overrides, // so this relayer can continue to be used in parallel without environment cross-contamination. - res := r.sys().RunC(ctx, r.log(), cmd...) + res := r.Sys().RunC(ctx, r.log(), cmd...) exitCode := 0 if res.Err != nil { @@ -327,7 +327,7 @@ func (r *Relayer) Flush(ctx context.Context, _ ibc.RelayerExecReporter, pathName cmd = append(cmd, channelID) } } - res := r.sys().RunC(ctx, r.log(), cmd...) + res := r.Sys().RunC(ctx, r.log(), cmd...) if res.Err != nil { return res.Err } @@ -335,14 +335,14 @@ func (r *Relayer) Flush(ctx context.Context, _ ibc.RelayerExecReporter, pathName } func (r *Relayer) GetWallet(chainID string) (ibc.Wallet, bool) { - res := r.sys().RunC(context.Background(), r.log(), "keys", "show", chainID) + res := r.Sys().RunC(context.Background(), r.log(), "keys", "show", chainID) if res.Err != nil { return &interchaintestcosmos.CosmosWallet{}, false } address := strings.TrimSpace(res.Stdout.String()) var keyName string - config := r.sys().MustGetConfig(r.t) + config := r.Sys().MustGetConfig(r.t) for _, v := range config.ProviderConfigs { if c, ok := v.Value.(cosmos.CosmosProviderConfig); ok { if c.ChainID == chainID { diff --git a/internal/relayertest/system.go b/internal/relayertest/system.go index 6b917b653..59daad624 100644 --- a/internal/relayertest/system.go +++ b/internal/relayertest/system.go @@ -139,6 +139,11 @@ func (s *System) MustGetConfig(t *testing.T) (config cmd.ConfigInputWrapper) { return config } +func (s *System) WriteConfig(t *testing.T, contents []byte) error { + t.Helper() + return os.WriteFile(filepath.Join(s.HomeDir, "config", "config.yaml"), contents, 0600) +} + // A fixed mnemonic and its resulting cosmos address, helpful for tests that need a mnemonic. const ( ZeroMnemonic = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" From fd952d1a317f79f36f482eb632df38d25b1a7c9f Mon Sep 17 00:00:00 2001 From: Justin Tieri <37750742+jtieri@users.noreply.github.com> Date: Mon, 8 Jan 2024 20:57:43 -0600 Subject: [PATCH 4/7] test: add memo and receiver limit test Created the Memo and Receiver Limit test within the interchain test suite. 
This test evaluates the functionality of IBC transfers with a memo field length exceeding configured limit, as well as receiver field exceeding the limit. It also evaluates a successful IBC transfer for completeness. --- interchaintest/memo_receiver_limit_test.go | 275 +++++++++++++++++++++ 1 file changed, 275 insertions(+) create mode 100644 interchaintest/memo_receiver_limit_test.go diff --git a/interchaintest/memo_receiver_limit_test.go b/interchaintest/memo_receiver_limit_test.go new file mode 100644 index 000000000..87b12e6ca --- /dev/null +++ b/interchaintest/memo_receiver_limit_test.go @@ -0,0 +1,275 @@ +package interchaintest_test + +import ( + "context" + "testing" + + sdkmath "cosmossdk.io/math" + transfertypes "github.com/cosmos/ibc-go/v8/modules/apps/transfer/types" + "github.com/cosmos/relayer/v2/cmd" + relayertest "github.com/cosmos/relayer/v2/interchaintest" + "github.com/cosmos/relayer/v2/relayer/chains/cosmos" + "github.com/strangelove-ventures/interchaintest/v8" + "github.com/strangelove-ventures/interchaintest/v8/ibc" + "github.com/strangelove-ventures/interchaintest/v8/testreporter" + "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" + "gopkg.in/yaml.v3" +) + +func TestMemoAndReceiverLimit(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + + t.Parallel() + + numVals := 1 + numFullNodes := 0 + + image := ibc.DockerImage{ + Repository: "ghcr.io/cosmos/ibc-go-simd", + Version: "v8.0.0", + UidGid: "100:1000", + } + + cf := interchaintest.NewBuiltinChainFactory(zaptest.NewLogger(t), []*interchaintest.ChainSpec{ + { + Name: "ibc-go-simd", + Version: "main", + NumValidators: &numVals, + NumFullNodes: &numFullNodes, + ChainConfig: ibc.ChainConfig{ + Type: "cosmos", + Name: "simd", + ChainID: "chain-a", + Images: []ibc.DockerImage{image}, + Bin: "simd", + Bech32Prefix: "cosmos", + Denom: "stake", + CoinType: "118", + GasPrices: "0.0stake", + GasAdjustment: 1.1, + }, + }, + { + Name: "ibc-go-simd", + Version: "main", + NumValidators: &numVals, + NumFullNodes: &numFullNodes, + ChainConfig: ibc.ChainConfig{ + Type: "cosmos", + Name: "simd", + ChainID: "chain-b", + Images: []ibc.DockerImage{image}, + Bin: "simd", + Bech32Prefix: "cosmos", + Denom: "stake", + CoinType: "118", + GasPrices: "0.0stake", + GasAdjustment: 1.1, + }, + }, + }) + + chains, err := cf.Chains(t.Name()) + require.NoError(t, err) + chainA, chainB := chains[0], chains[1] + + ctx := context.Background() + client, network := interchaintest.DockerSetup(t) + + rf := relayertest.NewRelayerFactory(relayertest.RelayerConfig{InitialBlockHistory: 50}) + r := rf.Build(t, client, network) + + const pathName = "chainA-chainB" + + ic := interchaintest.NewInterchain(). + AddChain(chainA). + AddChain(chainB). + AddRelayer(r, "relayer"). 
+ AddLink(interchaintest.InterchainLink{ + Chain1: chainA, + Chain2: chainB, + Relayer: r, + Path: pathName, + }) + + rep := testreporter.NewNopReporter() + eRep := rep.RelayerExecReporter(t) + + require.NoError(t, ic.Build(ctx, eRep, interchaintest.InterchainBuildOptions{ + TestName: t.Name(), + Client: client, + NetworkID: network, + })) + + t.Cleanup(func() { + _ = ic.Close() + }) + + // read relayer config + // set memo limit + // write relayer config + + relayer := r.(*relayertest.Relayer) + + cfg := relayer.Sys().MustGetConfig(t) + + t.Logf("Global Config: %+v \n", cfg.Global) + + for _, c := range cfg.ProviderConfigs { + t.Logf("Provider Config: %+v \n", c.Value) + } + + cfg.Global.ICS20MemoLimit = 10 + + cfgOutput := new(cmd.ConfigOutputWrapper) + cfgOutput.ProviderConfigs = cmd.ProviderConfigs{} + cfgOutput.Global = cfg.Global + cfgOutput.Paths = cfg.Paths + + for _, c := range cfg.ProviderConfigs { + stuff := c.Value.(*cosmos.CosmosProviderConfig) + + cfgOutput.ProviderConfigs[stuff.ChainID] = &cmd.ProviderConfigWrapper{ + Type: "cosmos", + Value: stuff, + } + } + + cfgBz, err := yaml.Marshal(cfgOutput) + require.NoError(t, err) + + err = relayer.Sys().WriteConfig(t, cfgBz) + require.NoError(t, err) + + cfg = relayer.Sys().MustGetConfig(t) + + t.Logf("Global Config: %+v \n", cfg.Global) + + for _, c := range cfg.ProviderConfigs { + t.Logf("Provider Config: %+v \n", c.Value) + } + + err = r.StartRelayer(ctx, eRep, pathName) + require.NoError(t, err) + + t.Cleanup( + func() { + err := r.StopRelayer(ctx, eRep) + if err != nil { + t.Logf("an error occurred while stopping the relayer: %s", err) + } + }, + ) + + // create and fund user accs + // assert initial balances + + // initialize a new acc for the relayer along with a couple user accs + initBal := sdkmath.NewInt(1_000_000_000_000) + users := interchaintest.GetAndFundTestUsers(t, ctx, t.Name(), initBal, chainA, chainB) + + require.NoError(t, testutil.WaitForBlocks(ctx, 2, chainA, chainB)) + + userA := users[0] + userB := users[1] + + // assert initial balances are correct + userABal, err := chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) + require.NoError(t, err) + require.True(t, initBal.Equal(userABal)) + + userBBal, err := chainB.GetBalance(ctx, userB.FormattedAddress(), chainB.Config().Denom) + require.NoError(t, err) + require.True(t, initBal.Equal(userBBal)) + + channels, err := r.GetChannels(ctx, eRep, chainA.Config().ChainID) + require.NoError(t, err) + require.Equal(t, 1, len(channels)) + + channel := channels[0] + + // send transfer with memo that exceeds limit + // ensure transfer failed and assert balances + // compose and send a localhost IBC transfer which should be successful + transferAmount := sdkmath.NewInt(1_000) + transfer := ibc.WalletAmount{ + Address: userB.FormattedAddress(), + Denom: chainA.Config().Denom, + Amount: transferAmount, + } + + // use a memo with a length that exceeds our configured limit + opts := ibc.TransferOptions{ + Memo: "this memo is too long", + } + + _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userA.KeyName(), transfer, opts) + require.NoError(t, err) + + require.NoError(t, testutil.WaitForBlocks(ctx, 5, chainA, chainB)) + + userABal, err = chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) + require.NoError(t, err) + require.True(t, initBal.Equal(userABal)) + + userBBal, err = chainB.GetBalance(ctx, userB.FormattedAddress(), chainB.Config().Denom) + require.NoError(t, err) + require.True(t, initBal.Equal(userBBal)) + + // send 
transfer with receiver field that exceeds limit + // ensure transfer failed and assert balances + var junkReceiver string + + for i := 0; i < 130; i++ { + junkReceiver += "a" + } + + transfer = ibc.WalletAmount{ + Address: junkReceiver, + Denom: chainA.Config().Denom, + Amount: transferAmount, + } + + _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userA.KeyName(), transfer, opts) + require.NoError(t, err) + + require.NoError(t, testutil.WaitForBlocks(ctx, 5, chainA, chainB)) + + userABal, err = chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) + require.NoError(t, err) + require.True(t, initBal.Equal(userABal)) + + userBBal, err = chainB.GetBalance(ctx, userB.FormattedAddress(), chainB.Config().Denom) + require.NoError(t, err) + require.True(t, initBal.Equal(userBBal)) + + // send transfer that should succeed + // ensure transfer succeeded and assert balances + transfer = ibc.WalletAmount{ + Address: userB.FormattedAddress(), + Denom: chainA.Config().Denom, + Amount: transferAmount, + } + + _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userA.KeyName(), transfer, opts) + require.NoError(t, err) + + require.NoError(t, testutil.WaitForBlocks(ctx, 5, chainA, chainB)) + + // compose the ibc denom for balance assertions + denom := transfertypes.GetPrefixedDenom(channel.Counterparty.PortID, channel.Counterparty.ChannelID, chainA.Config().Denom) + trace := transfertypes.ParseDenomTrace(denom) + + // assert balances are correct + userABal, err = chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) + require.NoError(t, err) + require.True(t, userABal.Equal(initBal.Sub(transferAmount))) + + userBBal, err = chainB.GetBalance(ctx, userB.FormattedAddress(), trace.IBCDenom()) + require.NoError(t, err) + require.True(t, userBBal.Equal(transferAmount)) +} From 312d0486aa32f229e323eaec4cc6d80507e744da Mon Sep 17 00:00:00 2001 From: Justin Tieri <37750742+jtieri@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:54:28 -0600 Subject: [PATCH 5/7] test: update test for memo and receiver limit Adjusted the test case 'TestMemoAndReceiverLimit' to focus more on the functionality of sending transfers with a memo and receiver limit configured. This reduces the complexity of the test case, and it now focuses more closely on the actual purpose of the method. The general flow of the test has been maintained, but some unnecessary operations have been removed. --- interchaintest/memo_receiver_limit_test.go | 126 +++++++++------------ 1 file changed, 51 insertions(+), 75 deletions(-) diff --git a/interchaintest/memo_receiver_limit_test.go b/interchaintest/memo_receiver_limit_test.go index 87b12e6ca..435cc7871 100644 --- a/interchaintest/memo_receiver_limit_test.go +++ b/interchaintest/memo_receiver_limit_test.go @@ -18,6 +18,7 @@ import ( "gopkg.in/yaml.v3" ) +// TestMemoAndReceiverLimit tests the functionality of sending transfers with a memo and receiver limit configured. func TestMemoAndReceiverLimit(t *testing.T) { if testing.Short() { t.Skip("skipping in short mode") @@ -109,19 +110,27 @@ func TestMemoAndReceiverLimit(t *testing.T) { _ = ic.Close() }) - // read relayer config - // set memo limit - // write relayer config + // Create and fund user accs & assert initial balances. 
+ initBal := sdkmath.NewInt(1_000_000_000_000) + users := interchaintest.GetAndFundTestUsers(t, ctx, t.Name(), initBal, chainA, chainB) - relayer := r.(*relayertest.Relayer) + require.NoError(t, testutil.WaitForBlocks(ctx, 2, chainA, chainB)) - cfg := relayer.Sys().MustGetConfig(t) + userA := users[0] + userB := users[1] - t.Logf("Global Config: %+v \n", cfg.Global) + userABal, err := chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) + require.NoError(t, err) + require.True(t, initBal.Equal(userABal)) - for _, c := range cfg.ProviderConfigs { - t.Logf("Provider Config: %+v \n", c.Value) - } + userBBal, err := chainB.GetBalance(ctx, userB.FormattedAddress(), chainB.Config().Denom) + require.NoError(t, err) + require.True(t, initBal.Equal(userBBal)) + + // Read relayer config from disk, configure memo limit, & write config back to disk. + relayer := r.(*relayertest.Relayer) + + cfg := relayer.Sys().MustGetConfig(t) cfg.Global.ICS20MemoLimit = 10 @@ -145,14 +154,6 @@ func TestMemoAndReceiverLimit(t *testing.T) { err = relayer.Sys().WriteConfig(t, cfgBz) require.NoError(t, err) - cfg = relayer.Sys().MustGetConfig(t) - - t.Logf("Global Config: %+v \n", cfg.Global) - - for _, c := range cfg.ProviderConfigs { - t.Logf("Provider Config: %+v \n", c.Value) - } - err = r.StartRelayer(ctx, eRep, pathName) require.NoError(t, err) @@ -165,92 +166,72 @@ func TestMemoAndReceiverLimit(t *testing.T) { }, ) - // create and fund user accs - // assert initial balances - - // initialize a new acc for the relayer along with a couple user accs - initBal := sdkmath.NewInt(1_000_000_000_000) - users := interchaintest.GetAndFundTestUsers(t, ctx, t.Name(), initBal, chainA, chainB) - - require.NoError(t, testutil.WaitForBlocks(ctx, 2, chainA, chainB)) - - userA := users[0] - userB := users[1] - - // assert initial balances are correct - userABal, err := chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) - require.NoError(t, err) - require.True(t, initBal.Equal(userABal)) - - userBBal, err := chainB.GetBalance(ctx, userB.FormattedAddress(), chainB.Config().Denom) - require.NoError(t, err) - require.True(t, initBal.Equal(userBBal)) - + // Send transfer that should succeed & assert balances. channels, err := r.GetChannels(ctx, eRep, chainA.Config().ChainID) require.NoError(t, err) require.Equal(t, 1, len(channels)) channel := channels[0] - // send transfer with memo that exceeds limit - // ensure transfer failed and assert balances - // compose and send a localhost IBC transfer which should be successful transferAmount := sdkmath.NewInt(1_000) + transfer := ibc.WalletAmount{ Address: userB.FormattedAddress(), Denom: chainA.Config().Denom, Amount: transferAmount, } - // use a memo with a length that exceeds our configured limit - opts := ibc.TransferOptions{ - Memo: "this memo is too long", - } - - _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userA.KeyName(), transfer, opts) + _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userA.KeyName(), transfer, ibc.TransferOptions{}) require.NoError(t, err) require.NoError(t, testutil.WaitForBlocks(ctx, 5, chainA, chainB)) + // Compose the ibc denom for balance assertions on the counterparty and assert balances. 
+ denom := transfertypes.GetPrefixedDenom( + channel.Counterparty.PortID, + channel.Counterparty.ChannelID, + chainA.Config().Denom, + ) + trace := transfertypes.ParseDenomTrace(denom) + userABal, err = chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) require.NoError(t, err) - require.True(t, initBal.Equal(userABal)) + require.True(t, userABal.Equal(initBal.Sub(transferAmount))) - userBBal, err = chainB.GetBalance(ctx, userB.FormattedAddress(), chainB.Config().Denom) + userBBal, err = chainB.GetBalance(ctx, userB.FormattedAddress(), trace.IBCDenom()) require.NoError(t, err) - require.True(t, initBal.Equal(userBBal)) - - // send transfer with receiver field that exceeds limit - // ensure transfer failed and assert balances - var junkReceiver string - - for i := 0; i < 130; i++ { - junkReceiver += "a" - } + require.True(t, userBBal.Equal(transferAmount)) - transfer = ibc.WalletAmount{ - Address: junkReceiver, - Denom: chainA.Config().Denom, - Amount: transferAmount, + // Send transfer with memo that exceeds limit, ensure transfer failed and assert balances. + opts := ibc.TransferOptions{ + Memo: "this memo is too long", + Timeout: &ibc.IBCTimeout{ + Height: 10, + }, } _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userA.KeyName(), transfer, opts) require.NoError(t, err) - require.NoError(t, testutil.WaitForBlocks(ctx, 5, chainA, chainB)) + require.NoError(t, testutil.WaitForBlocks(ctx, 11, chainA, chainB)) userABal, err = chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) require.NoError(t, err) - require.True(t, initBal.Equal(userABal)) + require.True(t, !userABal.Equal(initBal.Sub(transferAmount))) - userBBal, err = chainB.GetBalance(ctx, userB.FormattedAddress(), chainB.Config().Denom) + userBBal, err = chainB.GetBalance(ctx, userB.FormattedAddress(), trace.IBCDenom()) require.NoError(t, err) - require.True(t, initBal.Equal(userBBal)) + require.True(t, userBBal.Equal(transferAmount)) + + // Send transfer with receiver field that exceeds limit, ensure transfer failed and assert balances. + var junkReceiver string + + for i := 0; i < 130; i++ { + junkReceiver += "a" + } - // send transfer that should succeed - // ensure transfer succeeded and assert balances transfer = ibc.WalletAmount{ - Address: userB.FormattedAddress(), + Address: junkReceiver, Denom: chainA.Config().Denom, Amount: transferAmount, } @@ -260,14 +241,9 @@ func TestMemoAndReceiverLimit(t *testing.T) { require.NoError(t, testutil.WaitForBlocks(ctx, 5, chainA, chainB)) - // compose the ibc denom for balance assertions - denom := transfertypes.GetPrefixedDenom(channel.Counterparty.PortID, channel.Counterparty.ChannelID, chainA.Config().Denom) - trace := transfertypes.ParseDenomTrace(denom) - - // assert balances are correct userABal, err = chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) require.NoError(t, err) - require.True(t, userABal.Equal(initBal.Sub(transferAmount))) + require.True(t, !userABal.Equal(initBal.Sub(transferAmount))) userBBal, err = chainB.GetBalance(ctx, userB.FormattedAddress(), trace.IBCDenom()) require.NoError(t, err) From 996e41c9971ee4c7aabaa4f79a2c7c434c871109 Mon Sep 17 00:00:00 2001 From: Justin Tieri <37750742+jtieri@users.noreply.github.com> Date: Tue, 9 Jan 2024 13:22:51 -0600 Subject: [PATCH 6/7] fix: properly unmarshal ics-20 packet data & remove debug print statements This commit removes various debug print statements from the code, making it cleaner and easier to read. 
Additionally, it switches packet decoding from 'transfertypes.ModuleCdc.Unmarshal'
to 'transfertypes.ModuleCdc.UnmarshalJSON': ICS-20 packet data is JSON-encoded,
so the proto-based Unmarshal failed on every packet and the memo and receiver
checks were silently skipped.
---
 relayer/processor/path_end_runtime.go | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/relayer/processor/path_end_runtime.go b/relayer/processor/path_end_runtime.go
index cdb5d67e0..d0f3e1d23 100644
--- a/relayer/processor/path_end_runtime.go
+++ b/relayer/processor/path_end_runtime.go
@@ -102,16 +102,13 @@ func (pathEnd *pathEndRuntime) isRelevantChannel(channelID string) bool {
 // checkMemoLimit returns an error if the packet memo exceeds the configured limit.
 func checkMemoLimit(packetData []byte, memoLimit int) error {
-	fmt.Printf("In checkMemoLimit limit: %d \n", memoLimit)
 	if memoLimit <= 0 {
 		// no limit
 		return nil
 	}

 	var packet transfertypes.FungibleTokenPacketData
-	if err := transfertypes.ModuleCdc.Unmarshal(packetData, &packet); err != nil {
-		fmt.Printf("Not an ics-20 packet err: %s \n", err)
-		fmt.Printf("Packet Data: %s \n", string(packetData))
+	if err := transfertypes.ModuleCdc.UnmarshalJSON(packetData, &packet); err != nil {
 		// not an ICS-20 packet
 		return nil
 	}
@@ -120,8 +117,6 @@ func checkMemoLimit(packetData []byte, memoLimit int) error {
 		return fmt.Errorf("packet memo size: %d exceeds limit: %d", len(packet.Memo), memoLimit)
 	}

-	fmt.Printf("Memo length is within limit, len: %d \n", len(packet.Memo))
-
 	return nil
 }
@@ -133,7 +128,7 @@ func checkMaxReceiverSize(packetData []byte, maxReceiverSize int) error {
 	}

 	var packet transfertypes.FungibleTokenPacketData
-	if err := transfertypes.ModuleCdc.Unmarshal(packetData, &packet); err != nil {
+	if err := transfertypes.ModuleCdc.UnmarshalJSON(packetData, &packet); err != nil {
 		// not an ICS-20 packet
 		return nil
 	}
@@ -180,10 +175,7 @@ func (pathEnd *pathEndRuntime) mergeMessageCache(
 			newPc := make(PacketSequenceCache)
 			for seq, p := range pCache {
-				fmt.Println("About to check memo limit")
-
 				if err := checkMemoLimit(p.Data, memoLimit); err != nil {
-					fmt.Printf("Ignoring packet err: %s \n", err)
 					pathEnd.log.Warn("Ignoring packet", zap.Error(err))
 					continue
 				}

From f7cc18e7288d45479e926f8dfc543c09a85a6341 Mon Sep 17 00:00:00 2001
From: Justin Tieri <37750742+jtieri@users.noreply.github.com>
Date: Tue, 9 Jan 2024 13:29:56 -0600
Subject: [PATCH 7/7] test: expand test users and IBC transfers in memo/receiver limit test

The memo/receiver limit test now uses four test users instead of two,
allowing more complex transfer scenarios. The additional IBC transfers and
balance assertions give the limits more comprehensive coverage and help
validate the system end to end.
---
 interchaintest/memo_receiver_limit_test.go | 49 +++++++++++++++++-----
 1 file changed, 38 insertions(+), 11 deletions(-)

diff --git a/interchaintest/memo_receiver_limit_test.go b/interchaintest/memo_receiver_limit_test.go
index 435cc7871..ce9dd45bb 100644
--- a/interchaintest/memo_receiver_limit_test.go
+++ b/interchaintest/memo_receiver_limit_test.go
@@ -112,21 +112,31 @@ func TestMemoAndReceiverLimit(t *testing.T) {
 	// Create and fund user accs & assert initial balances.
initBal := sdkmath.NewInt(1_000_000_000_000) - users := interchaintest.GetAndFundTestUsers(t, ctx, t.Name(), initBal, chainA, chainB) + users := interchaintest.GetAndFundTestUsers(t, ctx, t.Name(), initBal, chainA, chainB, chainA, chainB) require.NoError(t, testutil.WaitForBlocks(ctx, 2, chainA, chainB)) userA := users[0] userB := users[1] + userC := users[2] + userD := users[3] userABal, err := chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) require.NoError(t, err) require.True(t, initBal.Equal(userABal)) + userCBal, err := chainA.GetBalance(ctx, userC.FormattedAddress(), chainA.Config().Denom) + require.NoError(t, err) + require.True(t, initBal.Equal(userCBal)) + userBBal, err := chainB.GetBalance(ctx, userB.FormattedAddress(), chainB.Config().Denom) require.NoError(t, err) require.True(t, initBal.Equal(userBBal)) + userDBal, err := chainB.GetBalance(ctx, userD.FormattedAddress(), chainB.Config().Denom) + require.NoError(t, err) + require.True(t, initBal.Equal(userDBal)) + // Read relayer config from disk, configure memo limit, & write config back to disk. relayer := r.(*relayertest.Relayer) @@ -166,7 +176,7 @@ func TestMemoAndReceiverLimit(t *testing.T) { }, ) - // Send transfer that should succeed & assert balances. + // Send transfers that should succeed & assert balances. channels, err := r.GetChannels(ctx, eRep, chainA.Config().ChainID) require.NoError(t, err) require.Equal(t, 1, len(channels)) @@ -175,13 +185,22 @@ func TestMemoAndReceiverLimit(t *testing.T) { transferAmount := sdkmath.NewInt(1_000) - transfer := ibc.WalletAmount{ + transferAB := ibc.WalletAmount{ Address: userB.FormattedAddress(), Denom: chainA.Config().Denom, Amount: transferAmount, } - _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userA.KeyName(), transfer, ibc.TransferOptions{}) + transferCD := ibc.WalletAmount{ + Address: userD.FormattedAddress(), + Denom: chainA.Config().Denom, + Amount: transferAmount, + } + + _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userA.KeyName(), transferAB, ibc.TransferOptions{}) + require.NoError(t, err) + + _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userC.KeyName(), transferCD, ibc.TransferOptions{}) require.NoError(t, err) require.NoError(t, testutil.WaitForBlocks(ctx, 5, chainA, chainB)) @@ -202,6 +221,14 @@ func TestMemoAndReceiverLimit(t *testing.T) { require.NoError(t, err) require.True(t, userBBal.Equal(transferAmount)) + userCBal, err = chainA.GetBalance(ctx, userC.FormattedAddress(), chainA.Config().Denom) + require.NoError(t, err) + require.True(t, userCBal.Equal(initBal.Sub(transferAmount))) + + userDBal, err = chainB.GetBalance(ctx, userD.FormattedAddress(), trace.IBCDenom()) + require.NoError(t, err) + require.True(t, userDBal.Equal(transferAmount)) + // Send transfer with memo that exceeds limit, ensure transfer failed and assert balances. 
opts := ibc.TransferOptions{ Memo: "this memo is too long", @@ -210,7 +237,7 @@ func TestMemoAndReceiverLimit(t *testing.T) { }, } - _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userA.KeyName(), transfer, opts) + _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userA.KeyName(), transferAB, opts) require.NoError(t, err) require.NoError(t, testutil.WaitForBlocks(ctx, 11, chainA, chainB)) @@ -230,22 +257,22 @@ func TestMemoAndReceiverLimit(t *testing.T) { junkReceiver += "a" } - transfer = ibc.WalletAmount{ + transferCD = ibc.WalletAmount{ Address: junkReceiver, Denom: chainA.Config().Denom, Amount: transferAmount, } - _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userA.KeyName(), transfer, opts) + _, err = chainA.SendIBCTransfer(ctx, channel.ChannelID, userC.KeyName(), transferCD, ibc.TransferOptions{}) require.NoError(t, err) require.NoError(t, testutil.WaitForBlocks(ctx, 5, chainA, chainB)) - userABal, err = chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) + userCBal, err = chainA.GetBalance(ctx, userC.FormattedAddress(), chainA.Config().Denom) require.NoError(t, err) - require.True(t, !userABal.Equal(initBal.Sub(transferAmount))) + require.True(t, !userCBal.Equal(initBal.Sub(transferAmount))) - userBBal, err = chainB.GetBalance(ctx, userB.FormattedAddress(), trace.IBCDenom()) + userDBal, err = chainB.GetBalance(ctx, userD.FormattedAddress(), trace.IBCDenom()) require.NoError(t, err) - require.True(t, userBBal.Equal(transferAmount)) + require.True(t, userDBal.Equal(transferAmount)) }
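A closing note on the decoding fix in patch 6: ICS-20 packet data travels as JSON (FungibleTokenPacketData.GetBytes in ibc-go produces the canonical JSON encoding), which is why ModuleCdc.UnmarshalJSON is the correct call; the earlier proto-based Unmarshal treated every transfer packet as "not an ICS-20 packet", so the memo and receiver checks never ran. A minimal, self-contained sketch of the round trip (denom, amount, addresses, and memo are illustrative):

```go
package main

import (
	"fmt"

	transfertypes "github.com/cosmos/ibc-go/v8/modules/apps/transfer/types"
)

func main() {
	// Build packet data the way an ICS-20 transfer does.
	data := transfertypes.NewFungibleTokenPacketData(
		"stake", "1000", "cosmos1sender", "cosmos1receiver", "a memo over the limit",
	)

	// GetBytes returns the JSON wire encoding, e.g. {"amount":"1000","denom":"stake",...}.
	bz := data.GetBytes()

	// JSON decoding succeeds, so checkMemoLimit/checkMaxReceiverSize can inspect the fields.
	var decoded transfertypes.FungibleTokenPacketData
	if err := transfertypes.ModuleCdc.UnmarshalJSON(bz, &decoded); err != nil {
		panic(err)
	}

	fmt.Println(len(decoded.Memo) > 10) // true: such a packet is ignored when ics20-memo-limit is 10
}
```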