From 2bd5510d0c3bff29202068b95acba1609ab9700e Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 14 Oct 2025 12:30:47 -0400 Subject: [PATCH 01/48] feat(sensor): broadcast block, txs, and hashes --- cmd/p2p/sensor/api.go | 58 +++--- cmd/p2p/sensor/sensor.go | 82 ++++++-- doc/polycli_p2p_sensor.md | 87 ++++---- p2p/cache.go | 155 +++++++++++++++ p2p/conns.go | 373 ++++++++++++++++++++++++++++++++-- p2p/protocol.go | 407 ++++++++++++++++++++++++-------------- p2p/rlpx.go | 7 - 7 files changed, 926 insertions(+), 243 deletions(-) create mode 100644 p2p/cache.go diff --git a/cmd/p2p/sensor/api.go b/cmd/p2p/sensor/api.go index 2214b64ef..96bf785d0 100644 --- a/cmd/p2p/sensor/api.go +++ b/cmd/p2p/sensor/api.go @@ -5,6 +5,8 @@ import ( "fmt" "net/http" "slices" + "strings" + "time" "github.com/0xPolygon/polygon-cli/p2p" "github.com/ethereum/go-ethereum/eth/protocols/eth" @@ -20,10 +22,18 @@ type nodeInfo struct { URL string `json:"enode"` } +// peerInfo represents information about a connected peer. +type peerInfo struct { + MessagesReceived p2p.MessageCount `json:"messages_received"` + MessagesSent p2p.MessageCount `json:"messages_sent"` + ConnectedAt string `json:"connected_at"` + DurationSeconds int64 `json:"duration_seconds"` +} + // handleAPI sets up the API for interacting with the sensor. The `/peers` // endpoint returns a list of all peers connected to the sensor, including the -// types and counts of eth packets sent by each peer. -func handleAPI(server *ethp2p.Server, counter *prometheus.CounterVec) { +// types and counts of eth packets sent by and received from each peer. 
+func handleAPI(server *ethp2p.Server, msgsReceived, msgsSent *prometheus.CounterVec, conns *p2p.Conns) { http.HandleFunc("/peers", func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) @@ -33,10 +43,18 @@ func handleAPI(server *ethp2p.Server, counter *prometheus.CounterVec) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - peers := make(map[string]p2p.MessageCount) + peers := make(map[string]peerInfo) for _, peer := range server.Peers() { url := peer.Node().URLv4() - peers[url] = getPeerMessages(url, peer.Fullname(), counter) + nodeID := peer.Node().ID().String() + connectedAt := conns.GetPeerConnectedAt(nodeID) + + peers[url] = peerInfo{ + MessagesReceived: getPeerMessages(url, peer.Fullname(), msgsReceived), + MessagesSent: getPeerMessages(url, peer.Fullname(), msgsSent), + ConnectedAt: connectedAt.UTC().Format(time.RFC3339), + DurationSeconds: int64(time.Since(connectedAt).Seconds()), + } } if err := json.NewEncoder(w).Encode(peers); err != nil { @@ -105,30 +123,22 @@ func removePeerMessages(counter *prometheus.CounterVec, urls []string) error { return err } - var family *dto.MetricFamily - for _, f := range families { - if f.GetName() == "sensor_messages" { - family = f - break + // Find all matching metric families + for _, family := range families { + // Check for any sensor_messages metric (received, sent, etc.) + if !strings.Contains(family.GetName(), "sensor_messages") { + continue } - } - // During DNS-discovery or when the server is taking a while to discover - // peers and has yet to receive a message, the sensor_messages prometheus - // metric may not exist yet. 
- if family == nil { - log.Trace().Msg("Could not find sensor_messages metric family") - return nil - } + for _, metric := range family.GetMetric() { + for _, label := range metric.GetLabel() { + url := label.GetValue() + if label.GetName() != "url" || slices.Contains(urls, url) { + continue + } - for _, metric := range family.GetMetric() { - for _, label := range metric.GetLabel() { - url := label.GetValue() - if label.GetName() != "url" || slices.Contains(urls, url) { - continue + counter.DeletePartialMatch(prometheus.Labels{"url": url}) } - - counter.DeletePartialMatch(prometheus.Labels{"url": url}) } } diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index dd471f7f6..d263871fc 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -50,6 +50,17 @@ type ( ShouldWriteTransactions bool ShouldWriteTransactionEvents bool ShouldWritePeers bool + ShouldBroadcastTx bool + ShouldBroadcastTxHashes bool + ShouldBroadcastBlocks bool + ShouldBroadcastBlockHashes bool + MaxCachedTxs int + MaxCachedBlocks int + MaxKnownTxs int + MaxKnownBlocks int + MaxRequests int + CacheTTL time.Duration + PeerCacheTTL time.Duration ShouldRunPprof bool PprofPort uint ShouldRunPrometheus bool @@ -183,27 +194,50 @@ var SensorCmd = &cobra.Command{ Help: "The number of peers the sensor is connected to", }) - msgCounter := promauto.NewCounterVec(prometheus.CounterOpts{ + msgsReceived := promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "sensor", - Name: "messages", + Name: "messages_received", Help: "The number and type of messages the sensor has received", }, []string{"message", "url", "name"}) + msgsSent := promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "sensor", + Name: "messages_sent", + Help: "The number and type of messages the sensor has sent", + }, []string{"message", "url", "name"}) + // Create peer connection manager for broadcasting transactions - conns := p2p.NewConns() + conns := p2p.NewConns(p2p.ConnsOptions{ + MaxCachedTxs: 
inputSensorParams.MaxCachedTxs, + MaxCachedBlocks: inputSensorParams.MaxCachedBlocks, + CacheTTL: inputSensorParams.CacheTTL, + ShouldBroadcastTx: inputSensorParams.ShouldBroadcastTx, + ShouldBroadcastTxHashes: inputSensorParams.ShouldBroadcastTxHashes, + ShouldBroadcastBlocks: inputSensorParams.ShouldBroadcastBlocks, + ShouldBroadcastBlockHashes: inputSensorParams.ShouldBroadcastBlockHashes, + }) opts := p2p.EthProtocolOptions{ - Context: cmd.Context(), - Database: db, - GenesisHash: common.HexToHash(inputSensorParams.GenesisHash), - RPC: inputSensorParams.RPC, - SensorID: inputSensorParams.SensorID, - NetworkID: inputSensorParams.NetworkID, - Conns: conns, - Head: &head, - HeadMutex: &sync.RWMutex{}, - ForkID: forkid.ID{Hash: [4]byte(inputSensorParams.ForkID)}, - MsgCounter: msgCounter, + Context: cmd.Context(), + Database: db, + GenesisHash: common.HexToHash(inputSensorParams.GenesisHash), + RPC: inputSensorParams.RPC, + SensorID: inputSensorParams.SensorID, + NetworkID: inputSensorParams.NetworkID, + Conns: conns, + Head: &head, + HeadMutex: &sync.RWMutex{}, + ForkID: forkid.ID{Hash: [4]byte(inputSensorParams.ForkID)}, + MessagesReceived: msgsReceived, + MessagesSent: msgsSent, + ShouldBroadcastTx: inputSensorParams.ShouldBroadcastTx, + ShouldBroadcastTxHashes: inputSensorParams.ShouldBroadcastTxHashes, + ShouldBroadcastBlocks: inputSensorParams.ShouldBroadcastBlocks, + ShouldBroadcastBlockHashes: inputSensorParams.ShouldBroadcastBlockHashes, + MaxKnownTxs: inputSensorParams.MaxKnownTxs, + MaxKnownBlocks: inputSensorParams.MaxKnownBlocks, + MaxRequests: inputSensorParams.MaxRequests, + PeerCacheTTL: inputSensorParams.PeerCacheTTL, } config := ethp2p.Config{ @@ -258,7 +292,7 @@ var SensorCmd = &cobra.Command{ go handlePrometheus() } - go handleAPI(&server, msgCounter) + go handleAPI(&server, msgsReceived, msgsSent, conns) // Start the RPC server for receiving transactions go handleRPC(conns, inputSensorParams.NetworkID) @@ -277,8 +311,11 @@ var SensorCmd = 
&cobra.Command{ urls = append(urls, peer.Node().URLv4()) } - if err := removePeerMessages(msgCounter, urls); err != nil { - log.Error().Err(err).Msg("Failed to clean up peer messages") + if err := removePeerMessages(msgsReceived, urls); err != nil { + log.Error().Err(err).Msg("Failed to clean up received peer messages") + } + if err := removePeerMessages(msgsSent, urls); err != nil { + log.Error().Err(err).Msg("Failed to clean up sent peer messages") } if err := p2p.WritePeers(inputSensorParams.NodesFile, urls); err != nil { @@ -449,6 +486,17 @@ will result in less chance of missing data but can significantly increase memory f.BoolVar(&inputSensorParams.ShouldWriteTransactionEvents, "write-tx-events", true, `write transaction events to database (this option can significantly increase CPU and memory usage)`) f.BoolVar(&inputSensorParams.ShouldWritePeers, "write-peers", true, "write peers to database") + f.BoolVar(&inputSensorParams.ShouldBroadcastTx, "broadcast-tx", false, "broadcast full transactions to peers") + f.BoolVar(&inputSensorParams.ShouldBroadcastTxHashes, "broadcast-tx-hashes", false, "broadcast transaction hashes to peers") + f.BoolVar(&inputSensorParams.ShouldBroadcastBlocks, "broadcast-blocks", false, "broadcast full blocks to peers") + f.BoolVar(&inputSensorParams.ShouldBroadcastBlockHashes, "broadcast-block-hashes", false, "broadcast block hashes to peers") + f.IntVar(&inputSensorParams.MaxCachedTxs, "max-cached-txs", 2048, "maximum number of transactions to cache for serving to peers") + f.IntVar(&inputSensorParams.MaxCachedBlocks, "max-cached-blocks", 128, "maximum number of blocks to cache for serving to peers") + f.DurationVar(&inputSensorParams.CacheTTL, "cache-ttl", 10*time.Minute, "time to live for cached transactions and blocks") + f.IntVar(&inputSensorParams.MaxKnownTxs, "max-known-txs", 8192, "maximum transaction hashes to track per peer") + f.IntVar(&inputSensorParams.MaxKnownBlocks, "max-known-blocks", 1024, "maximum block hashes to 
track per peer") + f.IntVar(&inputSensorParams.MaxRequests, "max-requests", 2048, "maximum request IDs to track per peer") + f.DurationVar(&inputSensorParams.PeerCacheTTL, "peer-cache-ttl", 5*time.Minute, "time to live for per-peer caches (known tx/block hashes and requests)") f.BoolVar(&inputSensorParams.ShouldRunPprof, "pprof", false, "run pprof server") f.UintVar(&inputSensorParams.PprofPort, "pprof-port", 6060, "port pprof runs on") f.BoolVar(&inputSensorParams.ShouldRunPrometheus, "prom", true, "run Prometheus server") diff --git a/doc/polycli_p2p_sensor.md b/doc/polycli_p2p_sensor.md index 640e9e35a..8f3301eed 100644 --- a/doc/polycli_p2p_sensor.md +++ b/doc/polycli_p2p_sensor.md @@ -23,44 +23,55 @@ If no nodes.json file exists, it will be created. ## Flags ```bash - --api-port uint port API server will listen on (default 8080) - -b, --bootnodes string comma separated nodes used for bootstrapping - --database string which database to persist data to, options are: - - datastore (GCP Datastore) - - json (output to stdout) - - none (no persistence) (default "none") - -d, --database-id string datastore database ID - --dial-ratio int ratio of inbound to dialed connections (dial ratio of 2 allows 1/2 of connections to be dialed, setting to 0 defaults to 3) - --discovery-dns string DNS discovery ENR tree URL - --discovery-port int UDP P2P discovery port (default 30303) - --fork-id bytesHex hex encoded fork ID (omit 0x) (default F097BC13) - --genesis-hash string genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") - -h, --help help for sensor - --key string hex-encoded private key (cannot be set with --key-file) - -k, --key-file string private key file (cannot be set with --key) - -D, --max-db-concurrency int maximum number of concurrent database operations to perform (increasing this - will result in less chance of missing data but can significantly increase memory usage) (default 10000) - -m, --max-peers int maximum 
number of peers to connect to (default 2000) - --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") - -n, --network-id uint filter discovered nodes by this network ID - --no-discovery disable P2P peer discovery - --port int TCP network listening port (default 30303) - --pprof run pprof server - --pprof-port uint port pprof runs on (default 6060) - -p, --project-id string GCP project ID - --prom run Prometheus server (default true) - --prom-port uint port Prometheus runs on (default 2112) - --rpc string RPC endpoint used to fetch latest block (default "https://polygon-rpc.com") - --rpc-port uint port for JSON-RPC server to receive transactions (default 8545) - -s, --sensor-id string sensor ID when writing block/tx events - --static-nodes string static nodes file - --trusted-nodes string trusted nodes file - --ttl duration time to live (default 336h0m0s) - --write-block-events write block events to database (default true) - -B, --write-blocks write blocks to database (default true) - --write-peers write peers to database (default true) - --write-tx-events write transaction events to database (this option can significantly increase CPU and memory usage) (default true) - -t, --write-txs write transactions to database (this option can significantly increase CPU and memory usage) (default true) + --api-port uint port API server will listen on (default 8080) + -b, --bootnodes string comma separated nodes used for bootstrapping + --broadcast-block-hashes broadcast block hashes to peers + --broadcast-blocks broadcast full blocks to peers + --broadcast-tx broadcast full transactions to peers + --broadcast-tx-hashes broadcast transaction hashes to peers + --cache-ttl duration time to live for cached transactions and blocks (default 10m0s) + --database string which database to persist data to, options are: + - datastore (GCP Datastore) + - json (output to stdout) + - none (no persistence) (default "none") + -d, --database-id string datastore 
database ID + --dial-ratio int ratio of inbound to dialed connections (dial ratio of 2 allows 1/2 of connections to be dialed, setting to 0 defaults to 3) + --discovery-dns string DNS discovery ENR tree URL + --discovery-port int UDP P2P discovery port (default 30303) + --fork-id bytesHex hex encoded fork ID (omit 0x) (default F097BC13) + --genesis-hash string genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") + -h, --help help for sensor + --key string hex-encoded private key (cannot be set with --key-file) + -k, --key-file string private key file (cannot be set with --key) + --max-cached-blocks int maximum number of blocks to cache for serving to peers (default 128) + --max-cached-txs int maximum number of transactions to cache for serving to peers (default 2048) + -D, --max-db-concurrency int maximum number of concurrent database operations to perform (increasing this + will result in less chance of missing data but can significantly increase memory usage) (default 10000) + --max-known-blocks int maximum block hashes to track per peer (default 1024) + --max-known-txs int maximum transaction hashes to track per peer (default 8192) + -m, --max-peers int maximum number of peers to connect to (default 2000) + --max-requests int maximum request IDs to track per peer (default 2048) + --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") + -n, --network-id uint filter discovered nodes by this network ID + --no-discovery disable P2P peer discovery + --peer-cache-ttl duration time to live for per-peer caches (known tx/block hashes and requests) (default 5m0s) + --port int TCP network listening port (default 30303) + --pprof run pprof server + --pprof-port uint port pprof runs on (default 6060) + -p, --project-id string GCP project ID + --prom run Prometheus server (default true) + --prom-port uint port Prometheus runs on (default 2112) + --rpc string RPC endpoint used to fetch latest block 
(default "https://polygon-rpc.com") + --rpc-port uint port for JSON-RPC server to receive transactions (default 8545) + -s, --sensor-id string sensor ID when writing block/tx events + --static-nodes string static nodes file + --trusted-nodes string trusted nodes file + --ttl duration time to live (default 336h0m0s) + --write-block-events write block events to database (default true) + -B, --write-blocks write blocks to database (default true) + --write-peers write peers to database (default true) + --write-tx-events write transaction events to database (this option can significantly increase CPU and memory usage) (default true) + -t, --write-txs write transactions to database (this option can significantly increase CPU and memory usage) (default true) ``` The command also inherits flags from parent commands. diff --git a/p2p/cache.go b/p2p/cache.go new file mode 100644 index 000000000..97f8ca967 --- /dev/null +++ b/p2p/cache.go @@ -0,0 +1,155 @@ +package p2p + +import ( + "container/list" + "sync" + "time" +) + +// Cache is a thread-safe LRU cache with optional TTL-based expiration. +type Cache[K comparable, V any] struct { + mu sync.RWMutex + maxSize int + ttl time.Duration + items map[K]*list.Element + list *list.List +} + +type entry[K comparable, V any] struct { + key K + value V + expiresAt time.Time +} + +// NewCache creates a new cache with the given max size and optional TTL. +// If maxSize <= 0, the cache has no size limit. +// If ttl is 0, entries never expire based on time. +func NewCache[K comparable, V any](maxSize int, ttl time.Duration) *Cache[K, V] { + return &Cache[K, V]{ + maxSize: maxSize, + ttl: ttl, + items: make(map[K]*list.Element), + list: list.New(), + } +} + +// Add adds or updates a value in the cache. 
+func (c *Cache[K, V]) Add(key K, value V) { + c.mu.Lock() + defer c.mu.Unlock() + + now := time.Now() + expiresAt := time.Time{} + if c.ttl > 0 { + expiresAt = now.Add(c.ttl) + } + + if elem, ok := c.items[key]; ok { + c.list.MoveToFront(elem) + e := elem.Value.(*entry[K, V]) + e.value = value + e.expiresAt = expiresAt + return + } + + e := &entry[K, V]{ + key: key, + value: value, + expiresAt: expiresAt, + } + elem := c.list.PushFront(e) + c.items[key] = elem + + if c.maxSize > 0 && c.list.Len() > c.maxSize { + back := c.list.Back() + if back != nil { + c.list.Remove(back) + e := back.Value.(*entry[K, V]) + delete(c.items, e.key) + } + } +} + +// Get retrieves a value from the cache and updates LRU ordering. +func (c *Cache[K, V]) Get(key K) (V, bool) { + c.mu.Lock() + defer c.mu.Unlock() + + elem, ok := c.items[key] + if !ok { + var zero V + return zero, false + } + + e := elem.Value.(*entry[K, V]) + + if c.ttl > 0 && time.Now().After(e.expiresAt) { + c.list.Remove(elem) + delete(c.items, key) + var zero V + return zero, false + } + + c.list.MoveToFront(elem) + return e.value, true +} + +// Contains checks if a key exists in the cache and is not expired. +// Uses a read lock and doesn't update LRU ordering. +func (c *Cache[K, V]) Contains(key K) bool { + c.mu.RLock() + defer c.mu.RUnlock() + + elem, ok := c.items[key] + if !ok { + return false + } + + e := elem.Value.(*entry[K, V]) + + if c.ttl > 0 && time.Now().After(e.expiresAt) { + return false + } + + return true +} + +// Remove removes a key from the cache. +func (c *Cache[K, V]) Remove(key K) { + c.mu.Lock() + defer c.mu.Unlock() + + if elem, ok := c.items[key]; ok { + c.list.Remove(elem) + delete(c.items, key) + } +} + +// Len returns the number of items in the cache. +func (c *Cache[K, V]) Len() int { + c.mu.RLock() + defer c.mu.RUnlock() + return c.list.Len() +} + +// Purge clears all items from the cache. 
+func (c *Cache[K, V]) Purge() { + c.mu.Lock() + defer c.mu.Unlock() + + c.items = make(map[K]*list.Element) + c.list.Init() +} + +// Keys returns all keys in the cache. +func (c *Cache[K, V]) Keys() []K { + c.mu.RLock() + defer c.mu.RUnlock() + + keys := make([]K, 0, c.list.Len()) + for elem := c.list.Front(); elem != nil; elem = elem.Next() { + e := elem.Value.(*entry[K, V]) + keys = append(keys, e.key) + } + return keys +} diff --git a/p2p/conns.go b/p2p/conns.go index ac7074794..b39ac8db5 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -1,52 +1,93 @@ package p2p import ( + "math/big" "sync" + "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/protocols/eth" ethp2p "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/rs/zerolog/log" ) +// ConnsOptions contains configuration options for the connection manager. +type ConnsOptions struct { + MaxCachedTxs int + MaxCachedBlocks int + CacheTTL time.Duration + ShouldBroadcastTx bool + ShouldBroadcastTxHashes bool + ShouldBroadcastBlocks bool + ShouldBroadcastBlockHashes bool +} + // Conns manages a collection of active peer connections for transaction broadcasting. type Conns struct { conns map[string]*conn mu sync.RWMutex + + // Shared caches for serving broadcast data to peers + txs *Cache[common.Hash, *types.Transaction] + blocks *Cache[common.Hash, *types.Block] + + // Broadcast flags control what gets cached and rebroadcasted + shouldBroadcastTx bool + shouldBroadcastTxHashes bool + shouldBroadcastBlocks bool + shouldBroadcastBlockHashes bool } -// NewConns creates a new connection manager. -func NewConns() *Conns { +// NewConns creates a new connection manager with the specified options. 
+func NewConns(opts ConnsOptions) *Conns { + // Create caches with configured TTL for data freshness + txCache := NewCache[common.Hash, *types.Transaction](opts.MaxCachedTxs, opts.CacheTTL) + blockCache := NewCache[common.Hash, *types.Block](opts.MaxCachedBlocks, opts.CacheTTL) + return &Conns{ - conns: make(map[string]*conn), + conns: make(map[string]*conn), + txs: txCache, + blocks: blockCache, + shouldBroadcastTx: opts.ShouldBroadcastTx, + shouldBroadcastTxHashes: opts.ShouldBroadcastTxHashes, + shouldBroadcastBlocks: opts.ShouldBroadcastBlocks, + shouldBroadcastBlockHashes: opts.ShouldBroadcastBlockHashes, } } -// Add adds a connection to the manager. -func (c *Conns) Add(cn *conn) { +// AddConn adds a connection to the manager. +func (c *Conns) AddConn(cn *conn) { c.mu.Lock() defer c.mu.Unlock() c.conns[cn.node.ID().String()] = cn cn.logger.Debug().Msg("Added connection") } -// Remove removes a connection from the manager when a peer disconnects. -func (c *Conns) Remove(cn *conn) { +// RemoveConn removes a connection from the manager when a peer disconnects. +func (c *Conns) RemoveConn(cn *conn) { c.mu.Lock() defer c.mu.Unlock() delete(c.conns, cn.node.ID().String()) cn.logger.Debug().Msg("Removed connection") } -// BroadcastTx broadcasts a single transaction to all connected peers. -// Returns the number of peers the transaction was successfully sent to. +// BroadcastTx broadcasts a single transaction to all connected peers and +// returns the number of peers the transaction was successfully sent to. func (c *Conns) BroadcastTx(tx *types.Transaction) int { return c.BroadcastTxs(types.Transactions{tx}) } -// BroadcastTxs broadcasts multiple transactions to all connected peers. -// Returns the number of peers the transactions were successfully sent to. 
+// BroadcastTxs broadcasts multiple transactions to all connected peers, +// filtering out transactions that each peer already knows about, and returns +// the number of peers the transactions were successfully sent to. +// If broadcast flags are disabled, this is a no-op. func (c *Conns) BroadcastTxs(txs types.Transactions) int { + if !c.shouldBroadcastTx { + return 0 + } + c.mu.RLock() defer c.mu.RUnlock() @@ -56,12 +97,225 @@ func (c *Conns) BroadcastTxs(txs types.Transactions) int { count := 0 for _, cn := range c.conns { - if err := ethp2p.Send(cn.rw, eth.TransactionsMsg, txs); err != nil { + // Filter transactions this peer doesn't know about + unknownTxs := make(types.Transactions, 0, len(txs)) + for _, tx := range txs { + if !cn.hasKnownTx(tx.Hash()) { + unknownTxs = append(unknownTxs, tx) + } + } + + if len(unknownTxs) == 0 { + continue + } + + // Send as TransactionsPacket + packet := eth.TransactionsPacket(unknownTxs) + cn.AddCountSent(packet.Name(), 1) + if err := ethp2p.Send(cn.rw, eth.TransactionsMsg, packet); err != nil { + cn.logger.Debug(). + Err(err). + Msg("Failed to send transactions") + continue + } + + // Mark transactions as known for this peer + for _, tx := range unknownTxs { + cn.addKnownTx(tx.Hash()) + } + + count++ + } + + if count > 0 { + log.Debug(). + Int("peers", count). + Int("txs", len(txs)). + Msg("Broadcasted transactions") + } + + return count +} + +// BroadcastTxHashes broadcasts transaction hashes to peers that don't already +// know about them and returns the number of peers the hashes were successfully +// sent to. If broadcast flags are disabled, this is a no-op. 
+func (c *Conns) BroadcastTxHashes(hashes []common.Hash) int { + if !c.shouldBroadcastTxHashes { + return 0 + } + + c.mu.RLock() + defer c.mu.RUnlock() + + if len(hashes) == 0 { + return 0 + } + + count := 0 + for _, cn := range c.conns { + // Filter hashes this peer doesn't know about + unknownHashes := make([]common.Hash, 0, len(hashes)) + for _, hash := range hashes { + if !cn.hasKnownTx(hash) { + unknownHashes = append(unknownHashes, hash) + } + } + + if len(unknownHashes) == 0 { + continue + } + + // Send NewPooledTransactionHashesPacket + packet := eth.NewPooledTransactionHashesPacket{ + Types: make([]byte, len(unknownHashes)), + Sizes: make([]uint32, len(unknownHashes)), + Hashes: unknownHashes, + } + + cn.AddCountSent(packet.Name(), 1) + if err := ethp2p.Send(cn.rw, eth.NewPooledTransactionHashesMsg, packet); err != nil { + cn.logger.Debug(). + Err(err). + Msg("Failed to send transaction hashes") + continue + } + + // Mark hashes as known for this peer + for _, hash := range unknownHashes { + cn.addKnownTx(hash) + } + + count++ + } + + if count > 0 { + log.Debug(). + Int("peers", count). + Int("hashes", len(hashes)). + Msg("Broadcasted transaction hashes") + } + + return count +} + +// BroadcastBlock broadcasts a full block to peers that don't already know +// about it and returns the number of peers the block was successfully sent to. +// If broadcast flags are disabled, this is a no-op. +func (c *Conns) BroadcastBlock(block *types.Block, td *big.Int) int { + if !c.shouldBroadcastBlocks { + return 0 + } + + c.mu.RLock() + defer c.mu.RUnlock() + + if block == nil { + return 0 + } + + hash := block.Hash() + count := 0 + + for _, cn := range c.conns { + // Skip if peer already knows about this block + if cn.hasKnownBlock(hash) { + continue + } + + // Send NewBlockPacket + packet := eth.NewBlockPacket{ + Block: block, + TD: td, + } + + cn.AddCountSent(packet.Name(), 1) + if err := ethp2p.Send(cn.rw, eth.NewBlockMsg, &packet); err != nil { + cn.logger.Debug(). 
+ Err(err). + Uint64("number", block.Number().Uint64()). + Msg("Failed to send block") + continue + } + + // Mark block as known for this peer + cn.addKnownBlock(hash) + count++ + } + + if count > 0 { + log.Debug(). + Int("peers", count). + Uint64("number", block.NumberU64()). + Msg("Broadcasted block") + } + + return count +} + +// BroadcastBlockHashes broadcasts block hashes with their corresponding block +// numbers to peers that don't already know about them and returns the number +// of peers the hashes were successfully sent to. If broadcast flags are disabled, this is a no-op. +func (c *Conns) BroadcastBlockHashes(hashes []common.Hash, numbers []uint64) int { + if !c.shouldBroadcastBlockHashes { + return 0 + } + + c.mu.RLock() + defer c.mu.RUnlock() + + if len(hashes) == 0 || len(hashes) != len(numbers) { + return 0 + } + + count := 0 + + for _, cn := range c.conns { + // Filter hashes this peer doesn't know about + unknownHashes := make([]common.Hash, 0, len(hashes)) + unknownNumbers := make([]uint64, 0, len(numbers)) + + for i, hash := range hashes { + if !cn.hasKnownBlock(hash) { + unknownHashes = append(unknownHashes, hash) + unknownNumbers = append(unknownNumbers, numbers[i]) + } + } + + if len(unknownHashes) == 0 { continue } + + // Send NewBlockHashesPacket + packet := make(eth.NewBlockHashesPacket, len(unknownHashes)) + for i := range unknownHashes { + packet[i].Hash = unknownHashes[i] + packet[i].Number = unknownNumbers[i] + } + + cn.AddCountSent(packet.Name(), 1) + if err := ethp2p.Send(cn.rw, eth.NewBlockHashesMsg, packet); err != nil { + cn.logger.Debug(). + Err(err). + Msg("Failed to send block hashes") + continue + } + + // Mark hashes as known for this peer + for _, hash := range unknownHashes { + cn.addKnownBlock(hash) + } + count++ } + if count > 0 { + log.Debug(). + Int("peers", count). + Int("hashes", len(hashes)). 
+ Msg("Broadcasted block hashes") + } + return count } @@ -77,3 +331,98 @@ func (c *Conns) Nodes() []*enode.Node { return nodes } + +// AddTx adds a transaction to the shared cache for duplicate detection and serving. +func (c *Conns) AddTx(hash common.Hash, tx *types.Transaction) { + c.txs.Add(hash, tx) +} + +// AddBlock adds a block to the shared cache for duplicate detection and serving. +func (c *Conns) AddBlock(hash common.Hash, block *types.Block) { + c.blocks.Add(hash, block) +} + +// AddBlockHeader adds a block header to the cache. If a block already exists with a real header, does nothing. +// If a block exists with an empty header (body received first), replaces it with the real header. +// Otherwise creates a new block with just the header. +func (c *Conns) AddBlockHeader(header *types.Header) { + hash := header.Hash() + + // Check if block already exists in cache + block, ok := c.blocks.Get(hash) + if !ok { + // No block exists, create new one with header only + c.AddBlock(hash, types.NewBlockWithHeader(header)) + return + } + + // Check if existing block has a real header already + if block.Number() != nil && block.Number().Uint64() > 0 { + // Block already has a real header, don't overwrite + return + } + + // Block has empty header (body came first), replace with real header + keep body + b := types.NewBlockWithHeader(header).WithBody(types.Body{ + Transactions: block.Transactions(), + Uncles: block.Uncles(), + Withdrawals: block.Withdrawals(), + }) + c.AddBlock(hash, b) +} + +// AddBlockBody adds a body to an existing block in the cache. If no block exists for this hash, +// creates a block with an empty header and the body. If a block exists with only a header, updates it with the body. 
+func (c *Conns) AddBlockBody(hash common.Hash, body *eth.BlockBody) { + // Get existing block from cache + block, ok := c.blocks.Get(hash) + if !ok { + // No header yet, create block with empty header and body + blockWithBody := types.NewBlockWithHeader(&types.Header{}).WithBody(types.Body(*body)) + c.AddBlock(hash, blockWithBody) + return + } + + // Check if block already has a body + if len(block.Transactions()) > 0 || len(block.Uncles()) > 0 || len(block.Withdrawals()) > 0 { + // Block already has a body, no need to update + return + } + + // Reconstruct full block with existing header and body + c.AddBlock(hash, block.WithBody(types.Body(*body))) +} + +// GetTx retrieves a transaction from the shared cache. +func (c *Conns) GetTx(hash common.Hash) (*types.Transaction, bool) { + return c.txs.Get(hash) +} + +// GetBlock retrieves a block from the shared cache. +func (c *Conns) GetBlock(hash common.Hash) (*types.Block, bool) { + return c.blocks.Get(hash) +} + +// HasBlockHeader checks if we have at least a header for a block in the cache. +// Returns true if we have a block with a real header (number > 0). +func (c *Conns) HasBlockHeader(hash common.Hash) bool { + block, ok := c.blocks.Get(hash) + if !ok { + return false + } + + // Check if block has a real header (not empty) + return block.Number() != nil && block.Number().Uint64() > 0 +} + +// GetPeerConnectedAt returns the time when a peer connected, or zero time if not found. +func (c *Conns) GetPeerConnectedAt(url string) time.Time { + c.mu.RLock() + defer c.mu.RUnlock() + + if cn, ok := c.conns[url]; ok { + return cn.connectedAt + } + + return time.Time{} +} diff --git a/p2p/protocol.go b/p2p/protocol.go index fdbcca582..668735d5c 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -1,7 +1,6 @@ package p2p import ( - "container/list" "context" "encoding/hex" "errors" @@ -25,46 +24,71 @@ import ( // conn represents an individual connection with a peer. 
type conn struct { - sensorID string - node *enode.Node - logger zerolog.Logger - rw ethp2p.MsgReadWriter - db database.Database - head *HeadBlock - headMutex *sync.RWMutex - counter *prometheus.CounterVec - peer *ethp2p.Peer + sensorID string + node *enode.Node + logger zerolog.Logger + rw ethp2p.MsgReadWriter + db database.Database + conns *Conns + head *HeadBlock + headMutex *sync.RWMutex + msgsReceived *prometheus.CounterVec + msgsSent *prometheus.CounterVec + peer *ethp2p.Peer // requests is used to store the request ID and the block hash. This is used // when fetching block bodies because the eth protocol block bodies do not // contain information about the block hash. - requests *list.List + requests *Cache[uint64, common.Hash] requestNum uint64 - // Linked list of seen block hashes with timestamps. - blockHashes *list.List - // oldestBlock stores the first block the sensor has seen so when fetching // parent blocks, it does not request blocks older than this. oldestBlock *types.Header + + // Broadcast flags control what gets rebroadcasted to other peers + shouldBroadcastTx bool + shouldBroadcastTxHashes bool + shouldBroadcastBlocks bool + shouldBroadcastBlockHashes bool + + // Known caches track what this peer has seen to avoid redundant sends. + knownTxs *Cache[common.Hash, struct{}] + knownBlocks *Cache[common.Hash, struct{}] + + // connectedAt stores when this connection was established + connectedAt time.Time } // EthProtocolOptions is the options used when creating a new eth protocol. 
type EthProtocolOptions struct { - Context context.Context - Database database.Database - GenesisHash common.Hash - RPC string - SensorID string - NetworkID uint64 - Conns *Conns - ForkID forkid.ID - MsgCounter *prometheus.CounterVec + Context context.Context + Database database.Database + GenesisHash common.Hash + RPC string + SensorID string + NetworkID uint64 + Conns *Conns + ForkID forkid.ID + MessagesReceived *prometheus.CounterVec + MessagesSent *prometheus.CounterVec // Head keeps track of the current head block of the chain. This is required // when doing the status exchange. Head *HeadBlock HeadMutex *sync.RWMutex + + // Broadcast flags control what gets rebroadcasted to other peers + ShouldBroadcastTx bool + ShouldBroadcastTxHashes bool + ShouldBroadcastBlocks bool + ShouldBroadcastBlockHashes bool + + // Cache sizes for known tx/block tracking per peer + MaxKnownTxs int + MaxKnownBlocks int + MaxRequests int + PeerCacheTTL time.Duration } // HeadBlock contains the necessary head block data for the status message. @@ -75,14 +99,6 @@ type HeadBlock struct { Time uint64 } -type BlockHashEntry struct { - hash common.Hash - time time.Time -} - -// blockHashTTL defines the time-to-live for block hash entries in blockHashes list. -var blockHashTTL = 10 * time.Minute - // NewEthProtocol creates the new eth protocol. This will handle writing the // status exchange, message handling, and writing blocks/txs to the database. 
func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { @@ -92,20 +108,29 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { Length: 17, Run: func(p *ethp2p.Peer, rw ethp2p.MsgReadWriter) error { c := &conn{ - sensorID: opts.SensorID, - node: p.Node(), - logger: log.With().Str("peer", p.Node().URLv4()).Logger(), - rw: rw, - db: opts.Database, - requests: list.New(), - requestNum: 0, - head: opts.Head, - headMutex: opts.HeadMutex, - counter: opts.MsgCounter, - peer: p, - blockHashes: list.New(), + sensorID: opts.SensorID, + node: p.Node(), + logger: log.With().Str("peer", p.Node().URLv4()).Logger(), + rw: rw, + db: opts.Database, + conns: opts.Conns, + requestNum: 0, + head: opts.Head, + headMutex: opts.HeadMutex, + msgsReceived: opts.MessagesReceived, + msgsSent: opts.MessagesSent, + peer: p, + shouldBroadcastTx: opts.ShouldBroadcastTx, + shouldBroadcastTxHashes: opts.ShouldBroadcastTxHashes, + shouldBroadcastBlocks: opts.ShouldBroadcastBlocks, + shouldBroadcastBlockHashes: opts.ShouldBroadcastBlockHashes, + connectedAt: time.Now(), } + c.knownTxs = NewCache[common.Hash, struct{}](opts.MaxKnownTxs, opts.PeerCacheTTL) + c.knownBlocks = NewCache[common.Hash, struct{}](opts.MaxKnownBlocks, opts.PeerCacheTTL) + c.requests = NewCache[uint64, common.Hash](opts.MaxRequests, opts.PeerCacheTTL) + c.headMutex.RLock() status := eth.StatusPacket{ ProtocolVersion: uint32(version), @@ -122,8 +147,8 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { } // Send the connection object to the conns manager for RPC broadcasting - opts.Conns.Add(c) - defer opts.Conns.Remove(c) + opts.Conns.AddConn(c) + defer opts.Conns.RemoveConn(c) ctx := opts.Context @@ -207,9 +232,14 @@ func (c *conn) statusExchange(packet *eth.StatusPacket) error { return nil } -// AddCount increments the prometheus counter for this connection with the given message name and count. 
-func (c *conn) AddCount(messageName string, count float64) { - c.counter.WithLabelValues(messageName, c.node.URLv4(), c.peer.Fullname()).Add(count) +// AddCountReceived increments the prometheus counter for received messages. +func (c *conn) AddCountReceived(messageName string, count float64) { + c.msgsReceived.WithLabelValues(messageName, c.node.URLv4(), c.peer.Fullname()).Add(count) +} + +// AddCountSent increments the prometheus counter for sent messages. +func (c *conn) AddCountSent(messageName string, count float64) { + c.msgsSent.WithLabelValues(messageName, c.node.URLv4(), c.peer.Fullname()).Add(count) } func (c *conn) readStatus(packet *eth.StatusPacket) error { @@ -218,6 +248,12 @@ func (c *conn) readStatus(packet *eth.StatusPacket) error { return err } + defer func() { + if msgErr := msg.Discard(); msgErr != nil { + c.logger.Error().Err(msgErr).Msg("Failed to discard message") + } + }() + if msg.Code != eth.StatusMsg { return errors.New("expected status message code") } @@ -260,35 +296,25 @@ func (c *conn) getBlockData(hash common.Hash) error { }, } + c.AddCountSent(headersRequest.Name(), 1) if err := ethp2p.Send(c.rw, eth.GetBlockHeadersMsg, headersRequest); err != nil { return err } - for e := c.requests.Front(); e != nil; e = e.Next() { - r := e.Value.(request) - - if time.Since(r.time).Minutes() > 10 { - c.requests.Remove(e) - } - } - c.requestNum++ - c.requests.PushBack(request{ - requestID: c.requestNum, - hash: hash, - time: time.Now(), - }) + c.requests.Add(c.requestNum, hash) bodiesRequest := &GetBlockBodies{ RequestId: c.requestNum, GetBlockBodiesRequest: []common.Hash{hash}, } + c.AddCountSent(bodiesRequest.Name(), 1) return ethp2p.Send(c.rw, eth.GetBlockBodiesMsg, bodiesRequest) } // getParentBlock will send a request to the peer if the parent of the header -// does not exist in the database. +// does not exist in the cache or database. 
func (c *conn) getParentBlock(ctx context.Context, header *types.Header) error { if !c.db.ShouldWriteBlocks() || !c.db.ShouldWriteBlockEvents() { return nil @@ -300,7 +326,9 @@ func (c *conn) getParentBlock(ctx context.Context, header *types.Header) error { return nil } - if c.db.HasBlock(ctx, header.ParentHash) || header.Number.Cmp(c.oldestBlock.Number) != 1 { + if c.conns.HasBlockHeader(header.ParentHash) || + c.db.HasBlock(ctx, header.ParentHash) || + header.Number.Cmp(c.oldestBlock.Number) != 1 { return nil } @@ -320,65 +348,77 @@ func (c *conn) handleNewBlockHashes(ctx context.Context, msg ethp2p.Msg) error { tfs := time.Now() - c.AddCount(packet.Name(), float64(len(packet))) + c.AddCountReceived(packet.Name(), float64(len(packet))) - // Collect unique hashes for database write. + // Collect unique hashes and numbers for database write and broadcasting. uniqueHashes := make([]common.Hash, 0, len(packet)) + uniqueNumbers := make([]uint64, 0, len(packet)) for _, entry := range packet { - hash := entry.Hash - - // Check if we've seen the hash and remove old entries - if c.hasSeenBlockHash(hash) { + // Check if we've already seen this block (in cache or database) + if _, ok := c.conns.GetBlock(entry.Hash); ok || c.db.HasBlock(ctx, entry.Hash) { continue } + // Mark as known from this peer + c.addKnownBlock(entry.Hash) + // Attempt to fetch block data first - if err := c.getBlockData(hash); err != nil { + if err := c.getBlockData(entry.Hash); err != nil { return err } - // Now that we've successfully fetched, record the new block hash - c.addBlockHash(hash) - uniqueHashes = append(uniqueHashes, hash) + uniqueHashes = append(uniqueHashes, entry.Hash) + uniqueNumbers = append(uniqueNumbers, entry.Number) } // Write only unique hashes to the database. 
- if len(uniqueHashes) > 0 { - c.db.WriteBlockHashes(ctx, c.node, uniqueHashes, tfs) + if len(uniqueHashes) == 0 { + return nil } + c.db.WriteBlockHashes(ctx, c.node, uniqueHashes, tfs) + + // Broadcast block hashes to other peers + c.conns.BroadcastBlockHashes(uniqueHashes, uniqueNumbers) + return nil } -// addBlockHash adds a new block hash with a timestamp to the blockHashes list. -func (c *conn) addBlockHash(hash common.Hash) { - now := time.Now() +// addKnownTx adds a transaction hash to the known tx cache. +func (c *conn) addKnownTx(hash common.Hash) { + if !c.shouldBroadcastTx && !c.shouldBroadcastTxHashes { + return + } - // Add the new block hash entry to the list. - c.blockHashes.PushBack(BlockHashEntry{ - hash: hash, - time: now, - }) + c.knownTxs.Add(hash, struct{}{}) } -// Helper method to check if a block hash is already in blockHashes. -func (c *conn) hasSeenBlockHash(hash common.Hash) bool { - now := time.Now() - for e := c.blockHashes.Front(); e != nil; e = e.Next() { - entry := e.Value.(BlockHashEntry) - // Check if the hash matches. We can short circuit here because there will - // be block hashes that we haven't seen before, which will make a full - // iteration of the blockHashes linked list. - if entry.hash.Cmp(hash) == 0 { - return true - } - // Remove entries older than blockHashTTL. - if now.Sub(entry.time) > blockHashTTL { - c.blockHashes.Remove(e) - } +// addKnownBlock adds a block hash to the known block cache. +func (c *conn) addKnownBlock(hash common.Hash) { + if !c.shouldBroadcastBlocks && !c.shouldBroadcastBlockHashes { + return } - return false + + c.knownBlocks.Add(hash, struct{}{}) +} + +// hasKnownTx checks if a transaction hash is in the known tx cache. +func (c *conn) hasKnownTx(hash common.Hash) bool { + if !c.shouldBroadcastTx && !c.shouldBroadcastTxHashes { + return false + } + + return c.knownTxs.Contains(hash) +} + +// hasKnownBlock checks if a block hash is in the known block cache. 
+func (c *conn) hasKnownBlock(hash common.Hash) bool { + if !c.shouldBroadcastBlocks && !c.shouldBroadcastBlockHashes { + return false + } + + return c.knownBlocks.Contains(hash) } func (c *conn) handleTransactions(ctx context.Context, msg ethp2p.Msg) error { @@ -389,10 +429,29 @@ func (c *conn) handleTransactions(ctx context.Context, msg ethp2p.Msg) error { tfs := time.Now() - c.AddCount(txs.Name(), float64(len(txs))) + c.AddCountReceived(txs.Name(), float64(len(txs))) + + // Mark transactions as known from this peer + for _, tx := range txs { + c.addKnownTx(tx.Hash()) + } c.db.WriteTransactions(ctx, c.node, txs, tfs) + // Cache transactions for duplicate detection and serving to peers + for _, tx := range txs { + c.conns.AddTx(tx.Hash(), tx) + } + + hashes := make([]common.Hash, len(txs)) + for i, tx := range txs { + hashes[i] = tx.Hash() + } + + // Broadcast transactions or hashes to other peers + c.conns.BroadcastTxs(types.Transactions(txs)) + c.conns.BroadcastTxHashes(hashes) + return nil } @@ -402,13 +461,20 @@ func (c *conn) handleGetBlockHeaders(msg ethp2p.Msg) error { return err } - c.AddCount(request.Name(), 1) + c.AddCountReceived(request.Name(), 1) - return ethp2p.Send( - c.rw, - eth.BlockHeadersMsg, - ð.BlockHeadersPacket{RequestId: request.RequestId}, - ) + // Try to serve from cache if we have the block + var headers []*types.Header + if block, ok := c.conns.GetBlock(request.Origin.Hash); ok { + headers = []*types.Header{block.Header()} + } + + packet := ð.BlockHeadersPacket{ + RequestId: request.RequestId, + BlockHeadersRequest: headers, + } + c.AddCountSent(packet.Name(), 1) + return ethp2p.Send(c.rw, eth.BlockHeadersMsg, packet) } func (c *conn) handleBlockHeaders(ctx context.Context, msg ethp2p.Msg) error { @@ -420,7 +486,7 @@ func (c *conn) handleBlockHeaders(ctx context.Context, msg ethp2p.Msg) error { tfs := time.Now() headers := packet.BlockHeadersRequest - c.AddCount(packet.Name(), float64(len(headers))) + c.AddCountReceived(packet.Name(), 
float64(len(headers))) for _, header := range headers { if err := c.getParentBlock(ctx, header); err != nil { @@ -429,6 +495,11 @@ func (c *conn) handleBlockHeaders(ctx context.Context, msg ethp2p.Msg) error { } c.db.WriteBlockHeaders(ctx, headers, tfs) + + for _, header := range headers { + c.conns.AddBlockHeader(header) + } + return nil } @@ -438,13 +509,26 @@ func (c *conn) handleGetBlockBodies(msg ethp2p.Msg) error { return err } - c.AddCount(request.Name(), float64(len(request.GetBlockBodiesRequest))) + c.AddCountReceived(request.Name(), float64(len(request.GetBlockBodiesRequest))) - return ethp2p.Send( - c.rw, - eth.BlockBodiesMsg, - ð.BlockBodiesPacket{RequestId: request.RequestId}, - ) + // Try to serve from cache + var bodies []*eth.BlockBody + for _, hash := range request.GetBlockBodiesRequest { + if block, ok := c.conns.GetBlock(hash); ok { + bodies = append(bodies, ð.BlockBody{ + Transactions: block.Transactions(), + Uncles: block.Uncles(), + Withdrawals: block.Withdrawals(), + }) + } + } + + packet := ð.BlockBodiesPacket{ + RequestId: request.RequestId, + BlockBodiesResponse: bodies, + } + c.AddCountSent(packet.Name(), 1) + return ethp2p.Send(c.rw, eth.BlockBodiesMsg, packet) } func (c *conn) handleBlockBodies(ctx context.Context, msg ethp2p.Msg) error { @@ -459,25 +543,19 @@ func (c *conn) handleBlockBodies(ctx context.Context, msg ethp2p.Msg) error { return nil } - c.AddCount(packet.Name(), float64(len(packet.BlockBodiesResponse))) - - var hash *common.Hash - for e := c.requests.Front(); e != nil; e = e.Next() { - r := e.Value.(request) - - if r.requestID == packet.RequestId { - hash = &r.hash - c.requests.Remove(e) - break - } - } + c.AddCountReceived(packet.Name(), float64(len(packet.BlockBodiesResponse))) - if hash == nil { + hash, ok := c.requests.Get(packet.RequestId) + if !ok { c.logger.Warn().Msg("No block hash found for block body") return nil } + c.requests.Remove(packet.RequestId) - c.db.WriteBlockBody(ctx, packet.BlockBodiesResponse[0], 
*hash, tfs) + c.db.WriteBlockBody(ctx, packet.BlockBodiesResponse[0], hash, tfs) + + // Add body to cache - will merge with header if it exists + c.conns.AddBlockBody(hash, packet.BlockBodiesResponse[0]) return nil } @@ -490,7 +568,7 @@ func (c *conn) handleNewBlock(ctx context.Context, msg ethp2p.Msg) error { tfs := time.Now() - c.AddCount(block.Name(), 1) + c.AddCountReceived(block.Name(), 1) // Set the head block if newer. c.headMutex.Lock() @@ -511,6 +589,19 @@ func (c *conn) handleNewBlock(ctx context.Context, msg ethp2p.Msg) error { c.db.WriteBlock(ctx, c.node, block.Block, block.TD, tfs) + // Mark block as known from this peer + c.addKnownBlock(block.Block.Hash()) + + // Cache block for duplicate detection and serving to peers + c.conns.AddBlock(block.Block.Hash(), block.Block) + + // Broadcast block or block hash to other peers + c.conns.BroadcastBlock(block.Block, block.TD) + c.conns.BroadcastBlockHashes( + []common.Hash{block.Block.Hash()}, + []uint64{block.Block.Number().Uint64()}, + ) + return nil } @@ -520,12 +611,22 @@ func (c *conn) handleGetPooledTransactions(msg ethp2p.Msg) error { return err } - c.AddCount(request.Name(), float64(len(request.GetPooledTransactionsRequest))) + c.AddCountReceived(request.Name(), float64(len(request.GetPooledTransactionsRequest))) - return ethp2p.Send( - c.rw, - eth.PooledTransactionsMsg, - ð.PooledTransactionsPacket{RequestId: request.RequestId}) + // Try to serve from cache + var txs []*types.Transaction + for _, hash := range request.GetPooledTransactionsRequest { + if tx, ok := c.conns.GetTx(hash); ok { + txs = append(txs, tx) + } + } + + packet := ð.PooledTransactionsPacket{ + RequestId: request.RequestId, + PooledTransactionsResponse: txs, + } + c.AddCountSent(packet.Name(), 1) + return ethp2p.Send(c.rw, eth.PooledTransactionsMsg, packet) } func (c *conn) handleNewPooledTransactionHashes(version uint, msg ethp2p.Msg) error { @@ -544,17 +645,15 @@ func (c *conn) handleNewPooledTransactionHashes(version uint, msg 
ethp2p.Msg) er return errors.New("protocol version not found") } - c.AddCount(name, float64(len(hashes))) + c.AddCountReceived(name, float64(len(hashes))) if !c.db.ShouldWriteTransactions() || !c.db.ShouldWriteTransactionEvents() { return nil } - return ethp2p.Send( - c.rw, - eth.GetPooledTransactionsMsg, - ð.GetPooledTransactionsPacket{GetPooledTransactionsRequest: hashes}, - ) + packet := ð.GetPooledTransactionsPacket{GetPooledTransactionsRequest: hashes} + c.AddCountSent(packet.Name(), 1) + return ethp2p.Send(c.rw, eth.GetPooledTransactionsMsg, packet) } func (c *conn) handlePooledTransactions(ctx context.Context, msg ethp2p.Msg) error { @@ -565,10 +664,29 @@ func (c *conn) handlePooledTransactions(ctx context.Context, msg ethp2p.Msg) err tfs := time.Now() - c.AddCount(packet.Name(), float64(len(packet.PooledTransactionsResponse))) + c.AddCountReceived(packet.Name(), float64(len(packet.PooledTransactionsResponse))) + + // Mark transactions as known from this peer + for _, tx := range packet.PooledTransactionsResponse { + c.addKnownTx(tx.Hash()) + } c.db.WriteTransactions(ctx, c.node, packet.PooledTransactionsResponse, tfs) + // Cache transactions for duplicate detection and serving to peers + for _, tx := range packet.PooledTransactionsResponse { + c.conns.AddTx(tx.Hash(), tx) + } + + hashes := make([]common.Hash, len(packet.PooledTransactionsResponse)) + for i, tx := range packet.PooledTransactionsResponse { + hashes[i] = tx.Hash() + } + + // Broadcast transactions or hashes to other peers + c.conns.BroadcastTxs(types.Transactions(packet.PooledTransactionsResponse)) + c.conns.BroadcastTxHashes(hashes) + return nil } @@ -577,9 +695,8 @@ func (c *conn) handleGetReceipts(msg ethp2p.Msg) error { if err := msg.Decode(&request); err != nil { return err } - return ethp2p.Send( - c.rw, - eth.ReceiptsMsg, - ð.ReceiptsPacket{RequestId: request.RequestId}, - ) + + packet := ð.ReceiptsPacket{RequestId: request.RequestId} + c.AddCountSent(packet.Name(), 0) + return 
ethp2p.Send(c.rw, eth.ReceiptsMsg, packet) } diff --git a/p2p/rlpx.go b/p2p/rlpx.go index d856b5b05..c34d8cdfc 100644 --- a/p2p/rlpx.go +++ b/p2p/rlpx.go @@ -167,13 +167,6 @@ loop: return status, nil } -// request stores the request ID and the block's hash. -type request struct { - requestID uint64 - hash common.Hash - time time.Time -} - // ReadAndServe reads messages from peers and writes it to a database. func (c *rlpxConn) ReadAndServe(count *MessageCount) error { for { From ae4bc714f5bd561d183153db5c3c9fdc9b96afe7 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 20 Jan 2026 11:43:45 -0500 Subject: [PATCH 02/48] chore: revert ulxly changes to match main Co-Authored-By: Claude Opus 4.5 --- cmd/ulxly/balanceandnullifiertreehelper.go | 9 - cmd/ulxly/ulxly.go | 2582 +++++++++++++++++++- 2 files changed, 2533 insertions(+), 58 deletions(-) diff --git a/cmd/ulxly/balanceandnullifiertreehelper.go b/cmd/ulxly/balanceandnullifiertreehelper.go index 1f795eca1..0b4d17d04 100644 --- a/cmd/ulxly/balanceandnullifiertreehelper.go +++ b/cmd/ulxly/balanceandnullifiertreehelper.go @@ -103,15 +103,6 @@ type Balancer struct { lastRoot common.Hash } -func generateZeroHashes(height uint8) []common.Hash { - zeroHashes := make([]common.Hash, height) - zeroHashes[0] = common.Hash{} - for i := 1; i < int(height); i++ { - zeroHashes[i] = crypto.Keccak256Hash(zeroHashes[i-1][:], zeroHashes[i-1][:]) - } - return zeroHashes -} - func NewBalanceTree() (*Balancer, error) { var depth uint8 = 192 zeroHashes := generateZeroHashes(depth) diff --git a/cmd/ulxly/ulxly.go b/cmd/ulxly/ulxly.go index 6c8d5e7cd..7cd4c5671 100644 --- a/cmd/ulxly/ulxly.go +++ b/cmd/ulxly/ulxly.go @@ -1,33 +1,2018 @@ package ulxly import ( - "github.com/0xPolygon/polygon-cli/cmd/ulxly/bridge" - "github.com/0xPolygon/polygon-cli/cmd/ulxly/claim" - "github.com/0xPolygon/polygon-cli/cmd/ulxly/common" - "github.com/0xPolygon/polygon-cli/cmd/ulxly/events" - "github.com/0xPolygon/polygon-cli/cmd/ulxly/proof" - 
"github.com/0xPolygon/polygon-cli/cmd/ulxly/tree" + "bufio" + "bytes" + "context" + "crypto/ecdsa" + "crypto/tls" + _ "embed" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "math/big" + "net/http" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/0xPolygon/polygon-cli/bindings/tokens" + "github.com/0xPolygon/polygon-cli/bindings/ulxly" + "github.com/0xPolygon/polygon-cli/bindings/ulxly/polygonrollupmanager" + "github.com/0xPolygon/polygon-cli/cmd/ulxly/bridge_service" + bridge_service_factory "github.com/0xPolygon/polygon-cli/cmd/ulxly/bridge_service/factory" + smcerror "github.com/0xPolygon/polygon-cli/errors" "github.com/0xPolygon/polygon-cli/flag" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + ethclient "github.com/ethereum/go-ethereum/ethclient" + ethrpc "github.com/ethereum/go-ethereum/rpc" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) +const ( + // TreeDepth of 32 is pulled directly from the + // _DEPOSIT_CONTRACT_TREE_DEPTH from the smart contract. 
We + // could make this a variable as well + // https://github.com/0xPolygonHermez/zkevm-contracts/blob/54f58c8b64806429bc4d5c52248f29cf80ba401c/contracts/v2/lib/DepositContractBase.sol#L15 + TreeDepth = 32 +) + +var ( + ErrNotReadyForClaim = errors.New("the claim transaction is not yet ready to be claimed, try again in a few blocks") + ErrDepositAlreadyClaimed = errors.New("the claim transaction has already been claimed") +) + +type IMT struct { + Branches map[uint32][]common.Hash + Leaves map[uint32]common.Hash + Roots []common.Hash + ZeroHashes []common.Hash + Proofs map[uint32]Proof +} + +type Proof struct { + Siblings [TreeDepth]common.Hash + Root common.Hash + DepositCount uint32 + LeafHash common.Hash +} +type RollupsProof struct { + Siblings [TreeDepth]common.Hash + Root common.Hash + RollupID uint32 + LeafHash common.Hash +} + +type DepositID struct { + DepositCnt uint32 `json:"deposit_cnt"` + NetworkID uint32 `json:"network_id"` +} + +func readDeposit(cmd *cobra.Command) error { + bridgeAddress := getSmcOptions.BridgeAddress + rpcURL := getEvent.URL + toBlock := getEvent.ToBlock + fromBlock := getEvent.FromBlock + filter := getEvent.FilterSize + + // Use the new helper function + var rpc *ethrpc.Client + var err error + + if getEvent.Insecure { + client, clientErr := createInsecureEthClient(rpcURL) + if clientErr != nil { + log.Error().Err(clientErr).Msg("Unable to create insecure client") + return clientErr + } + defer client.Close() + rpc = client.Client() + } else { + rpc, err = ethrpc.DialContext(cmd.Context(), rpcURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer rpc.Close() + } + + ec := ethclient.NewClient(rpc) + + bridgeV2, err := ulxly.NewUlxly(common.HexToAddress(bridgeAddress), ec) + if err != nil { + return err + } + currentBlock := fromBlock + for currentBlock < toBlock { + endBlock := min(currentBlock+filter, toBlock) + + opts := bind.FilterOpts{ + Start: currentBlock, + End: &endBlock, + 
Context: cmd.Context(), + } + evtV2Iterator, err := bridgeV2.FilterBridgeEvent(&opts) + if err != nil { + return err + } + + for evtV2Iterator.Next() { + evt := evtV2Iterator.Event + log.Info().Uint32("deposit", evt.DepositCount).Uint64("block-number", evt.Raw.BlockNumber).Msg("Found ulxly Deposit") + var jBytes []byte + jBytes, err = json.Marshal(evt) + if err != nil { + return err + } + fmt.Println(string(jBytes)) + } + err = evtV2Iterator.Close() + if err != nil { + log.Error().Err(err).Msg("error closing event iterator") + } + currentBlock = endBlock + 1 + } + + return nil +} + +func DecodeGlobalIndex(globalIndex *big.Int) (bool, uint32, uint32, error) { + const lengthGlobalIndexInBytes = 32 + var buf [32]byte + gIBytes := globalIndex.FillBytes(buf[:]) + if len(gIBytes) != lengthGlobalIndexInBytes { + return false, 0, 0, fmt.Errorf("invalid globalIndex length. Should be 32. Current length: %d", len(gIBytes)) + } + mainnetFlag := big.NewInt(0).SetBytes([]byte{gIBytes[23]}).Uint64() == 1 + rollupIndex := big.NewInt(0).SetBytes(gIBytes[24:28]) + localRootIndex := big.NewInt(0).SetBytes(gIBytes[28:32]) + if rollupIndex.Uint64() > math.MaxUint32 { + return false, 0, 0, fmt.Errorf("invalid rollupIndex length. Should be fit into uint32 type") + } + if localRootIndex.Uint64() > math.MaxUint32 { + return false, 0, 0, fmt.Errorf("invalid localRootIndex length. 
Should be fit into uint32 type") + } + return mainnetFlag, uint32(rollupIndex.Uint64()), uint32(localRootIndex.Uint64()), nil // nolint:gosec +} + +func readClaim(cmd *cobra.Command) error { + bridgeAddress := getSmcOptions.BridgeAddress + rpcURL := getEvent.URL + toBlock := getEvent.ToBlock + fromBlock := getEvent.FromBlock + filter := getEvent.FilterSize + + // Use the new helper function + var rpc *ethrpc.Client + var err error + + if getEvent.Insecure { + client, clientErr := createInsecureEthClient(rpcURL) + if clientErr != nil { + log.Error().Err(clientErr).Msg("Unable to create insecure client") + return clientErr + } + defer client.Close() + rpc = client.Client() + } else { + rpc, err = ethrpc.DialContext(cmd.Context(), rpcURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer rpc.Close() + } + + ec := ethclient.NewClient(rpc) + + bridgeV2, err := ulxly.NewUlxly(common.HexToAddress(bridgeAddress), ec) + if err != nil { + return err + } + currentBlock := fromBlock + for currentBlock < toBlock { + endBlock := min(currentBlock+filter, toBlock) + + opts := bind.FilterOpts{ + Start: currentBlock, + End: &endBlock, + Context: cmd.Context(), + } + evtV2Iterator, err := bridgeV2.FilterClaimEvent(&opts) + if err != nil { + return err + } + + for evtV2Iterator.Next() { + evt := evtV2Iterator.Event + var ( + mainnetFlag bool + rollupIndex, localExitRootIndex uint32 + ) + mainnetFlag, rollupIndex, localExitRootIndex, err = DecodeGlobalIndex(evt.GlobalIndex) + if err != nil { + log.Error().Err(err).Msg("error decoding globalIndex") + return err + } + log.Info().Bool("claim-mainnetFlag", mainnetFlag).Uint32("claim-RollupIndex", rollupIndex).Uint32("claim-LocalExitRootIndex", localExitRootIndex).Uint64("block-number", evt.Raw.BlockNumber).Msg("Found Claim") + var jBytes []byte + jBytes, err = json.Marshal(evt) + if err != nil { + return err + } + fmt.Println(string(jBytes)) + } + err = evtV2Iterator.Close() + if err != nil { + 
log.Error().Err(err).Msg("error closing event iterator") + } + currentBlock = endBlock + 1 + } + + return nil +} + +func readVerifyBatches(cmd *cobra.Command) error { + rollupManagerAddress := getVerifyBatchesOptions.RollupManagerAddress + rpcURL := getEvent.URL + toBlock := getEvent.ToBlock + fromBlock := getEvent.FromBlock + filter := getEvent.FilterSize + + // Use the new helper function + var rpc *ethrpc.Client + var err error + + if getEvent.Insecure { + client, clientErr := createInsecureEthClient(rpcURL) + if clientErr != nil { + log.Error().Err(clientErr).Msg("Unable to create insecure client") + return clientErr + } + defer client.Close() + rpc = client.Client() + } else { + rpc, err = ethrpc.DialContext(cmd.Context(), rpcURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer rpc.Close() + } + + client := ethclient.NewClient(rpc) + rm := common.HexToAddress(rollupManagerAddress) + rollupManager, err := polygonrollupmanager.NewPolygonrollupmanager(rm, client) + if err != nil { + return err + } + verifyBatchesTrustedAggregatorSignatureHash := crypto.Keccak256Hash([]byte("VerifyBatchesTrustedAggregator(uint32,uint64,bytes32,bytes32,address)")) + + currentBlock := fromBlock + for currentBlock < toBlock { + endBlock := min(currentBlock+filter, toBlock) + // Filter 0xd1ec3a1216f08b6eff72e169ceb548b782db18a6614852618d86bb19f3f9b0d3 + query := ethereum.FilterQuery{ + FromBlock: new(big.Int).SetUint64(currentBlock), + ToBlock: new(big.Int).SetUint64(endBlock), + Addresses: []common.Address{rm}, + Topics: [][]common.Hash{{verifyBatchesTrustedAggregatorSignatureHash}}, + } + logs, err := client.FilterLogs(cmd.Context(), query) + if err != nil { + return err + } + + for _, vLog := range logs { + vb, err := rollupManager.ParseVerifyBatchesTrustedAggregator(vLog) + if err != nil { + return err + } + log.Info().Uint32("RollupID", vb.RollupID).Uint64("block-number", vb.Raw.BlockNumber).Msg("Found rollupmanager 
VerifyBatchesTrustedAggregator event") + var jBytes []byte + jBytes, err = json.Marshal(vb) + if err != nil { + return err + } + fmt.Println(string(jBytes)) + } + currentBlock = endBlock + 1 + } + + return nil +} + +func proof(args []string) error { + depositNumber := proofOptions.DepositCount + rawDepositData, err := getInputData(args) + if err != nil { + return err + } + return readDeposits(rawDepositData, uint32(depositNumber)) +} + +func balanceTree() error { + l2NetworkID := balanceTreeOptions.L2NetworkID + bridgeAddress := common.HexToAddress(balanceTreeOptions.BridgeAddress) + + var client *ethclient.Client + var err error + + if balanceTreeOptions.Insecure { + client, err = createInsecureEthClient(balanceTreeOptions.RpcURL) + } else { + client, err = ethclient.DialContext(context.Background(), balanceTreeOptions.RpcURL) + } + + if err != nil { + return err + } + defer client.Close() + l2RawClaimsData, l2RawDepositsData, err := getBalanceTreeData() + if err != nil { + return err + } + root, balances, err := computeBalanceTree(client, bridgeAddress, l2RawClaimsData, l2NetworkID, l2RawDepositsData) + if err != nil { + return err + } + type BalanceEntry struct { + OriginNetwork uint32 `json:"originNetwork"` + OriginTokenAddress common.Address `json:"originTokenAddress"` + TotalSupply string `json:"totalSupply"` + } + + var balanceEntries []BalanceEntry + for tokenKey, balance := range balances { + if balance.Cmp(big.NewInt(0)) == 0 { + continue + } + + var token TokenInfo + token, err = TokenInfoStringToStruct(tokenKey) + if err != nil { + return err + } + + if token.OriginNetwork.Uint64() == uint64(l2NetworkID) { + continue + } + + balanceEntries = append(balanceEntries, BalanceEntry{ + OriginNetwork: uint32(token.OriginNetwork.Uint64()), + OriginTokenAddress: token.OriginTokenAddress, + TotalSupply: balance.String(), + }) + } + + // Create the response structure + response := struct { + Root string `json:"root"` + Balances []BalanceEntry `json:"balances"` + 
}{ + Root: root.String(), + Balances: balanceEntries, + } + + // Marshal to JSON with proper formatting + jsonOutput, err := json.MarshalIndent(response, "", " ") + if err != nil { + return err + } + + fmt.Println(string(jsonOutput)) + return nil +} + +func nullifierTree(args []string) error { + rawClaims, err := getInputData(args) + if err != nil { + return err + } + root, err := computeNullifierTree(rawClaims) + if err != nil { + return err + } + fmt.Printf(` + { + "root": "%s" + } + `, root.String()) + return nil +} + +func nullifierAndBalanceTree() error { + l2NetworkID := balanceTreeOptions.L2NetworkID + bridgeAddress := common.HexToAddress(balanceTreeOptions.BridgeAddress) + + var client *ethclient.Client + var err error + + if balanceTreeOptions.Insecure { + client, err = createInsecureEthClient(balanceTreeOptions.RpcURL) + } else { + client, err = ethclient.DialContext(context.Background(), balanceTreeOptions.RpcURL) + } + + if err != nil { + return err + } + defer client.Close() + l2RawClaimsData, l2RawDepositsData, err := getBalanceTreeData() + if err != nil { + return err + } + bridgeV2, err := ulxly.NewUlxly(bridgeAddress, client) + if err != nil { + return err + } + ler_count, err := bridgeV2.LastUpdatedDepositCount(&bind.CallOpts{Pending: false}) + if err != nil { + return err + } + log.Info().Msgf("Last LER count: %d", ler_count) + balanceTreeRoot, _, err := computeBalanceTree(client, bridgeAddress, l2RawClaimsData, l2NetworkID, l2RawDepositsData) + if err != nil { + return err + } + nullifierTreeRoot, err := computeNullifierTree(l2RawClaimsData) + if err != nil { + return err + } + initPessimisticRoot := crypto.Keccak256Hash(balanceTreeRoot.Bytes(), nullifierTreeRoot.Bytes(), Uint32ToBytesLittleEndian(ler_count)) + fmt.Printf(` + { + "balanceTreeRoot": "%s", + "nullifierTreeRoot": "%s", + "initPessimisticRoot": "%s" + } + `, balanceTreeRoot.String(), nullifierTreeRoot.String(), initPessimisticRoot.String()) + return nil +} + +func 
computeNullifierTree(rawClaims []byte) (common.Hash, error) { + buf := bytes.NewBuffer(rawClaims) + scanner := bufio.NewScanner(buf) + scannerBuf := make([]byte, 0) + scanner.Buffer(scannerBuf, 1024*1024) + nTree, err := NewNullifierTree() + if err != nil { + return common.Hash{}, err + } + var root common.Hash + for scanner.Scan() { + claim := new(ulxly.UlxlyClaimEvent) + err = json.Unmarshal(scanner.Bytes(), claim) + if err != nil { + return common.Hash{}, err + } + mainnetFlag, rollupIndex, localExitRootIndex, err := DecodeGlobalIndex(claim.GlobalIndex) + if err != nil { + log.Error().Err(err).Msg("error decoding globalIndex") + return common.Hash{}, err + } + log.Info().Bool("MainnetFlag", mainnetFlag).Uint32("RollupIndex", rollupIndex).Uint32("LocalExitRootIndex", localExitRootIndex).Uint64("block-number", claim.Raw.BlockNumber).Msg("Adding Claim") + nullifierKey := NullifierKey{ + NetworkID: claim.OriginNetwork, + Index: localExitRootIndex, + } + root, err = nTree.UpdateNullifierTree(nullifierKey) + if err != nil { + log.Error().Err(err).Uint32("OriginNetwork: ", claim.OriginNetwork).Msg("error computing nullifierTree. 
Claim information: GlobalIndex: " + claim.GlobalIndex.String() + ", OriginAddress: " + claim.OriginAddress.String() + ", Amount: " + claim.Amount.String()) + return common.Hash{}, err + } + } + log.Info().Msgf("Final nullifierTree root: %s", root.String()) + return root, nil +} + +func computeBalanceTree(client *ethclient.Client, bridgeAddress common.Address, l2RawClaims []byte, l2NetworkID uint32, l2RawDeposits []byte) (common.Hash, map[string]*big.Int, error) { + buf := bytes.NewBuffer(l2RawClaims) + scanner := bufio.NewScanner(buf) + scannerBuf := make([]byte, 0) + scanner.Buffer(scannerBuf, 1024*1024) + bTree, err := NewBalanceTree() + if err != nil { + return common.Hash{}, nil, err + } + balances := make(map[string]*big.Int) + for scanner.Scan() { + l2Claim := new(ulxly.UlxlyClaimEvent) + err = json.Unmarshal(scanner.Bytes(), l2Claim) + if err != nil { + return common.Hash{}, nil, err + } + token := TokenInfo{ + OriginNetwork: big.NewInt(0).SetUint64(uint64(l2Claim.OriginNetwork)), + OriginTokenAddress: l2Claim.OriginAddress, + } + isMessage, err := checkClaimCalldata(client, bridgeAddress, l2Claim.Raw.TxHash) + if err != nil { + return common.Hash{}, nil, err + } + if isMessage { + token.OriginNetwork = big.NewInt(0) + token.OriginTokenAddress = common.Address{} + } + log.Info().Msgf("L2 Claim. isMessage: %v OriginNetwork: %d. TokenAddress: %s. 
Amount: %s", isMessage, token.OriginNetwork, token.OriginTokenAddress.String(), l2Claim.Amount.String()) + if _, ok := balances[token.String()]; !ok { + balances[token.String()] = big.NewInt(0) + } + balances[token.String()] = big.NewInt(0).Add(balances[token.String()], l2Claim.Amount) + + } + l2Buf := bytes.NewBuffer(l2RawDeposits) + l2Scanner := bufio.NewScanner(l2Buf) + l2ScannerBuf := make([]byte, 0) + l2Scanner.Buffer(l2ScannerBuf, 1024*1024) + for l2Scanner.Scan() { + l2Deposit := new(ulxly.UlxlyBridgeEvent) + err := json.Unmarshal(l2Scanner.Bytes(), l2Deposit) + if err != nil { + return common.Hash{}, nil, err + } + token := TokenInfo{ + OriginNetwork: big.NewInt(0).SetUint64(uint64(l2Deposit.OriginNetwork)), + OriginTokenAddress: l2Deposit.OriginAddress, + } + if _, ok := balances[token.String()]; !ok { + balances[token.String()] = big.NewInt(0) + } + balances[token.String()] = big.NewInt(0).Sub(balances[token.String()], l2Deposit.Amount) + } + // Now, the balance map is complete. Let's build the tree. + var root common.Hash + for t, balance := range balances { + if balance.Cmp(big.NewInt(0)) == 0 { + continue + } + token, err := TokenInfoStringToStruct(t) + if err != nil { + return common.Hash{}, nil, err + } + if token.OriginNetwork.Uint64() == uint64(l2NetworkID) { + continue + } + root, err = bTree.UpdateBalanceTree(token, balance) + if err != nil { + return common.Hash{}, nil, err + } + log.Info().Msgf("New balanceTree leaf. 
OriginNetwork: %s, TokenAddress: %s, Balance: %s, Root: %s", token.OriginNetwork.String(), token.OriginTokenAddress.String(), balance.String(), root.String()) + } + log.Info().Msgf("Final balanceTree root: %s", root.String()) + + return root, balances, nil +} + +func rollupsExitRootProof(args []string) error { + rollupID := rollupsProofOptions.RollupID + completeMT := rollupsProofOptions.CompleteMerkleTree + rawLeavesData, err := getInputData(args) + if err != nil { + return err + } + return readRollupsExitRootLeaves(rawLeavesData, rollupID, completeMT) +} + +func emptyProof() error { + p := new(Proof) + + e := generateEmptyHashes(TreeDepth) + copy(p.Siblings[:], e) + fmt.Println(String(p)) + return nil +} + +func zeroProof() error { + p := new(Proof) + + e := generateZeroHashes(TreeDepth) + copy(p.Siblings[:], e) + fmt.Println(String(p)) + return nil +} + +type JSONError struct { + Code int `json:"code"` + Message string `json:"message"` + Data any `json:"data"` +} + +func logAndReturnJSONError(ctx context.Context, client *ethclient.Client, tx *types.Transaction, opts *bind.TransactOpts, err error) error { + + var callErr error + if tx != nil { + // in case the error came down to gas estimation, we can sometimes get more information by doing a call + _, callErr = client.CallContract(ctx, ethereum.CallMsg{ + From: opts.From, + To: tx.To(), + Gas: tx.Gas(), + GasPrice: tx.GasPrice(), + GasFeeCap: tx.GasFeeCap(), + GasTipCap: tx.GasTipCap(), + Value: tx.Value(), + Data: tx.Data(), + AccessList: tx.AccessList(), + BlobGasFeeCap: tx.BlobGasFeeCap(), + BlobHashes: tx.BlobHashes(), + }, nil) + + if inputUlxlyArgs.dryRun { + castCmd := "cast call" + castCmd += fmt.Sprintf(" --rpc-url %s", inputUlxlyArgs.rpcURL) + castCmd += fmt.Sprintf(" --from %s", opts.From.String()) + castCmd += fmt.Sprintf(" --gas-limit %d", tx.Gas()) + if tx.Type() == types.LegacyTxType { + castCmd += fmt.Sprintf(" --gas-price %s", tx.GasPrice().String()) + } else { + castCmd += fmt.Sprintf(" 
--gas-price %s", tx.GasFeeCap().String()) + castCmd += fmt.Sprintf(" --priority-gas-price %s", tx.GasTipCap().String()) + } + castCmd += fmt.Sprintf(" --value %s", tx.Value().String()) + castCmd += fmt.Sprintf(" %s", tx.To().String()) + castCmd += fmt.Sprintf(" %s", common.Bytes2Hex(tx.Data())) + log.Info().Str("cmd", castCmd).Msg("use this command to replicate the call") + } + } + + if err == nil { + return nil + } + + var jsonError JSONError + jsonErrorBytes, jsErr := json.Marshal(err) + if jsErr != nil { + log.Error().Err(err).Msg("Unable to interact with the bridge contract") + return err + } + + jsErr = json.Unmarshal(jsonErrorBytes, &jsonError) + if jsErr != nil { + log.Error().Err(err).Msg("Unable to interact with the bridge contract") + return err + } + + reason, decodeErr := smcerror.DecodeSmcErrorCode(jsonError.Data) + if decodeErr != nil { + log.Error().Err(err).Msg("unable to decode smart contract error") + return err + } + errLog := log.Error(). + Err(err). + Str("message", jsonError.Message). + Int("code", jsonError.Code). + Interface("data", jsonError.Data). 
+ Str("reason", reason) + + if callErr != nil { + errLog = errLog.Err(callErr) + } + + customErr := errors.New(err.Error() + ": " + reason) + if errCode, isValid := jsonError.Data.(string); isValid && errCode == "0x646cf558" { + // I don't want to bother with the additional error logging for previously claimed deposits + return customErr + } + + errLog.Msg("Unable to interact with bridge contract") + return customErr +} + +// Function to parse deposit count from bridge transaction logs +func ParseBridgeDepositCount(logs []types.Log, bridgeContract *ulxly.Ulxly) (uint32, error) { + for _, log := range logs { + // Try to parse the log as a BridgeEvent using the contract's filterer + bridgeEvent, err := bridgeContract.ParseBridgeEvent(log) + if err != nil { + // This log is not a bridge event, continue to next log + continue + } + + // Successfully parsed a bridge event, return the deposit count + return bridgeEvent.DepositCount, nil + } + + return 0, fmt.Errorf("bridge event not found in logs") +} + +// parseDepositCountFromTransaction extracts the deposit count from a bridge transaction receipt +func parseDepositCountFromTransaction(ctx context.Context, client *ethclient.Client, txHash common.Hash, bridgeContract *ulxly.Ulxly) (uint32, error) { + receipt, err := client.TransactionReceipt(ctx, txHash) + if err != nil { + return 0, err + } + + // Check if the transaction was successful before trying to parse logs + if receipt.Status == 0 { + log.Error().Str("txHash", receipt.TxHash.String()).Msg("Bridge transaction failed") + return 0, fmt.Errorf("bridge transaction failed with hash: %s", receipt.TxHash.String()) + } + + // Convert []*types.Log to []types.Log + logs := make([]types.Log, len(receipt.Logs)) + for i, log := range receipt.Logs { + logs[i] = *log + } + + depositCount, err := ParseBridgeDepositCount(logs, bridgeContract) + if err != nil { + log.Error().Err(err).Msg("failed to parse deposit count from logs") + return 0, err + } + + return depositCount, nil 
+} + +func bridgeAsset(cmd *cobra.Command) error { + bridgeAddr := inputUlxlyArgs.bridgeAddress + privateKey := inputUlxlyArgs.privateKey + gasLimit := inputUlxlyArgs.gasLimit + destinationAddress := inputUlxlyArgs.destAddress + chainID := inputUlxlyArgs.chainID + amount := inputUlxlyArgs.value + tokenAddr := inputUlxlyArgs.tokenAddress + callDataString := inputUlxlyArgs.callData + destinationNetwork := inputUlxlyArgs.destNetwork + isForced := inputUlxlyArgs.forceUpdate + timeoutTxnReceipt := inputUlxlyArgs.timeout + RPCURL := inputUlxlyArgs.rpcURL + + client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + + // Initialize and assign variables required to send transaction payload + bridgeV2, toAddress, auth, err := generateTransactionPayload(cmd.Context(), client, bridgeAddr, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + log.Error().Err(err).Msg("error generating transaction payload") + return err + } + + bridgeAddress := common.HexToAddress(bridgeAddr) + value, _ := big.NewInt(0).SetString(amount, 0) + tokenAddress := common.HexToAddress(tokenAddr) + callData := common.Hex2Bytes(strings.TrimPrefix(callDataString, "0x")) + + if tokenAddress == common.HexToAddress("0x0000000000000000000000000000000000000000") { + auth.Value = value + } else { + // in case it's a token transfer, we need to ensure that the bridge contract + // has enough allowance to transfer the tokens on behalf of the user + tokenContract, iErr := tokens.NewERC20(tokenAddress, client) + if iErr != nil { + log.Error().Err(iErr).Msg("error getting token contract") + return iErr + } + + allowance, iErr := tokenContract.Allowance(&bind.CallOpts{Pending: false}, auth.From, bridgeAddress) + if iErr != nil { + log.Error().Err(iErr).Msg("error getting token allowance") + return iErr + } + + if allowance.Cmp(value) < 0 { + log.Info(). + Str("amount", value.String()). 
+ Str("tokenAddress", tokenAddress.String()). + Str("bridgeAddress", bridgeAddress.String()). + Str("userAddress", auth.From.String()). + Msg("approving bridge contract to spend tokens on behalf of user") + + // Approve the bridge contract to spend the tokens on behalf of the user + approveTxn, iErr := tokenContract.Approve(auth, bridgeAddress, value) + if iErr = logAndReturnJSONError(cmd.Context(), client, approveTxn, auth, iErr); iErr != nil { + return iErr + } + log.Info().Msg("approveTxn: " + approveTxn.Hash().String()) + if iErr = WaitMineTransaction(cmd.Context(), client, approveTxn, timeoutTxnReceipt); iErr != nil { + return iErr + } + } + } + + bridgeTxn, err := bridgeV2.BridgeAsset(auth, destinationNetwork, toAddress, value, tokenAddress, isForced, callData) + if err = logAndReturnJSONError(cmd.Context(), client, bridgeTxn, auth, err); err != nil { + log.Info().Err(err).Str("calldata", callDataString).Msg("Bridge transaction failed") + return err + } + log.Info().Msg("bridgeTxn: " + bridgeTxn.Hash().String()) + if err = WaitMineTransaction(cmd.Context(), client, bridgeTxn, timeoutTxnReceipt); err != nil { + return err + } + depositCount, err := parseDepositCountFromTransaction(cmd.Context(), client, bridgeTxn.Hash(), bridgeV2) + if err != nil { + return err + } + + log.Info().Uint32("depositCount", depositCount).Msg("Bridge deposit count parsed from logs") + return nil +} + +func bridgeMessage(cmd *cobra.Command) error { + bridgeAddress := inputUlxlyArgs.bridgeAddress + privateKey := inputUlxlyArgs.privateKey + gasLimit := inputUlxlyArgs.gasLimit + destinationAddress := inputUlxlyArgs.destAddress + chainID := inputUlxlyArgs.chainID + amount := inputUlxlyArgs.value + tokenAddr := inputUlxlyArgs.tokenAddress + callDataString := inputUlxlyArgs.callData + destinationNetwork := inputUlxlyArgs.destNetwork + isForced := inputUlxlyArgs.forceUpdate + timeoutTxnReceipt := inputUlxlyArgs.timeout + RPCURL := inputUlxlyArgs.rpcURL + + // Dial the Ethereum RPC server. 
+ client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + // Initialize and assign variables required to send transaction payload + bridgeV2, toAddress, auth, err := generateTransactionPayload(cmd.Context(), client, bridgeAddress, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + log.Error().Err(err).Msg("error generating transaction payload") + return err + } + + value, _ := big.NewInt(0).SetString(amount, 0) + tokenAddress := common.HexToAddress(tokenAddr) + callData := common.Hex2Bytes(strings.TrimPrefix(callDataString, "0x")) + + if tokenAddress == common.HexToAddress("0x0000000000000000000000000000000000000000") { + auth.Value = value + } + + bridgeTxn, err := bridgeV2.BridgeMessage(auth, destinationNetwork, toAddress, isForced, callData) + if err = logAndReturnJSONError(cmd.Context(), client, bridgeTxn, auth, err); err != nil { + log.Info().Err(err).Str("calldata", callDataString).Msg("Bridge transaction failed") + return err + } + log.Info().Msg("bridgeTxn: " + bridgeTxn.Hash().String()) + if err = WaitMineTransaction(cmd.Context(), client, bridgeTxn, timeoutTxnReceipt); err != nil { + return err + } + depositCount, err := parseDepositCountFromTransaction(cmd.Context(), client, bridgeTxn.Hash(), bridgeV2) + if err != nil { + return err + } + + log.Info().Uint32("depositCount", depositCount).Msg("Bridge deposit count parsed from logs") + return nil +} + +func bridgeWETHMessage(cmd *cobra.Command) error { + bridgeAddress := inputUlxlyArgs.bridgeAddress + privateKey := inputUlxlyArgs.privateKey + gasLimit := inputUlxlyArgs.gasLimit + destinationAddress := inputUlxlyArgs.destAddress + chainID := inputUlxlyArgs.chainID + amount := inputUlxlyArgs.value + callDataString := inputUlxlyArgs.callData + destinationNetwork := inputUlxlyArgs.destNetwork + isForced := inputUlxlyArgs.forceUpdate + timeoutTxnReceipt := inputUlxlyArgs.timeout + 
RPCURL := inputUlxlyArgs.rpcURL + + // Dial the Ethereum RPC server. + client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + // Initialize and assign variables required to send transaction payload + bridgeV2, toAddress, auth, err := generateTransactionPayload(cmd.Context(), client, bridgeAddress, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + log.Error().Err(err).Msg("error generating transaction payload") + return err + } + // Check if WETH is allowed + wethAddress, err := bridgeV2.WETHToken(&bind.CallOpts{Pending: false}) + if err != nil { + log.Error().Err(err).Msg("error getting WETH address from the bridge smc") + return err + } + if wethAddress == (common.Address{}) { + return fmt.Errorf("bridge WETH not allowed. Native ETH token configured in this network. This tx will fail") + } + + value, _ := big.NewInt(0).SetString(amount, 0) + callData := common.Hex2Bytes(strings.TrimPrefix(callDataString, "0x")) + + bridgeTxn, err := bridgeV2.BridgeMessageWETH(auth, destinationNetwork, toAddress, value, isForced, callData) + if err = logAndReturnJSONError(cmd.Context(), client, bridgeTxn, auth, err); err != nil { + log.Info().Err(err).Str("calldata", callDataString).Msg("Bridge transaction failed") + return err + } + log.Info().Msg("bridgeTxn: " + bridgeTxn.Hash().String()) + if err = WaitMineTransaction(cmd.Context(), client, bridgeTxn, timeoutTxnReceipt); err != nil { + return err + } + depositCount, err := parseDepositCountFromTransaction(cmd.Context(), client, bridgeTxn.Hash(), bridgeV2) + if err != nil { + return err + } + + log.Info().Uint32("depositCount", depositCount).Msg("Bridge deposit count parsed from logs") + return nil +} + +func claimAsset(cmd *cobra.Command) error { + bridgeAddress := inputUlxlyArgs.bridgeAddress + privateKey := inputUlxlyArgs.privateKey + gasLimit := inputUlxlyArgs.gasLimit + destinationAddress := 
inputUlxlyArgs.destAddress + chainID := inputUlxlyArgs.chainID + timeoutTxnReceipt := inputUlxlyArgs.timeout + RPCURL := inputUlxlyArgs.rpcURL + depositCount := inputUlxlyArgs.depositCount + depositNetwork := inputUlxlyArgs.depositNetwork + globalIndexOverride := inputUlxlyArgs.globalIndex + proofGERHash := inputUlxlyArgs.proofGER + proofL1InfoTreeIndex := inputUlxlyArgs.proofL1InfoTreeIndex + wait := inputUlxlyArgs.wait + + // Dial Ethereum client + client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + // Initialize and assign variables required to send transaction payload + bridgeV2, _, auth, err := generateTransactionPayload(cmd.Context(), client, bridgeAddress, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + log.Error().Err(err).Msg("error generating transaction payload") + return err + } + + deposit, err := getDepositWhenReadyForClaim(depositNetwork, depositCount, wait) + if err != nil { + log.Error().Err(err) + return err + } + + if deposit.LeafType != 0 { + log.Warn().Msg("Deposit leafType is not asset") + } + if globalIndexOverride != "" { + deposit.GlobalIndex.SetString(globalIndexOverride, 10) + } + + proof, err := getMerkleProofsExitRoots(bridgeService, *deposit, proofGERHash, proofL1InfoTreeIndex) + if err != nil { + log.Error().Err(err).Msg("error getting merkle proofs and exit roots from bridge service") + return err + } + + claimTxn, err := bridgeV2.ClaimAsset(auth, bridge_service.HashSliceToBytesArray(proof.MerkleProof), bridge_service.HashSliceToBytesArray(proof.RollupMerkleProof), deposit.GlobalIndex, *proof.MainExitRoot, *proof.RollupExitRoot, deposit.OrigNet, deposit.OrigAddr, deposit.DestNet, deposit.DestAddr, deposit.Amount, deposit.Metadata) + if err = logAndReturnJSONError(cmd.Context(), client, claimTxn, auth, err); err != nil { + return err + } + log.Info().Msg("claimTxn: " + claimTxn.Hash().String()) + return 
WaitMineTransaction(cmd.Context(), client, claimTxn, timeoutTxnReceipt) +} + +func claimMessage(cmd *cobra.Command) error { + bridgeAddress := inputUlxlyArgs.bridgeAddress + privateKey := inputUlxlyArgs.privateKey + gasLimit := inputUlxlyArgs.gasLimit + destinationAddress := inputUlxlyArgs.destAddress + chainID := inputUlxlyArgs.chainID + timeoutTxnReceipt := inputUlxlyArgs.timeout + RPCURL := inputUlxlyArgs.rpcURL + depositCount := inputUlxlyArgs.depositCount + depositNetwork := inputUlxlyArgs.depositNetwork + globalIndexOverride := inputUlxlyArgs.globalIndex + proofGERHash := inputUlxlyArgs.proofGER + proofL1InfoTreeIndex := inputUlxlyArgs.proofL1InfoTreeIndex + wait := inputUlxlyArgs.wait + + // Dial Ethereum client + client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + // Initialize and assign variables required to send transaction payload + bridgeV2, _, auth, err := generateTransactionPayload(cmd.Context(), client, bridgeAddress, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + log.Error().Err(err).Msg("error generating transaction payload") + return err + } + + deposit, err := getDepositWhenReadyForClaim(depositNetwork, depositCount, wait) + if err != nil { + log.Error().Err(err) + return err + } + + if deposit.LeafType != 1 { + log.Warn().Msg("Deposit leafType is not message") + } + if globalIndexOverride != "" { + deposit.GlobalIndex.SetString(globalIndexOverride, 10) + } + + proof, err := getMerkleProofsExitRoots(bridgeService, *deposit, proofGERHash, proofL1InfoTreeIndex) + if err != nil { + log.Error().Err(err).Msg("error getting merkle proofs and exit roots from bridge service") + return err + } + + claimTxn, err := bridgeV2.ClaimMessage(auth, bridge_service.HashSliceToBytesArray(proof.MerkleProof), bridge_service.HashSliceToBytesArray(proof.RollupMerkleProof), deposit.GlobalIndex, *proof.MainExitRoot, 
*proof.RollupExitRoot, deposit.OrigNet, deposit.OrigAddr, deposit.DestNet, deposit.DestAddr, deposit.Amount, deposit.Metadata) + if err = logAndReturnJSONError(cmd.Context(), client, claimTxn, auth, err); err != nil { + return err + } + log.Info().Msg("claimTxn: " + claimTxn.Hash().String()) + return WaitMineTransaction(cmd.Context(), client, claimTxn, timeoutTxnReceipt) +} + +func getDepositWhenReadyForClaim(depositNetwork, depositCount uint32, wait time.Duration) (*bridge_service.Deposit, error) { + var deposit *bridge_service.Deposit + var err error + + waiter := time.After(wait) + +out: + for { + deposit, err = getDeposit(depositNetwork, depositCount) + if err == nil { + log.Info().Msg("The deposit is ready to be claimed") + break out + } + + select { + case <-waiter: + if wait != 0 { + err = fmt.Errorf("the deposit seems to be stuck after %s", wait.String()) + } + break out + default: + if errors.Is(err, ErrNotReadyForClaim) || errors.Is(err, bridge_service.ErrNotFound) { + log.Info().Msg("retrying...") + time.Sleep(10 * time.Second) + continue + } + break out + } + } + return deposit, err +} + +func getBridgeServiceURLs() (map[uint32]string, error) { + bridgeServiceUrls := inputUlxlyArgs.bridgeServiceURLs + urlMap := make(map[uint32]string) + for _, mapping := range bridgeServiceUrls { + pieces := strings.Split(mapping, "=") + if len(pieces) != 2 { + return nil, fmt.Errorf("bridge service url mapping should contain a networkid and url separated by an equal sign. 
Got: %s", mapping) + } + networkID, err := strconv.ParseInt(pieces[0], 10, 32) + if err != nil { + return nil, err + } + urlMap[uint32(networkID)] = pieces[1] + } + return urlMap, nil +} + +func claimEverything(cmd *cobra.Command) error { + privateKey := inputUlxlyArgs.privateKey + claimerAddress := inputUlxlyArgs.addressOfPrivateKey + + gasLimit := inputUlxlyArgs.gasLimit + chainID := inputUlxlyArgs.chainID + timeoutTxnReceipt := inputUlxlyArgs.timeout + bridgeAddress := inputUlxlyArgs.bridgeAddress + destinationAddress := inputUlxlyArgs.destAddress + RPCURL := inputUlxlyArgs.rpcURL + limit := inputUlxlyArgs.bridgeLimit + offset := inputUlxlyArgs.bridgeOffset + concurrency := inputUlxlyArgs.concurrency + + depositMap := make(map[DepositID]*bridge_service.Deposit) + + for networkID, bridgeService := range bridgeServices { + deposits, _, bErr := getDepositsForAddress(bridgeService, destinationAddress, offset, limit) + if bErr != nil { + log.Err(bErr).Uint32("id", networkID).Str("url", bridgeService.Url()).Msgf("Error getting deposits for bridge: %s", bErr.Error()) + return bErr + } + for idx, deposit := range deposits { + depID := DepositID{ + DepositCnt: deposit.DepositCnt, + NetworkID: deposit.NetworkID, + } + _, hasKey := depositMap[depID] + // if we haven't seen this deposit at all, we'll store it + if !hasKey { + depositMap[depID] = &deposits[idx] + continue + } + + // if this new deposit is ready for claim OR it has already been claimed we should override the existing value + if inputUlxlyArgs.legacy { + if deposit.ReadyForClaim || deposit.ClaimTxHash != nil { + depositMap[depID] = &deposits[idx] + } + } + } + } + + client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + + bridgeContract, _, opts, err := generateTransactionPayload(cmd.Context(), client, bridgeAddress, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + return err + } + 
currentNetworkID, err := bridgeContract.NetworkID(nil) + if err != nil { + return err + } + log.Info().Uint32("networkID", currentNetworkID).Msg("current network") + + workPool := make(chan *bridge_service.Deposit, concurrency) // bounded chan for controlled concurrency + + nonceCounter, err := currentNonce(cmd.Context(), client, claimerAddress) + if err != nil { + return err + } + log.Info().Int64("nonce", nonceCounter.Int64()).Msg("starting nonce") + nonceMutex := sync.Mutex{} + nonceIncrement := big.NewInt(1) + retryNonces := make(chan *big.Int, concurrency) // bounded same as workPool + + wg := sync.WaitGroup{} // wg so the last ones can get processed + + for _, d := range depositMap { + wg.Add(1) + workPool <- d // block until a slot is available + go func(deposit *bridge_service.Deposit) { + defer func() { + <-workPool // release work slot + }() + defer wg.Done() + + if deposit.DestNet != currentNetworkID { + log.Debug().Uint32("destination_network", deposit.DestNet).Msg("discarding deposit for different network") + return + } + if deposit.ClaimTxHash != nil { + log.Info().Str("txhash", deposit.ClaimTxHash.String()).Msg("It looks like this tx was already claimed") + return + } + // Either use the next retry nonce, or set and increment the next one + var nextNonce *big.Int + select { + case n := <-retryNonces: + nextNonce = n + default: + nonceMutex.Lock() + nextNonce = big.NewInt(nonceCounter.Int64()) + nonceCounter = nonceCounter.Add(nonceCounter, nonceIncrement) + nonceMutex.Unlock() + } + log.Info().Int64("nonce", nextNonce.Int64()).Msg("Next nonce") + + claimTx, dErr := claimSingleDeposit(cmd, client, bridgeContract, withNonce(opts, nextNonce), *deposit, bridgeServices, currentNetworkID) + if dErr != nil { + log.Warn().Err(dErr).Uint32("DepositCnt", deposit.DepositCnt). + Uint32("OrigNet", deposit.OrigNet). + Uint32("DestNet", deposit.DestNet). + Uint32("NetworkID", deposit.NetworkID). + Stringer("OrigAddr", deposit.OrigAddr). 
+ Stringer("DestAddr", deposit.DestAddr). + Int64("nonce", nextNonce.Int64()). + Msg("There was an error claiming") + + // Some nonces should not be reused + if strings.Contains(dErr.Error(), "could not replace existing") { + return + } + if strings.Contains(dErr.Error(), "already known") { + return + } + if strings.Contains(dErr.Error(), "nonce is too low") { + return + } + // are there other cases? + retryNonces <- nextNonce + return + } + dErr = WaitMineTransaction(cmd.Context(), client, claimTx, timeoutTxnReceipt) + if dErr != nil { + log.Error().Err(dErr).Msg("error while waiting for tx to mine") + } + }(d) + } + + wg.Wait() + return nil +} + +func currentNonce(ctx context.Context, client *ethclient.Client, address string) (*big.Int, error) { + addr := common.HexToAddress(address) + nonce, err := client.NonceAt(ctx, addr, nil) + if err != nil { + log.Error().Err(err).Str("address", addr.Hex()).Msg("Failed to get nonce") + return nil, err + } + n := int64(nonce) + return big.NewInt(n), nil +} + +// todo: implement for other fields in library, or find a library that does this +func withNonce(opts *bind.TransactOpts, newNonce *big.Int) *bind.TransactOpts { + if opts == nil { + return nil + } + clone := &bind.TransactOpts{ + From: opts.From, + Signer: opts.Signer, + GasLimit: opts.GasLimit, + Context: opts.Context, // Usually OK to share, unless you need a separate context + NoSend: opts.NoSend, + } + // Deep-copy big.Int fields + if opts.Value != nil { + clone.Value = new(big.Int).Set(opts.Value) + } + if opts.GasFeeCap != nil { + clone.GasFeeCap = new(big.Int).Set(opts.GasFeeCap) + } + if opts.GasTipCap != nil { + clone.GasTipCap = new(big.Int).Set(opts.GasTipCap) + } + // Set the new nonce + if newNonce != nil { + clone.Nonce = new(big.Int).Set(newNonce) + } + + return clone +} + +func claimSingleDeposit(cmd *cobra.Command, client *ethclient.Client, bridgeContract *ulxly.Ulxly, opts *bind.TransactOpts, deposit bridge_service.Deposit, bridgeServices 
map[uint32]bridge_service.BridgeService, currentNetworkID uint32) (*types.Transaction, error) { + networkIDForBridgeService := deposit.NetworkID + if deposit.NetworkID == 0 { + networkIDForBridgeService = currentNetworkID + } + + bridgeServiceFromMap, hasKey := bridgeServices[networkIDForBridgeService] + if !hasKey { + return nil, fmt.Errorf("we don't have a bridge service url for network: %d", deposit.DestNet) + } + + proof, err := getMerkleProofsExitRoots(bridgeServiceFromMap, deposit, "", 0) + if err != nil { + log.Error().Err(err).Msg("error getting merkle proofs and exit roots from bridge service") + return nil, err + } + + var claimTx *types.Transaction + if deposit.LeafType == 0 { + claimTx, err = bridgeContract.ClaimAsset(opts, bridge_service.HashSliceToBytesArray(proof.MerkleProof), bridge_service.HashSliceToBytesArray(proof.RollupMerkleProof), deposit.GlobalIndex, *proof.MainExitRoot, *proof.RollupExitRoot, deposit.OrigNet, deposit.OrigAddr, deposit.DestNet, deposit.DestAddr, deposit.Amount, deposit.Metadata) + } else { + claimTx, err = bridgeContract.ClaimMessage(opts, bridge_service.HashSliceToBytesArray(proof.MerkleProof), bridge_service.HashSliceToBytesArray(proof.RollupMerkleProof), deposit.GlobalIndex, *proof.MainExitRoot, *proof.RollupExitRoot, deposit.OrigNet, deposit.OrigAddr, deposit.DestNet, deposit.DestAddr, deposit.Amount, deposit.Metadata) + } + + if err = logAndReturnJSONError(cmd.Context(), client, claimTx, opts, err); err != nil { + log.Warn(). + Uint32("DepositCnt", deposit.DepositCnt). + Uint32("OrigNet", deposit.OrigNet). + Uint32("DestNet", deposit.DestNet). + Uint32("NetworkID", deposit.NetworkID). + Stringer("OrigAddr", deposit.OrigAddr). + Stringer("DestAddr", deposit.DestAddr). 
+ Msg("attempt to claim deposit failed") + return nil, err + } + log.Info().Stringer("txhash", claimTx.Hash()).Msg("sent claim") + + return claimTx, nil +} + +// Wait for the transaction to be mined +func WaitMineTransaction(ctx context.Context, client *ethclient.Client, tx *types.Transaction, txTimeout uint64) error { + if inputUlxlyArgs.dryRun { + txJson, _ := tx.MarshalJSON() + log.Info().RawJSON("tx", txJson).Msg("Skipping receipt check. Dry run is enabled") + return nil + } + txnMinedTimer := time.NewTimer(time.Duration(txTimeout) * time.Second) + defer txnMinedTimer.Stop() + for { + select { + case <-txnMinedTimer.C: + log.Info().Msg("Wait timer for transaction receipt exceeded!") + return nil + default: + r, err := client.TransactionReceipt(ctx, tx.Hash()) + if err != nil { + if err.Error() != "not found" { + log.Error().Err(err) + return err + } + time.Sleep(1 * time.Second) + continue + } + if r.Status != 0 { + log.Info().Interface("txHash", r.TxHash).Msg("transaction successful") + return nil + } else if r.Status == 0 { + log.Error().Interface("txHash", r.TxHash).Msg("Deposit transaction failed") + log.Info().Uint64("GasUsed", tx.Gas()).Uint64("cumulativeGasUsedForTx", r.CumulativeGasUsed).Msg("Perhaps try increasing the gas limit") + return nil + } + time.Sleep(1 * time.Second) + } + } +} + +func getInputData(args []string) ([]byte, error) { + fileName := fileOptions.FileName + if fileName != "" { + return os.ReadFile(fileName) + } + + if len(args) > 1 { + concat := strings.Join(args[1:], " ") + return []byte(concat), nil + } + + return io.ReadAll(os.Stdin) +} + +func getBalanceTreeData() ([]byte, []byte, error) { + claimsFileName := balanceTreeOptions.L2ClaimsFile + file, err := os.Open(claimsFileName) + if err != nil { + return nil, nil, err + } + defer file.Close() // Ensure the file is closed after reading + + // Read the entire file content + l2Claims, err := io.ReadAll(file) + if err != nil { + return nil, nil, err + } + + l2FileName := 
balanceTreeOptions.L2DepositsFile + file2, err := os.Open(l2FileName) + if err != nil { + return nil, nil, err + } + defer file2.Close() // Ensure the file is closed after reading + + // Read the entire file content + l2Deposits, err := io.ReadAll(file2) + if err != nil { + return nil, nil, err + } + return l2Claims, l2Deposits, nil +} + +func readRollupsExitRootLeaves(rawLeaves []byte, rollupID uint32, completeMT bool) error { + buf := bytes.NewBuffer(rawLeaves) + scanner := bufio.NewScanner(buf) + scannerBuf := make([]byte, 0) + scanner.Buffer(scannerBuf, 1024*1024) + leaves := make(map[uint32]*polygonrollupmanager.PolygonrollupmanagerVerifyBatchesTrustedAggregator, 0) + highestRollupID := uint32(0) + for scanner.Scan() { + evt := new(polygonrollupmanager.PolygonrollupmanagerVerifyBatchesTrustedAggregator) + err := json.Unmarshal(scanner.Bytes(), evt) + if err != nil { + return err + } + if highestRollupID < evt.RollupID { + highestRollupID = evt.RollupID + } + leaves[evt.RollupID] = evt + } + if err := scanner.Err(); err != nil { + log.Error().Err(err).Msg("there was an error reading the deposit file") + return err + } + if rollupID > highestRollupID && !completeMT { + return fmt.Errorf("rollupID %d required is higher than the highest rollupID %d provided in the file. Please use --complete-merkle-tree option if you know what you are doing", rollupID, highestRollupID) + } else if completeMT { + highestRollupID = rollupID + } + var ls []common.Hash + var i uint32 = 0 + for ; i <= highestRollupID; i++ { + var exitRoot common.Hash + if leaf, exists := leaves[i]; exists { + exitRoot = leaf.ExitRoot + log.Info(). + Uint64("block-number", leaf.Raw.BlockNumber). + Uint32("rollupID", leaf.RollupID). + Str("exitRoot", exitRoot.String()). + Str("tx-hash", leaf.Raw.TxHash.String()). 
+ Msg("latest event received for the tree") + } else { + log.Warn().Uint32("rollupID", i).Msg("No event found for this rollup") + } + ls = append(ls, exitRoot) + } + p, err := ComputeSiblings(rollupID, ls, TreeDepth) + if err != nil { + return err + } + log.Info().Str("root", p.Root.String()).Msg("finished") + fmt.Println(String(p)) + return nil +} + +func ComputeSiblings(rollupID uint32, leaves []common.Hash, height uint8) (*RollupsProof, error) { + initLeaves := leaves + var ns [][][]byte + if len(leaves) == 0 { + leaves = append(leaves, common.Hash{}) + } + currentZeroHashHeight := common.Hash{} + var siblings []common.Hash + index := rollupID + for h := uint8(0); h < height; h++ { + if len(leaves)%2 == 1 { + leaves = append(leaves, currentZeroHashHeight) + } + if index%2 == 1 { // If it is odd + siblings = append(siblings, leaves[index-1]) + } else { // It is even + if len(leaves) > 1 { + siblings = append(siblings, leaves[index+1]) + } + } + var ( + nsi [][][]byte + hashes []common.Hash + ) + for i := 0; i < len(leaves); i += 2 { + var left, right = i, i + 1 + hash := crypto.Keccak256Hash(leaves[left][:], leaves[right][:]) + nsi = append(nsi, [][]byte{hash[:], leaves[left][:], leaves[right][:]}) + hashes = append(hashes, hash) + } + // Find the index of the leave in the next level of the tree. 
+ // Divide the index by 2 to find the position in the upper level + index = uint32(float64(index) / 2) //nolint:gomnd + ns = nsi + leaves = hashes + currentZeroHashHeight = crypto.Keccak256Hash(currentZeroHashHeight.Bytes(), currentZeroHashHeight.Bytes()) + } + if len(ns) != 1 { + return nil, fmt.Errorf("error: more than one root detected: %+v", ns) + } + if len(siblings) != TreeDepth { + return nil, fmt.Errorf("error: invalid number of siblings: %+v", siblings) + } + if leaves[0] != common.BytesToHash(ns[0][0]) { + return nil, fmt.Errorf("latest leave (root of the tree) does not match with the root (ns[0][0])") + } + sb := [32]common.Hash{} + for i := range TreeDepth { + sb[i] = siblings[i] + } + p := &RollupsProof{ + Siblings: sb, + RollupID: rollupID, + LeafHash: initLeaves[rollupID], + Root: common.BytesToHash(ns[0][0]), + } + + computedRoot := computeRoot(p.LeafHash, p.Siblings, p.RollupID, TreeDepth) + if computedRoot != p.Root { + return nil, fmt.Errorf("error: computed root does not match the expected root") + } + + return p, nil +} + +func computeRoot(leafHash common.Hash, smtProof [32]common.Hash, index uint32, height uint8) common.Hash { + var node common.Hash + copy(node[:], leafHash[:]) + + // Check merkle proof + var h uint8 + for h = 0; h < height; h++ { + if ((index >> h) & 1) == 1 { + node = crypto.Keccak256Hash(smtProof[h].Bytes(), node.Bytes()) + } else { + node = crypto.Keccak256Hash(node.Bytes(), smtProof[h].Bytes()) + } + } + return common.BytesToHash(node[:]) +} + +func readDeposits(rawDeposits []byte, depositNumber uint32) error { + buf := bytes.NewBuffer(rawDeposits) + scanner := bufio.NewScanner(buf) + scannerBuf := make([]byte, 0) + scanner.Buffer(scannerBuf, 1024*1024) + imt := new(IMT) + imt.Init() + seenDeposit := make(map[uint32]common.Hash, 0) + lastDeposit := uint32(0) + for scanner.Scan() { + evt := new(ulxly.UlxlyBridgeEvent) + err := json.Unmarshal(scanner.Bytes(), evt) + if err != nil { + return err + } + if _, hasBeenSeen := 
seenDeposit[evt.DepositCount]; hasBeenSeen { + log.Warn().Uint32("deposit", evt.DepositCount).Str("tx-hash", evt.Raw.TxHash.String()).Msg("Skipping duplicate deposit") + continue + } + seenDeposit[evt.DepositCount] = evt.Raw.TxHash + if lastDeposit+1 != evt.DepositCount && lastDeposit != 0 { + log.Error().Uint32("missing-deposit", lastDeposit+1).Uint32("current-deposit", evt.DepositCount).Msg("Missing deposit") + return fmt.Errorf("missing deposit: %d", lastDeposit+1) + } + lastDeposit = evt.DepositCount + leaf := hashDeposit(evt) + log.Debug().Str("leaf-hash", common.Bytes2Hex(leaf[:])).Msg("Leaf hash calculated") + imt.AddLeaf(leaf, evt.DepositCount) + log.Info(). + Uint64("block-number", evt.Raw.BlockNumber). + Uint32("deposit-count", evt.DepositCount). + Str("tx-hash", evt.Raw.TxHash.String()). + Str("root", common.Hash(imt.Roots[len(imt.Roots)-1]).String()). + Msg("adding event to tree") + // There's no point adding more leaves if we can prove the deposit already? + if evt.DepositCount >= depositNumber { + break + } + } + if err := scanner.Err(); err != nil { + log.Error().Err(err).Msg("there was an error reading the deposit file") + return err + } + + log.Info().Msg("finished") + p := imt.GetProof(depositNumber) + fmt.Println(String(p)) + return nil +} + +func ensureCodePresent(ctx context.Context, client *ethclient.Client, address string) error { + code, err := client.CodeAt(ctx, common.HexToAddress(address), nil) + if err != nil { + log.Error().Err(err).Str("address", address).Msg("error getting code at address") + return err + } + if len(code) == 0 { + return fmt.Errorf("address %s has no code", address) + } + return nil +} + +// String will create the json representation of the proof +func String[T any](p T) string { + jsonBytes, err := json.Marshal(p) + if err != nil { + log.Error().Err(err).Msg("error marshalling proof to json") + return "" + } + return string(jsonBytes) + +} + +// hashDeposit create the leaf hash value for a particular deposit +func 
hashDeposit(deposit *ulxly.UlxlyBridgeEvent) common.Hash { + var res common.Hash + origNet := make([]byte, 4) //nolint:gomnd + binary.BigEndian.PutUint32(origNet, deposit.OriginNetwork) + destNet := make([]byte, 4) //nolint:gomnd + binary.BigEndian.PutUint32(destNet, deposit.DestinationNetwork) + var buf common.Hash + metaHash := crypto.Keccak256Hash(deposit.Metadata) + copy(res[:], crypto.Keccak256Hash([]byte{deposit.LeafType}, origNet, deposit.OriginAddress.Bytes(), destNet, deposit.DestinationAddress[:], deposit.Amount.FillBytes(buf[:]), metaHash.Bytes()).Bytes()) + return res +} + +// Init will allocate the objects in the IMT +func (s *IMT) Init() { + s.Branches = make(map[uint32][]common.Hash) + s.Leaves = make(map[uint32]common.Hash) + s.ZeroHashes = generateZeroHashes(TreeDepth) + s.Proofs = make(map[uint32]Proof) +} + +// AddLeaf will take a given deposit and add it to the collection of leaves. It will also update the +func (s *IMT) AddLeaf(leaf common.Hash, position uint32) { + // just keep a copy of the leaf indexed by deposit count for now + s.Leaves[position] = leaf + + node := leaf + size := uint64(position) + 1 + + // copy the previous set of branches as a starting point. 
We're going to make copies of the branches at each deposit + branches := make([]common.Hash, TreeDepth) + if position == 0 { + branches = generateEmptyHashes(TreeDepth) + } else { + copy(branches, s.Branches[position-1]) + } + + for height := uint64(0); height < TreeDepth; height += 1 { + if ((size >> height) & 1) == 1 { + copy(branches[height][:], node[:]) + break + } + node = crypto.Keccak256Hash(branches[height][:], node[:]) + } + s.Branches[position] = branches + s.Roots = append(s.Roots, s.GetRoot(position)) +} + +// GetRoot will return the root for a particular deposit +func (s *IMT) GetRoot(depositNum uint32) common.Hash { + node := common.Hash{} + size := depositNum + 1 + currentZeroHashHeight := common.Hash{} + + for height := 0; height < TreeDepth; height++ { + if ((size >> height) & 1) == 1 { + node = crypto.Keccak256Hash(s.Branches[depositNum][height][:], node.Bytes()) + + } else { + node = crypto.Keccak256Hash(node.Bytes(), currentZeroHashHeight.Bytes()) + } + currentZeroHashHeight = crypto.Keccak256Hash(currentZeroHashHeight.Bytes(), currentZeroHashHeight.Bytes()) + } + return node +} + +// GetProof will return an object containing the proof data necessary for verification +func (s *IMT) GetProof(depositNum uint32) Proof { + node := common.Hash{} + size := depositNum + 1 + currentZeroHashHeight := common.Hash{} + + siblings := [TreeDepth]common.Hash{} + for height := 0; height < TreeDepth; height++ { + siblingDepositNum := getSiblingLeafNumber(depositNum, uint32(height)) + sibling := currentZeroHashHeight + if _, hasKey := s.Branches[siblingDepositNum]; hasKey { + sibling = s.Branches[siblingDepositNum][height] + } else { + sibling = currentZeroHashHeight + } + + log.Info().Str("sibling", sibling.String()).Msg("Proof Inputs") + siblings[height] = sibling + if ((size >> height) & 1) == 1 { + // node = keccak256(abi.encodePacked(_branch[height], node)); + node = crypto.Keccak256Hash(sibling.Bytes(), node.Bytes()) + } else { + // node = 
keccak256(abi.encodePacked(node, currentZeroHashHeight)); + node = crypto.Keccak256Hash(node.Bytes(), sibling.Bytes()) + } + currentZeroHashHeight = crypto.Keccak256Hash(currentZeroHashHeight.Bytes(), currentZeroHashHeight.Bytes()) + } + p := &Proof{ + Siblings: siblings, + DepositCount: depositNum, + LeafHash: s.Leaves[depositNum], + } + + r, err := Check(s.Roots, p.LeafHash, p.DepositCount, p.Siblings) + if err != nil { + log.Error().Err(err).Msg("failed to validate proof") + } + p.Root = r + s.Proofs[depositNum] = *p + return *p +} + +// getSiblingLeafNumber returns the sibling number of a given number at a specified level in an incremental Merkle tree. +// +// In an incremental Merkle tree, each node has a sibling node at each level of the tree. +// The sibling node can be determined by flipping the bit at the current level and setting all bits to the right of the current level to 1. +// This function calculates the sibling number based on the deposit number and the specified level. +// +// Parameters: +// - LeafNumber: the original number for which the sibling is to be found. +// - level: the level in the Merkle tree at which to find the sibling. +// +// The logic works as follows: +// 1. `1 << level` creates a binary number with a single 1 bit at the position corresponding to the level. +// 2. `LeafNumber ^ (1 << level)` flips the bit at the position corresponding to the level in the LeafNumber. +// 3. `(1 << level) - 1` creates a binary number with all bits to the right of the current level set to 1. +// 4. `| ((1 << level) - 1)` ensures that all bits to the right of the current level are set to 1 in the result. +// +// The function effectively finds the sibling deposit number at each level of the Merkle tree by manipulating the bits accordingly. 
+func getSiblingLeafNumber(leafNumber, level uint32) uint32 { + return leafNumber ^ (1 << level) | ((1 << level) - 1) +} + +// Check is a sanity check of a proof in order to make sure that the +// proof that was generated creates a root that we recognize. This was +// useful while testing in order to avoid verifying that the proof +// works or doesn't work onchain +func Check(roots []common.Hash, leaf common.Hash, position uint32, siblings [32]common.Hash) (common.Hash, error) { + node := leaf + index := position + for height := 0; height < TreeDepth; height++ { + if ((index >> height) & 1) == 1 { + node = crypto.Keccak256Hash(siblings[height][:], node[:]) + } else { + node = crypto.Keccak256Hash(node[:], siblings[height][:]) + } + } + + isProofValid := false + for i := len(roots) - 1; i >= 0; i-- { + if roots[i].Cmp(node) == 0 { + isProofValid = true + break + } + } + + log.Info(). + Bool("is-proof-valid", isProofValid). + Uint32("leaf-position", position). + Str("leaf-hash", leaf.String()). 
+ Str("checked-root", node.String()).Msg("checking proof") + if !isProofValid { + return common.Hash{}, fmt.Errorf("invalid proof") + } + + return node, nil +} + +// https://eth2book.info/capella/part2/deposits-withdrawals/contract/ +func generateZeroHashes(height uint8) []common.Hash { + zeroHashes := make([]common.Hash, height) + zeroHashes[0] = common.Hash{} + for i := 1; i < int(height); i++ { + zeroHashes[i] = crypto.Keccak256Hash(zeroHashes[i-1][:], zeroHashes[i-1][:]) + } + return zeroHashes +} + +func generateEmptyHashes(height uint8) []common.Hash { + zeroHashes := make([]common.Hash, height) + zeroHashes[0] = common.Hash{} + for i := 1; i < int(height); i++ { + zeroHashes[i] = common.Hash{} + } + return zeroHashes +} + +func generateTransactionPayload(ctx context.Context, client *ethclient.Client, ulxlyInputArgBridge string, ulxlyInputArgPvtKey string, ulxlyInputArgGasLimit uint64, ulxlyInputArgDestAddr string, ulxlyInputArgChainID string) (bridgeV2 *ulxly.Ulxly, toAddress common.Address, opts *bind.TransactOpts, err error) { + // checks if bridge address has code + err = ensureCodePresent(ctx, client, ulxlyInputArgBridge) + if err != nil { + err = fmt.Errorf("bridge code check err: %w", err) + return + } + + ulxlyInputArgPvtKey = strings.TrimPrefix(ulxlyInputArgPvtKey, "0x") + bridgeV2, err = ulxly.NewUlxly(common.HexToAddress(ulxlyInputArgBridge), client) + if err != nil { + return + } + + privateKey, err := crypto.HexToECDSA(ulxlyInputArgPvtKey) + if err != nil { + log.Error().Err(err).Msg("Unable to retrieve private key") + return + } + + // value := big.NewInt(*ulxlyInputArgs.Amount) + gasLimit := ulxlyInputArgGasLimit + + chainID := new(big.Int) + // For manual input of chainID, use the user's input + if ulxlyInputArgChainID != "" { + chainID.SetString(ulxlyInputArgChainID, 10) + } else { // If there is no user input for chainID, infer it from context + chainID, err = client.ChainID(ctx) + if err != nil { + log.Error().Err(err).Msg("Cannot get chain 
ID") + return + } + } + + opts, err = bind.NewKeyedTransactorWithChainID(privateKey, chainID) + if err != nil { + log.Error().Err(err).Msg("Cannot generate transactionOpts") + return + } + if inputUlxlyArgs.gasPrice != "" { + gasPrice := new(big.Int) + gasPrice.SetString(inputUlxlyArgs.gasPrice, 10) + opts.GasPrice = gasPrice + } + if inputUlxlyArgs.dryRun { + opts.NoSend = true + } + opts.Context = ctx + opts.GasLimit = gasLimit + toAddress = common.HexToAddress(ulxlyInputArgDestAddr) + if toAddress == (common.Address{}) { + toAddress = opts.From + } + return bridgeV2, toAddress, opts, err +} + +func getMerkleProofsExitRoots(bridgeService bridge_service.BridgeService, deposit bridge_service.Deposit, proofGERHash string, l1InfoTreeIndex uint32) (*bridge_service.Proof, error) { + var ger *common.Hash + if len(proofGERHash) > 0 { + hash := common.HexToHash(proofGERHash) + ger = &hash + } + + var proof *bridge_service.Proof + var err error + if ger != nil { + proof, err = bridgeService.GetProofByGer(deposit.NetworkID, deposit.DepositCnt, *ger) + } else if l1InfoTreeIndex > 0 { + proof, err = bridgeService.GetProofByL1InfoTreeIndex(deposit.NetworkID, deposit.DepositCnt, l1InfoTreeIndex) + } else { + proof, err = bridgeService.GetProof(deposit.NetworkID, deposit.DepositCnt) + } + if err != nil { + return nil, fmt.Errorf("error getting proof for deposit %d on network %d: %w", deposit.DepositCnt, deposit.NetworkID, err) + } + + if len(proof.MerkleProof) == 0 { + errMsg := "the Merkle Proofs cannot be retrieved, double check the input arguments and try again" + log.Error(). + Str("url", bridgeService.Url()). + Uint32("NetworkID", deposit.NetworkID). + Uint32("DepositCnt", deposit.DepositCnt). + Msg(errMsg) + return nil, errors.New(errMsg) + } + if len(proof.RollupMerkleProof) == 0 { + errMsg := "the Rollup Merkle Proofs cannot be retrieved, double check the input arguments and try again" + log.Error(). + Str("url", bridgeService.Url()). 
+ Uint32("NetworkID", deposit.NetworkID). + Uint32("DepositCnt", deposit.DepositCnt). + Msg(errMsg) + return nil, errors.New(errMsg) + } + + if proof.MainExitRoot == nil || proof.RollupExitRoot == nil { + errMsg := "the exit roots from the bridge service were empty" + log.Warn(). + Uint32("DepositCnt", deposit.DepositCnt). + Uint32("OrigNet", deposit.OrigNet). + Uint32("DestNet", deposit.DestNet). + Uint32("NetworkID", deposit.NetworkID). + Stringer("OrigAddr", deposit.OrigAddr). + Stringer("DestAddr", deposit.DestAddr). + Msg("deposit can't be claimed!") + log.Error(). + Str("url", bridgeService.Url()). + Uint32("NetworkID", deposit.NetworkID). + Uint32("DepositCnt", deposit.DepositCnt). + Msg(errMsg) + return nil, errors.New(errMsg) + } + + return proof, nil +} + +func getDeposit(depositNetwork, depositCount uint32) (*bridge_service.Deposit, error) { + deposit, err := bridgeService.GetDeposit(depositNetwork, depositCount) + if err != nil { + return nil, err + } + + if inputUlxlyArgs.legacy { + if !deposit.ReadyForClaim { + log.Error().Msg("The claim transaction is not yet ready to be claimed. 
Try again in a few blocks.") + return nil, ErrNotReadyForClaim + } else if deposit.ClaimTxHash != nil { + log.Info().Str("claimTxHash", deposit.ClaimTxHash.String()).Msg(ErrDepositAlreadyClaimed.Error()) + return nil, ErrDepositAlreadyClaimed + } + } + + return deposit, nil +} + +func getDepositsForAddress(bridgeService bridge_service.BridgeService, destinationAddress string, offset, limit int) ([]bridge_service.Deposit, int, error) { + deposits, total, err := bridgeService.GetDeposits(destinationAddress, offset, limit) + if err != nil { + return nil, 0, err + } + + if len(deposits) != total { + log.Warn().Int("total_deposits", total).Int("retrieved_deposits", len(deposits)).Msg("not all deposits were retrieved") + } + + return deposits, total, nil +} + +// Add the helper function to create an insecure client +func createInsecureEthClient(rpcURL string) (*ethclient.Client, error) { + // WARNING: This disables TLS certificate verification + log.Warn().Msg("WARNING: TLS certificate verification is disabled. 
This is unsafe for production use.") + + httpClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + } + + rpcClient, err := ethrpc.DialOptions(context.Background(), rpcURL, ethrpc.WithHTTPClient(httpClient)) + if err != nil { + return nil, err + } + + return ethclient.NewClient(rpcClient), nil +} + +// Add helper function to create either secure or insecure client based on flag +func createEthClient(ctx context.Context, rpcURL string) (*ethclient.Client, error) { + if inputUlxlyArgs.insecure { + return createInsecureEthClient(rpcURL) + } + return ethclient.DialContext(ctx, rpcURL) +} + +//go:embed BridgeAssetUsage.md +var bridgeAssetUsage string + +//go:embed BridgeMessageUsage.md +var bridgeMessageUsage string + +//go:embed BridgeWETHMessageUsage.md +var bridgeWETHMessageUsage string + +//go:embed ClaimAssetUsage.md +var claimAssetUsage string + +//go:embed ClaimMessageUsage.md +var claimMessageUsage string + +//go:embed proofUsage.md +var proofUsage string + +//go:embed rollupsProofUsage.md +var rollupsProofUsage string + +//go:embed balanceTreeUsage.md +var balanceTreeUsage string + +//go:embed nullifierAndBalanceTreeUsage.md +var nullifierAndBalanceTreeUsage string + +//go:embed nullifierTreeUsage.md +var nullifierTreeUsage string + +//go:embed depositGetUsage.md +var depositGetUsage string + +//go:embed claimGetUsage.md +var claimGetUsage string + +//go:embed verifyBatchesGetUsage.md +var verifyBatchesGetUsage string + var ULxLyCmd = &cobra.Command{ Use: "ulxly", Short: "Utilities for interacting with the uLxLy bridge.", Long: "Basic utility commands for interacting with the bridge contracts, bridge services, and generating proofs.", Args: cobra.NoArgs, } - -// Hidden parent command for bridge and claim to share flags var ulxlyBridgeAndClaimCmd = &cobra.Command{ Args: cobra.NoArgs, Hidden: true, PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { - 
common.InputUlxlyArgs.RPCURL, err = flag.GetRequiredRPCURL(cmd) + inputUlxlyArgs.rpcURL, err = flag.GetRequiredRPCURL(cmd) if err != nil { return err } - common.InputUlxlyArgs.PrivateKey, err = flag.GetRequiredPrivateKey(cmd) + inputUlxlyArgs.privateKey, err = flag.GetRequiredPrivateKey(cmd) if err != nil { return err } @@ -35,48 +2020,547 @@ var ulxlyBridgeAndClaimCmd = &cobra.Command{ }, } +var ulxlyGetEventsCmd = &cobra.Command{ + Args: cobra.NoArgs, + Hidden: true, +} + +var ulxlyProofsCmd = &cobra.Command{ + Args: cobra.NoArgs, + Hidden: true, +} + +var ulxlyBridgeCmd = &cobra.Command{ + Use: "bridge", + Short: "Commands for moving funds and sending messages from one chain to another.", + Args: cobra.NoArgs, +} + +var ulxlyClaimCmd = &cobra.Command{ + Use: "claim", + Short: "Commands for claiming deposits on a particular chain.", + Args: cobra.NoArgs, +} + +type ulxlyArgs struct { + gasLimit uint64 + chainID string + privateKey string + addressOfPrivateKey string + value string + rpcURL string + bridgeAddress string + destNetwork uint32 + destAddress string + tokenAddress string + forceUpdate bool + callData string + callDataFile string + timeout uint64 + depositCount uint32 + depositNetwork uint32 + bridgeServiceURL string + globalIndex string + gasPrice string + dryRun bool + bridgeServiceURLs []string + bridgeLimit int + bridgeOffset int + wait time.Duration + concurrency uint + insecure bool + legacy bool + proofGER string + proofL1InfoTreeIndex uint32 +} + +var inputUlxlyArgs = ulxlyArgs{} + +var ( + bridgeAssetCommand *cobra.Command + bridgeMessageCommand *cobra.Command + bridgeMessageWETHCommand *cobra.Command + claimAssetCommand *cobra.Command + claimMessageCommand *cobra.Command + claimEverythingCommand *cobra.Command + emptyProofCommand *cobra.Command + zeroProofCommand *cobra.Command + proofCommand *cobra.Command + rollupsProofCommand *cobra.Command + balanceTreeCommand *cobra.Command + nullifierAndBalanceTreeCommand *cobra.Command + 
nullifierTreeCommand *cobra.Command + getDepositCommand *cobra.Command + getClaimCommand *cobra.Command + getVerifyBatchesCommand *cobra.Command + + getEvent = &GetEvent{} + getSmcOptions = &GetSmcOptions{} + getVerifyBatchesOptions = &GetVerifyBatchesOptions{} + fileOptions = &FileOptions{} + balanceTreeOptions = &BalanceTreeOptions{} + proofOptions = &ProofOptions{} + rollupsProofOptions = &RollupsProofOptions{} +) + +const ( + ArgGasLimit = "gas-limit" + ArgChainID = "chain-id" + ArgPrivateKey = flag.PrivateKey + ArgValue = "value" + ArgRPCURL = flag.RPCURL + ArgBridgeAddress = "bridge-address" + ArgRollupManagerAddress = "rollup-manager-address" + ArgDestNetwork = "destination-network" + ArgDestAddress = "destination-address" + ArgForceUpdate = "force-update-root" + ArgCallData = "call-data" + ArgCallDataFile = "call-data-file" + ArgTimeout = "transaction-receipt-timeout" + ArgDepositCount = "deposit-count" + ArgDepositNetwork = "deposit-network" + ArgRollupID = "rollup-id" + ArgCompleteMT = "complete-merkle-tree" + ArgBridgeServiceURL = "bridge-service-url" + ArgFileName = "file-name" + ArgL2ClaimsFileName = "l2-claims-file" + ArgL2DepositsFileName = "l2-deposits-file" + ArgL2NetworkID = "l2-network-id" + ArgFromBlock = "from-block" + ArgToBlock = "to-block" + ArgFilterSize = "filter-size" + ArgTokenAddress = "token-address" + ArgGlobalIndex = "global-index" + ArgDryRun = "dry-run" + ArgGasPrice = "gas-price" + ArgBridgeMappings = "bridge-service-map" + ArgBridgeLimit = "bridge-limit" + ArgBridgeOffset = "bridge-offset" + ArgWait = "wait" + ArgConcurrency = "concurrency" + ArgInsecure = "insecure" + ArgLegacy = "legacy" + ArgProofGER = "proof-ger" + ArgProofL1InfoTreeIndex = "proof-l1-info-tree-index" +) + +var ( + bridgeService bridge_service.BridgeService + bridgeServices map[uint32]bridge_service.BridgeService = make(map[uint32]bridge_service.BridgeService) +) + +func prepInputs(cmd *cobra.Command, args []string) (err error) { + if inputUlxlyArgs.dryRun && 
inputUlxlyArgs.gasLimit == 0 { + inputUlxlyArgs.gasLimit = uint64(10_000_000) + } + pvtKey := strings.TrimPrefix(inputUlxlyArgs.privateKey, "0x") + privateKey, err := crypto.HexToECDSA(pvtKey) + if err != nil { + return fmt.Errorf("invalid --%s: %w", ArgPrivateKey, err) + } + + publicKey := privateKey.Public() + + publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return fmt.Errorf("cannot assert type: publicKey is not of type *ecdsa.PublicKey") + } + fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA) + inputUlxlyArgs.addressOfPrivateKey = fromAddress.String() + if inputUlxlyArgs.destAddress == "" { + inputUlxlyArgs.destAddress = fromAddress.String() + log.Info().Stringer("destAddress", fromAddress).Msg("No destination address specified. Using private key's address") + } + + if inputUlxlyArgs.callDataFile != "" { + rawCallData, iErr := os.ReadFile(inputUlxlyArgs.callDataFile) + if iErr != nil { + return iErr + } + if inputUlxlyArgs.callData != "0x" { + return fmt.Errorf("both %s and %s flags were provided", ArgCallData, ArgCallDataFile) + } + inputUlxlyArgs.callData = string(rawCallData) + } + + bridgeService, err = bridge_service_factory.NewBridgeService(inputUlxlyArgs.bridgeServiceURL, inputUlxlyArgs.insecure, inputUlxlyArgs.legacy) + if err != nil { + log.Error().Err(err).Msg("Unable to create bridge service") + return err + } + + bridgeServicesURLs, err := getBridgeServiceURLs() + if err != nil { + log.Error().Err(err).Msg("Unable to get bridge service URLs") + return err + } + + for networkID, url := range bridgeServicesURLs { + bs, err := bridge_service_factory.NewBridgeService(url, inputUlxlyArgs.insecure, inputUlxlyArgs.legacy) + if err != nil { + log.Error().Err(err).Str("url", url).Msg("Unable to create bridge service") + return err + } + if _, exists := bridgeServices[networkID]; exists { + log.Warn().Uint32("networkID", networkID).Str("url", url).Msg("Duplicate network ID found for bridge service URL. 
Overwriting previous entry.") + } + bridgeServices[networkID] = bs + log.Info().Uint32("networkID", networkID).Str("url", url).Msg("Added bridge service") + } + + return nil +} + +type FileOptions struct { + FileName string +} + +func (o *FileOptions) AddFlags(cmd *cobra.Command) { + cmd.Flags().StringVarP(&o.FileName, ArgFileName, "", "", "ndjson file with events data") +} + +type BalanceTreeOptions struct { + L2ClaimsFile, L2DepositsFile, BridgeAddress, RpcURL string + L2NetworkID uint32 + Insecure bool +} + +func (o *BalanceTreeOptions) AddFlags(cmd *cobra.Command) { + f := cmd.Flags() + f.StringVarP(&o.L2ClaimsFile, ArgL2ClaimsFileName, "", "", "ndjson file with l2 claim events data") + f.StringVarP(&o.L2DepositsFile, ArgL2DepositsFileName, "", "", "ndjson file with l2 deposit events data") + f.StringVarP(&o.BridgeAddress, ArgBridgeAddress, "", "", "bridge address") + f.StringVarP(&o.RpcURL, ArgRPCURL, "r", "", "RPC URL") + f.Uint32VarP(&o.L2NetworkID, ArgL2NetworkID, "", 0, "L2 network ID") + f.BoolVarP(&o.Insecure, ArgInsecure, "", false, "skip TLS certificate verification") +} + +type ProofOptions struct { + DepositCount uint32 +} + +func (o *ProofOptions) AddFlags(cmd *cobra.Command) { + cmd.Flags().Uint32VarP(&o.DepositCount, ArgDepositCount, "", 0, "deposit number to generate a proof for") +} + +type RollupsProofOptions struct { + RollupID uint32 + CompleteMerkleTree bool +} + +func (o *RollupsProofOptions) AddFlags(cmd *cobra.Command) { + f := cmd.Flags() + f.Uint32VarP(&o.RollupID, ArgRollupID, "", 0, "rollup ID number to generate a proof for") + f.BoolVarP(&o.CompleteMerkleTree, ArgCompleteMT, "", false, "get proof for a leave higher than the highest rollup ID") +} + +type GetEvent struct { + URL string + FromBlock, ToBlock, FilterSize uint64 + Insecure bool +} + +func (o *GetEvent) AddFlags(cmd *cobra.Command) { + f := cmd.Flags() + f.StringVarP(&o.URL, ArgRPCURL, "u", "", "RPC URL to read the events data") + f.Uint64VarP(&o.FromBlock, ArgFromBlock, 
"f", 0, "start of the range of blocks to retrieve") + f.Uint64VarP(&o.ToBlock, ArgToBlock, "t", 0, "end of the range of blocks to retrieve") + f.Uint64VarP(&o.FilterSize, ArgFilterSize, "i", 1000, "batch size for individual filter queries") + f.BoolVarP(&o.Insecure, ArgInsecure, "", false, "skip TLS certificate verification") + flag.MarkFlagsRequired(cmd, ArgFromBlock, ArgToBlock, ArgRPCURL) +} + +type GetSmcOptions struct { + BridgeAddress string +} + +func (o *GetSmcOptions) AddFlags(cmd *cobra.Command) { + cmd.Flags().StringVarP(&o.BridgeAddress, ArgBridgeAddress, "a", "", "address of the ulxly bridge") +} + +type GetVerifyBatchesOptions struct { + RollupManagerAddress string +} + +func (o *GetVerifyBatchesOptions) AddFlags(cmd *cobra.Command) { + cmd.Flags().StringVarP(&o.RollupManagerAddress, ArgRollupManagerAddress, "a", "", "address of the rollup manager contract") +} + func init() { + bridgeAssetCommand = &cobra.Command{ + Use: "asset", + Short: "Move ETH or an ERC20 between to chains.", + Long: bridgeAssetUsage, + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := bridgeAsset(cmd); err != nil { + log.Fatal().Err(err).Msg("Received critical error") + } + return nil + }, + SilenceUsage: true, + } + bridgeMessageCommand = &cobra.Command{ + Use: "message", + Short: "Send some ETH along with data from one chain to another chain.", + Long: bridgeMessageUsage, + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := bridgeMessage(cmd); err != nil { + log.Fatal().Err(err).Msg("Received critical error") + } + return nil + }, + SilenceUsage: true, + } + bridgeMessageWETHCommand = &cobra.Command{ + Use: "weth", + Short: "For L2's that use a gas token, use this to transfer WETH to another chain.", + Long: bridgeWETHMessageUsage, + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := bridgeWETHMessage(cmd); err != nil { + log.Fatal().Err(err).Msg("Received 
critical error") + } + return nil + }, + SilenceUsage: true, + } + claimAssetCommand = &cobra.Command{ + Use: "asset", + Short: "Claim a deposit.", + Long: claimAssetUsage, + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := claimAsset(cmd); err != nil { + log.Fatal().Err(err).Msg("Received critical error") + } + return nil + }, + SilenceUsage: true, + } + claimMessageCommand = &cobra.Command{ + Use: "message", + Short: "Claim a message.", + Long: claimMessageUsage, + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := claimMessage(cmd); err != nil { + log.Fatal().Err(err).Msg("Received critical error") + } + return nil + }, + SilenceUsage: true, + } + claimEverythingCommand = &cobra.Command{ + Use: "claim-everything", + Short: "Attempt to claim as many deposits and messages as possible.", + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := claimEverything(cmd); err != nil { + log.Fatal().Err(err).Msg("Received critical error") + } + return nil + }, + SilenceUsage: true, + } + emptyProofCommand = &cobra.Command{ + Use: "empty-proof", + Short: "Create an empty proof.", + Long: "Use this command to print an empty proof response that's filled with zero-valued siblings like 0x0000000000000000000000000000000000000000000000000000000000000000. This can be useful when you need to submit a dummy proof.", + RunE: func(cmd *cobra.Command, args []string) error { + return emptyProof() + }, + SilenceUsage: true, + } + zeroProofCommand = &cobra.Command{ + Use: "zero-proof", + Short: "Create a proof that's filled with zeros.", + Long: `Use this command to print a proof response that's filled with the zero +hashes. These values are very helpful for debugging because they would +tell you how populated the tree is and roughly which leaves and +siblings are empty. 
It's also helpful for sanity checking a proof +response to understand if the hashed value is part of the zero hashes +or if it's actually an intermediate hash.`, + RunE: func(cmd *cobra.Command, args []string) error { + return zeroProof() + }, + SilenceUsage: true, + } + proofCommand = &cobra.Command{ + Use: "proof", + Short: "Generate a proof for a given range of deposits.", + Long: proofUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return proof(args) + }, + SilenceUsage: true, + } + fileOptions.AddFlags(proofCommand) + proofOptions.AddFlags(proofCommand) + ulxlyProofsCmd.AddCommand(proofCommand) + ULxLyCmd.AddCommand(proofCommand) + + rollupsProofCommand = &cobra.Command{ + Use: "rollups-proof", + Short: "Generate a proof for a given range of rollups.", + Long: rollupsProofUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return rollupsExitRootProof(args) + }, + SilenceUsage: true, + } + fileOptions.AddFlags(rollupsProofCommand) + rollupsProofOptions.AddFlags(rollupsProofCommand) + ulxlyProofsCmd.AddCommand(rollupsProofCommand) + ULxLyCmd.AddCommand(rollupsProofCommand) + + balanceTreeCommand = &cobra.Command{ + Use: "compute-balance-tree", + Short: "Compute the balance tree given the deposits.", + Long: balanceTreeUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return balanceTree() + }, + SilenceUsage: true, + } + balanceTreeOptions.AddFlags(balanceTreeCommand) + ULxLyCmd.AddCommand(balanceTreeCommand) + + nullifierAndBalanceTreeCommand = &cobra.Command{ + Use: "compute-balance-nullifier-tree", + Short: "Compute the balance tree and the nullifier tree given the deposits and claims.", + Long: nullifierAndBalanceTreeUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return nullifierAndBalanceTree() + }, + SilenceUsage: true, + } + balanceTreeOptions.AddFlags(nullifierAndBalanceTreeCommand) + ULxLyCmd.AddCommand(nullifierAndBalanceTreeCommand) + + nullifierTreeCommand = &cobra.Command{ + Use: 
"compute-nullifier-tree", + Short: "Compute the nullifier tree given the claims.", + Long: nullifierTreeUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return nullifierTree(args) + }, + SilenceUsage: true, + } + fileOptions.AddFlags(nullifierTreeCommand) + ULxLyCmd.AddCommand(nullifierTreeCommand) + + getDepositCommand = &cobra.Command{ + Use: "get-deposits", + Short: "Generate ndjson for each bridge deposit over a particular range of blocks.", + Long: depositGetUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return readDeposit(cmd) + }, + SilenceUsage: true, + } + getEvent.AddFlags(getDepositCommand) + getSmcOptions.AddFlags(getDepositCommand) + ulxlyGetEventsCmd.AddCommand(getDepositCommand) + ULxLyCmd.AddCommand(getDepositCommand) + + getClaimCommand = &cobra.Command{ + Use: "get-claims", + Short: "Generate ndjson for each bridge claim over a particular range of blocks.", + Long: claimGetUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return readClaim(cmd) + }, + SilenceUsage: true, + } + getEvent.AddFlags(getClaimCommand) + getSmcOptions.AddFlags(getClaimCommand) + ulxlyGetEventsCmd.AddCommand(getClaimCommand) + ULxLyCmd.AddCommand(getClaimCommand) + + getVerifyBatchesCommand = &cobra.Command{ + Use: "get-verify-batches", + Short: "Generate ndjson for each verify batch over a particular range of blocks.", + Long: verifyBatchesGetUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return readVerifyBatches(cmd) + }, + SilenceUsage: true, + } + getEvent.AddFlags(getVerifyBatchesCommand) + getVerifyBatchesOptions.AddFlags(getVerifyBatchesCommand) + ulxlyGetEventsCmd.AddCommand(getVerifyBatchesCommand) + ULxLyCmd.AddCommand(getVerifyBatchesCommand) + // Arguments for both bridge and claim fBridgeAndClaim := ulxlyBridgeAndClaimCmd.PersistentFlags() - fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.RPCURL, common.ArgRPCURL, "", "RPC URL to send the transaction") - 
fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.BridgeAddress, common.ArgBridgeAddress, "", "address of the lxly bridge") - fBridgeAndClaim.Uint64Var(&common.InputUlxlyArgs.GasLimit, common.ArgGasLimit, 0, "force specific gas limit for transaction") - fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.ChainID, common.ArgChainID, "", "chain ID to use in the transaction") - fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.PrivateKey, common.ArgPrivateKey, "", "hex encoded private key for sending transaction") - fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.DestAddress, common.ArgDestAddress, "", "destination address for the bridge") - fBridgeAndClaim.Uint64Var(&common.InputUlxlyArgs.Timeout, common.ArgTimeout, 60, "timeout in seconds to wait for transaction receipt confirmation") - fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.GasPrice, common.ArgGasPrice, "", "gas price to use") - fBridgeAndClaim.BoolVar(&common.InputUlxlyArgs.DryRun, common.ArgDryRun, false, "do all of the transaction steps but do not send the transaction") - fBridgeAndClaim.BoolVar(&common.InputUlxlyArgs.Insecure, common.ArgInsecure, false, "skip TLS certificate verification") - fBridgeAndClaim.BoolVar(&common.InputUlxlyArgs.Legacy, common.ArgLegacy, true, "force usage of legacy bridge service") - flag.MarkPersistentFlagsRequired(ulxlyBridgeAndClaimCmd, common.ArgBridgeAddress) - - // Bridge and Claim subcommands under hidden parent - ulxlyBridgeAndClaimCmd.AddCommand(bridge.BridgeCmd) - ulxlyBridgeAndClaimCmd.AddCommand(claim.ClaimCmd) - ulxlyBridgeAndClaimCmd.AddCommand(claim.ClaimEverythingCmd) - - // Add hidden parent to root + fBridgeAndClaim.StringVar(&inputUlxlyArgs.rpcURL, ArgRPCURL, "", "RPC URL to send the transaction") + fBridgeAndClaim.StringVar(&inputUlxlyArgs.bridgeAddress, ArgBridgeAddress, "", "address of the lxly bridge") + fBridgeAndClaim.Uint64Var(&inputUlxlyArgs.gasLimit, ArgGasLimit, 0, "force specific gas limit for transaction") + 
fBridgeAndClaim.StringVar(&inputUlxlyArgs.chainID, ArgChainID, "", "chain ID to use in the transaction") + fBridgeAndClaim.StringVar(&inputUlxlyArgs.privateKey, ArgPrivateKey, "", "hex encoded private key for sending transaction") + fBridgeAndClaim.StringVar(&inputUlxlyArgs.destAddress, ArgDestAddress, "", "destination address for the bridge") + fBridgeAndClaim.Uint64Var(&inputUlxlyArgs.timeout, ArgTimeout, 60, "timeout in seconds to wait for transaction receipt confirmation") + fBridgeAndClaim.StringVar(&inputUlxlyArgs.gasPrice, ArgGasPrice, "", "gas price to use") + fBridgeAndClaim.BoolVar(&inputUlxlyArgs.dryRun, ArgDryRun, false, "do all of the transaction steps but do not send the transaction") + fBridgeAndClaim.BoolVar(&inputUlxlyArgs.insecure, ArgInsecure, false, "skip TLS certificate verification") + fBridgeAndClaim.BoolVar(&inputUlxlyArgs.legacy, ArgLegacy, true, "force usage of legacy bridge service") + flag.MarkPersistentFlagsRequired(ulxlyBridgeAndClaimCmd, ArgBridgeAddress) + + // bridge specific args + fBridge := ulxlyBridgeCmd.PersistentFlags() + fBridge.BoolVar(&inputUlxlyArgs.forceUpdate, ArgForceUpdate, true, "update the new global exit root") + fBridge.StringVar(&inputUlxlyArgs.value, ArgValue, "0", "amount in wei to send with the transaction") + fBridge.Uint32Var(&inputUlxlyArgs.destNetwork, ArgDestNetwork, 0, "rollup ID of the destination network") + fBridge.StringVar(&inputUlxlyArgs.tokenAddress, ArgTokenAddress, "0x0000000000000000000000000000000000000000", "address of ERC20 token to use") + fBridge.StringVar(&inputUlxlyArgs.callData, ArgCallData, "0x", "call data to be passed directly with bridge-message or as an ERC20 Permit") + fBridge.StringVar(&inputUlxlyArgs.callDataFile, ArgCallDataFile, "", "a file containing hex encoded call data") + flag.MarkPersistentFlagsRequired(ulxlyBridgeCmd, ArgDestNetwork) + + // Claim specific args + fClaim := ulxlyClaimCmd.PersistentFlags() + fClaim.Uint32Var(&inputUlxlyArgs.depositCount, ArgDepositCount, 0, 
"deposit count of the bridge transaction") + fClaim.Uint32Var(&inputUlxlyArgs.depositNetwork, ArgDepositNetwork, 0, "rollup ID of the network where the deposit was made") + fClaim.StringVar(&inputUlxlyArgs.bridgeServiceURL, ArgBridgeServiceURL, "", "URL of the bridge service") + fClaim.StringVar(&inputUlxlyArgs.globalIndex, ArgGlobalIndex, "", "an override of the global index value") + fClaim.DurationVar(&inputUlxlyArgs.wait, ArgWait, time.Duration(0), "retry claiming until deposit is ready, up to specified duration (available for claim asset and claim message)") + fClaim.StringVar(&inputUlxlyArgs.proofGER, ArgProofGER, "", "if specified and using legacy mode, the proof will be generated against this GER") + fClaim.Uint32Var(&inputUlxlyArgs.proofL1InfoTreeIndex, ArgProofL1InfoTreeIndex, 0, "if specified and using aggkit mode, the proof will be generated against this L1 Info Tree Index") + flag.MarkPersistentFlagsRequired(ulxlyClaimCmd, ArgDepositCount, ArgDepositNetwork, ArgBridgeServiceURL) + ulxlyClaimCmd.MarkFlagsMutuallyExclusive(ArgProofGER, ArgProofL1InfoTreeIndex) + + // Claim Everything Helper Command + fClaimEverything := claimEverythingCommand.Flags() + fClaimEverything.StringSliceVar(&inputUlxlyArgs.bridgeServiceURLs, ArgBridgeMappings, nil, "network ID to bridge service URL mappings (e.g. 
'1=http://network-1-bridgeurl,7=http://network-2-bridgeurl')") + fClaimEverything.IntVar(&inputUlxlyArgs.bridgeLimit, ArgBridgeLimit, 25, "limit the number or responses returned by the bridge service when claiming") + fClaimEverything.IntVar(&inputUlxlyArgs.bridgeOffset, ArgBridgeOffset, 0, "offset to specify for pagination of underlying bridge service deposits") + fClaimEverything.UintVar(&inputUlxlyArgs.concurrency, ArgConcurrency, 1, "worker pool size for claims") + flag.MarkFlagsRequired(claimEverythingCommand, ArgBridgeMappings) + + // Top Level ULxLyCmd.AddCommand(ulxlyBridgeAndClaimCmd) + ULxLyCmd.AddCommand(ulxlyGetEventsCmd) + ULxLyCmd.AddCommand(ulxlyProofsCmd) + ULxLyCmd.AddCommand(emptyProofCommand) + ULxLyCmd.AddCommand(zeroProofCommand) + ULxLyCmd.AddCommand(proofCommand) + + ULxLyCmd.AddCommand(ulxlyBridgeCmd) + ULxLyCmd.AddCommand(ulxlyClaimCmd) + ULxLyCmd.AddCommand(claimEverythingCommand) + + // Bridge and Claim + ulxlyBridgeAndClaimCmd.AddCommand(ulxlyBridgeCmd) + ulxlyBridgeAndClaimCmd.AddCommand(ulxlyClaimCmd) + ulxlyBridgeAndClaimCmd.AddCommand(claimEverythingCommand) + + // Bridge + ulxlyBridgeCmd.AddCommand(bridgeAssetCommand) + ulxlyBridgeCmd.AddCommand(bridgeMessageCommand) + ulxlyBridgeCmd.AddCommand(bridgeMessageWETHCommand) - // Add bridge and claim directly to root (so they're visible in help) - ULxLyCmd.AddCommand(bridge.BridgeCmd) - ULxLyCmd.AddCommand(claim.ClaimCmd) - ULxLyCmd.AddCommand(claim.ClaimEverythingCmd) - - // Proof commands - ULxLyCmd.AddCommand(proof.ProofCmd) - ULxLyCmd.AddCommand(proof.RollupsProofCmd) - ULxLyCmd.AddCommand(proof.EmptyProofCmd) - ULxLyCmd.AddCommand(proof.ZeroProofCmd) - - // Event commands - ULxLyCmd.AddCommand(events.GetDepositCmd) - ULxLyCmd.AddCommand(events.GetClaimCmd) - ULxLyCmd.AddCommand(events.GetVerifyBatchesCmd) - - // Tree commands - ULxLyCmd.AddCommand(tree.BalanceTreeCmd) - ULxLyCmd.AddCommand(tree.NullifierTreeCmd) - ULxLyCmd.AddCommand(tree.NullifierAndBalanceTreeCmd) + // Claim + 
ulxlyClaimCmd.AddCommand(claimAssetCommand) + ulxlyClaimCmd.AddCommand(claimMessageCommand) } From 1eafffd8f34f7b55d7c95e34b50c17fe582d8cba Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 20 Jan 2026 11:51:07 -0500 Subject: [PATCH 03/48] fix: make gen --- doc/polycli_p2p_sensor.md | 108 +++++++++++++++++++------------------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/doc/polycli_p2p_sensor.md b/doc/polycli_p2p_sensor.md index f79f610cb..d6cf51399 100644 --- a/doc/polycli_p2p_sensor.md +++ b/doc/polycli_p2p_sensor.md @@ -91,60 +91,60 @@ polycli p2p sensor amoy-nodes.json \ ## Flags ```bash - --api-port uint port API server will listen on (default 8080) - --blocks-cache-ttl duration time to live for block cache entries (0 for no expiration) (default 10m0s) - -b, --bootnodes string comma separated nodes used for bootstrapping - --broadcast-block-hashes broadcast block hashes to peers - --broadcast-blocks broadcast full blocks to peers - --broadcast-tx broadcast full transactions to peers - --broadcast-tx-hashes broadcast transaction hashes to peers - --database string which database to persist data to, options are: - - datastore (GCP Datastore) - - json (output to stdout) - - none (no persistence) (default "none") - -d, --database-id string datastore database ID - --dial-ratio int ratio of inbound to dialed connections (dial ratio of 2 allows 1/2 of connections to be dialed, setting to 0 defaults to 3) - --discovery-dns string DNS discovery ENR tree URL - --discovery-port int UDP P2P discovery port (default 30303) - --fork-id bytesHex hex encoded fork ID (omit 0x) (default F097BC13) - --genesis-hash string genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") - -h, --help help for sensor - --key string hex-encoded private key (cannot be set with --key-file) - -k, --key-file string private key file (cannot be set with --key) - --known-blocks-cache-ttl duration time to live for known block cache entries 
(0 for no expiration) (default 5m0s) - --known-txs-cache-ttl duration time to live for known transaction cache entries (0 for no expiration) (default 5m0s) - --max-blocks int maximum blocks to track across all peers (0 for no limit) (default 1024) - -D, --max-db-concurrency int maximum number of concurrent database operations to perform (increasing this - will result in less chance of missing data but can significantly increase memory usage) (default 10000) - --max-known-blocks int maximum block hashes to track per peer (0 for no limit) (default 1024) - --max-known-txs int maximum transaction hashes to track per peer (0 for no limit) (default 8192) - --max-parents int maximum parent block hashes to track per peer (0 for no limit) (default 1024) - -m, --max-peers int maximum number of peers to connect to (default 2000) - --max-requests int maximum request IDs to track per peer (0 for no limit) (default 2048) - --max-txs int maximum transactions to cache for serving to peers (0 for no limit) (default 8192) - --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") - -n, --network-id uint filter discovered nodes by this network ID - --no-discovery disable P2P peer discovery - --parents-cache-ttl duration time to live for parent hash cache entries (0 for no expiration) (default 5m0s) - --port int TCP network listening port (default 30303) - --pprof run pprof server - --pprof-port uint port pprof runs on (default 6060) - -p, --project-id string GCP project ID - --prom run Prometheus server (default true) - --prom-port uint port Prometheus runs on (default 2112) - --requests-cache-ttl duration time to live for requests cache entries (0 for no expiration) (default 5m0s) - --rpc string RPC endpoint used to fetch latest block (default "https://polygon-rpc.com") - --rpc-port uint port for JSON-RPC server to receive transactions (default 8545) - -s, --sensor-id string sensor ID when writing block/tx events - --static-nodes string static nodes 
file - --trusted-nodes string trusted nodes file - --ttl duration time to live (default 336h0m0s) - --txs-cache-ttl duration time to live for transaction cache entries (0 for no expiration) (default 10m0s) - --write-block-events write block events to database (default true) - -B, --write-blocks write blocks to database (default true) - --write-peers write peers to database (default true) - --write-tx-events write transaction events to database (this option can significantly increase CPU and memory usage) (default true) - -t, --write-txs write transactions to database (this option can significantly increase CPU and memory usage) (default true) + --api-port uint port API server will listen on (default 8080) + --blocks-cache-ttl duration time to live for block cache entries (0 for no expiration) (default 10m0s) + -b, --bootnodes string comma separated nodes used for bootstrapping + --broadcast-block-hashes broadcast block hashes to peers + --broadcast-blocks broadcast full blocks to peers + --broadcast-tx broadcast full transactions to peers + --broadcast-tx-hashes broadcast transaction hashes to peers + --database string which database to persist data to, options are: + - datastore (GCP Datastore) + - json (output to stdout) + - none (no persistence) (default "none") + -d, --database-id string datastore database ID + --dial-ratio int ratio of inbound to dialed connections (dial ratio of 2 allows 1/2 of connections to be dialed, setting to 0 defaults to 3) + --discovery-dns string DNS discovery ENR tree URL + --discovery-port int UDP P2P discovery port (default 30303) + --fork-id bytesHex hex encoded fork ID (omit 0x) (default F097BC13) + --genesis-hash string genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") + -h, --help help for sensor + --key string hex-encoded private key (cannot be set with --key-file) + -k, --key-file string private key file (cannot be set with --key) + --known-blocks-cache-ttl duration time to 
live for known block cache entries (0 for no expiration) (default 5m0s) + --known-txs-cache-ttl duration time to live for known transaction cache entries (0 for no expiration) (default 5m0s) + --max-blocks int maximum blocks to track across all peers (0 for no limit) (default 1024) + -D, --max-db-concurrency int maximum number of concurrent database operations to perform (increasing this + will result in less chance of missing data but can significantly increase memory usage) (default 10000) + --max-known-blocks int maximum block hashes to track per peer (0 for no limit) (default 1024) + --max-known-txs int maximum transaction hashes to track per peer (0 for no limit) (default 8192) + --max-parents int maximum parent block hashes to track per peer (0 for no limit) (default 1024) + -m, --max-peers int maximum number of peers to connect to (default 2000) + --max-requests int maximum request IDs to track per peer (0 for no limit) (default 2048) + --max-txs int maximum transactions to cache for serving to peers (0 for no limit) (default 8192) + --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") + -n, --network-id uint filter discovered nodes by this network ID + --no-discovery disable P2P peer discovery + --parents-cache-ttl duration time to live for parent hash cache entries (0 for no expiration) (default 5m0s) + --port int TCP network listening port (default 30303) + --pprof run pprof server + --pprof-port uint port pprof runs on (default 6060) + -p, --project-id string GCP project ID + --prom run Prometheus server (default true) + --prom-port uint port Prometheus runs on (default 2112) + --requests-cache-ttl duration time to live for requests cache entries (0 for no expiration) (default 5m0s) + --rpc string RPC endpoint used to fetch latest block (default "https://polygon-rpc.com") + --rpc-port uint port for JSON-RPC server to receive transactions (default 8545) + -s, --sensor-id string sensor ID when writing block/tx events + 
--static-nodes string static nodes file + --trusted-nodes string trusted nodes file + --ttl duration time to live (default 336h0m0s) + --txs-cache-ttl duration time to live for transaction cache entries (0 for no expiration) (default 10m0s) + --write-block-events write block events to database (default true) + -B, --write-blocks write blocks to database (default true) + --write-peers write peers to database (default true) + --write-tx-events write transaction events to database (this option can significantly increase CPU and memory usage) (default true) + -t, --write-txs write transactions to database (this option can significantly increase CPU and memory usage) (default true) ``` The command also inherits flags from parent commands. From 9b73909ee925c6cc820804bdd99a5b7c34e460e3 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Thu, 19 Feb 2026 16:44:58 -0500 Subject: [PATCH 04/48] fix: comments --- p2p/conns.go | 16 ++++++++-------- p2p/protocol.go | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/p2p/conns.go b/p2p/conns.go index 76eb03d58..4b85b2fff 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -87,31 +87,31 @@ func NewConns(opts ConnsOptions) *Conns { } } -// AddConn adds a connection to the manager. -func (c *Conns) AddConn(cn *conn) { +// Add adds a connection to the manager. +func (c *Conns) Add(cn *conn) { c.mu.Lock() defer c.mu.Unlock() c.conns[cn.node.ID().String()] = cn cn.logger.Debug().Msg("Added connection") } -// RemoveConn removes a connection from the manager when a peer disconnects. -func (c *Conns) RemoveConn(cn *conn) { +// Remove removes a connection from the manager when a peer disconnects. +func (c *Conns) Remove(cn *conn) { c.mu.Lock() defer c.mu.Unlock() delete(c.conns, cn.node.ID().String()) cn.logger.Debug().Msg("Removed connection") } -// BroadcastTx broadcasts a single transaction to all connected peers and -// returns the number of peers the transaction was successfully sent to. 
+// BroadcastTx broadcasts a single transaction to all connected peers. +// Returns the number of peers the transaction was successfully sent to. func (c *Conns) BroadcastTx(tx *types.Transaction) int { return c.BroadcastTxs(types.Transactions{tx}) } // BroadcastTxs broadcasts multiple transactions to all connected peers, -// filtering out transactions that each peer already knows about, and returns -// the number of peers the transactions were successfully sent to. +// filtering out transactions that each peer already knows about. +// Returns the number of peers the transactions were successfully sent to. // If broadcast flags are disabled, this is a no-op. func (c *Conns) BroadcastTxs(txs types.Transactions) int { if !c.shouldBroadcastTx { diff --git a/p2p/protocol.go b/p2p/protocol.go index e55a8de22..d869ae2a3 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -133,8 +133,8 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { } // Send the connection object to the conns manager for RPC broadcasting - opts.Conns.AddConn(c) - defer opts.Conns.RemoveConn(c) + opts.Conns.Add(c) + defer opts.Conns.Remove(c) ctx := opts.Context From bf9719a55155505d303eb010595ea6516cb6e196 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Thu, 19 Feb 2026 17:00:16 -0500 Subject: [PATCH 05/48] fix: flag name --- cmd/p2p/sensor/sensor.go | 2 +- doc/polycli_p2p_sensor.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index f2e3c2cd7..204e38e02 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -467,7 +467,7 @@ will result in less chance of missing data but can significantly increase memory f.BoolVar(&inputSensorParams.ShouldWriteTransactionEvents, "write-tx-events", true, `write transaction events to database (this option can significantly increase CPU and memory usage)`) f.BoolVar(&inputSensorParams.ShouldWritePeers, "write-peers", true, "write peers to database") - 
f.BoolVar(&inputSensorParams.ShouldBroadcastTx, "broadcast-tx", false, "broadcast full transactions to peers") + f.BoolVar(&inputSensorParams.ShouldBroadcastTx, "broadcast-txs", false, "broadcast full transactions to peers") f.BoolVar(&inputSensorParams.ShouldBroadcastTxHashes, "broadcast-tx-hashes", false, "broadcast transaction hashes to peers") f.BoolVar(&inputSensorParams.ShouldBroadcastBlocks, "broadcast-blocks", false, "broadcast full blocks to peers") f.BoolVar(&inputSensorParams.ShouldBroadcastBlockHashes, "broadcast-block-hashes", false, "broadcast block hashes to peers") diff --git a/doc/polycli_p2p_sensor.md b/doc/polycli_p2p_sensor.md index 77a83944e..eed8195cc 100644 --- a/doc/polycli_p2p_sensor.md +++ b/doc/polycli_p2p_sensor.md @@ -96,8 +96,8 @@ polycli p2p sensor amoy-nodes.json \ -b, --bootnodes string comma separated nodes used for bootstrapping --broadcast-block-hashes broadcast block hashes to peers --broadcast-blocks broadcast full blocks to peers - --broadcast-tx broadcast full transactions to peers --broadcast-tx-hashes broadcast transaction hashes to peers + --broadcast-txs broadcast full transactions to peers --database string which database to persist data to, options are: - datastore (GCP Datastore) - json (output to stdout) From 1e2306a3a2ce11c2831f133c869dfd51e4ce04e6 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Thu, 19 Feb 2026 17:30:21 -0500 Subject: [PATCH 06/48] fix: expiresAt --- p2p/cache.go | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/p2p/cache.go b/p2p/cache.go index def29458b..b73309743 100644 --- a/p2p/cache.go +++ b/p2p/cache.go @@ -24,7 +24,7 @@ type Cache[K comparable, V any] struct { type entry[K comparable, V any] struct { key K value V - expiresAt time.Time + expiresAt *time.Time // nil when TTL=0, saves 16 bytes per entry } // NewCache creates a new cache with the given options. 
@@ -44,10 +44,10 @@ func (c *Cache[K, V]) Add(key K, value V) { c.mu.Lock() defer c.mu.Unlock() - now := time.Now() - expiresAt := time.Time{} + var expiresAt *time.Time if c.ttl > 0 { - expiresAt = now.Add(c.ttl) + t := time.Now().Add(c.ttl) + expiresAt = &t } if elem, ok := c.items[key]; ok { @@ -89,7 +89,7 @@ func (c *Cache[K, V]) Get(key K) (V, bool) { e := elem.Value.(*entry[K, V]) - if c.ttl > 0 && time.Now().After(e.expiresAt) { + if e.expiresAt != nil && time.Now().After(*e.expiresAt) { c.list.Remove(elem) delete(c.items, key) var zero V @@ -114,7 +114,7 @@ func (c *Cache[K, V]) Peek(key K) (V, bool) { e := elem.Value.(*entry[K, V]) - if c.ttl > 0 && time.Now().After(e.expiresAt) { + if e.expiresAt != nil && time.Now().After(*e.expiresAt) { var zero V return zero, false } @@ -131,15 +131,16 @@ func (c *Cache[K, V]) Update(key K, updateFn func(V) V) { defer c.mu.Unlock() now := time.Now() - expiresAt := time.Time{} + var expiresAt *time.Time if c.ttl > 0 { - expiresAt = now.Add(c.ttl) + t := now.Add(c.ttl) + expiresAt = &t } var currentVal V if elem, ok := c.items[key]; ok { e := elem.Value.(*entry[K, V]) - if c.ttl == 0 || !now.After(e.expiresAt) { + if e.expiresAt == nil || !now.After(*e.expiresAt) { currentVal = e.value // Update existing entry c.list.MoveToFront(elem) @@ -186,7 +187,7 @@ func (c *Cache[K, V]) Contains(key K) bool { e := elem.Value.(*entry[K, V]) - if c.ttl > 0 && time.Now().After(e.expiresAt) { + if e.expiresAt != nil && time.Now().After(*e.expiresAt) { return false } From 8041cc641265503ce9ff820dc5fe39a08991d537 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Thu, 19 Feb 2026 17:42:02 -0500 Subject: [PATCH 07/48] fix: graceful shutdown --- cmd/p2p/sensor/api.go | 15 +++++++-- cmd/p2p/sensor/rpc.go | 15 +++++++-- cmd/p2p/sensor/sensor.go | 68 +++++++++++++++++++++++++++++++--------- 3 files changed, 79 insertions(+), 19 deletions(-) diff --git a/cmd/p2p/sensor/api.go b/cmd/p2p/sensor/api.go index efba4b8c8..ab195c42a 100644 --- 
a/cmd/p2p/sensor/api.go +++ b/cmd/p2p/sensor/api.go @@ -1,6 +1,7 @@ package sensor import ( + "context" "encoding/json" "fmt" "net/http" @@ -57,7 +58,8 @@ type apiData struct { // handleAPI sets up the API for interacting with the sensor. All endpoints // return information about the sensor node and all connected peers, including // the types and counts of eth packets sent and received by each peer. -func handleAPI(server *ethp2p.Server, conns *p2p.Conns) { +// The server gracefully shuts down when the context is cancelled. +func handleAPI(ctx context.Context, server *ethp2p.Server, conns *p2p.Conns) { mux := http.NewServeMux() mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { @@ -117,7 +119,16 @@ func handleAPI(server *ethp2p.Server, conns *p2p.Conns) { }) addr := fmt.Sprintf(":%d", inputSensorParams.APIPort) - if err := http.ListenAndServe(addr, mux); err != nil { + httpServer := &http.Server{Addr: addr, Handler: mux} + + go func() { + <-ctx.Done() + if err := httpServer.Shutdown(context.Background()); err != nil { + log.Error().Err(err).Msg("Failed to shutdown API server") + } + }() + + if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { log.Error().Err(err).Msg("Failed to start API handler") } } diff --git a/cmd/p2p/sensor/rpc.go b/cmd/p2p/sensor/rpc.go index f7dde01ee..c05586ac1 100644 --- a/cmd/p2p/sensor/rpc.go +++ b/cmd/p2p/sensor/rpc.go @@ -1,6 +1,7 @@ package sensor import ( + "context" "encoding/json" "fmt" "io" @@ -41,7 +42,8 @@ type rpcError struct { // It handles eth_sendRawTransaction requests, validates transaction signatures, // and broadcasts valid transactions to all connected peers. // Supports both single requests and batch requests per JSON-RPC 2.0 specification. -func handleRPC(conns *p2p.Conns, networkID uint64) { +// The server gracefully shuts down when the context is cancelled. 
+func handleRPC(ctx context.Context, conns *p2p.Conns, networkID uint64) { // Use network ID as chain ID for signature validation chainID := new(big.Int).SetUint64(networkID) @@ -85,8 +87,17 @@ func handleRPC(conns *p2p.Conns, networkID uint64) { }) addr := fmt.Sprintf(":%d", inputSensorParams.RPCPort) + server := &http.Server{Addr: addr, Handler: mux} + + go func() { + <-ctx.Done() + if err := server.Shutdown(context.Background()); err != nil { + log.Error().Err(err).Msg("Failed to shutdown RPC server") + } + }() + log.Info().Str("addr", addr).Msg("Starting JSON-RPC server") - if err := http.ListenAndServe(addr, mux); err != nil { + if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { log.Error().Err(err).Msg("Failed to start RPC server") } } diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index 204e38e02..b1a911a81 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -274,21 +274,25 @@ var SensorCmd = &cobra.Command{ signals := make(chan os.Signal, 1) signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) + // Create a cancellable context for graceful shutdown of background goroutines. + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + if inputSensorParams.ShouldRunPprof { - go handlePprof() + go handlePprof(ctx) } if inputSensorParams.ShouldRunPrometheus { - go handlePrometheus() + go handlePrometheus(ctx) } - go handleAPI(&server, conns) + go handleAPI(ctx, &server, conns) // Start the RPC server for receiving transactions - go handleRPC(conns, inputSensorParams.NetworkID) + go handleRPC(ctx, conns, inputSensorParams.NetworkID) // Run DNS discovery immediately at startup. 
- go handleDNSDiscovery(&server, dnsLock) + go handleDNSDiscovery(ctx, &server, dnsLock) for { select { @@ -307,7 +311,7 @@ var SensorCmd = &cobra.Command{ log.Error().Err(err).Msg("Failed to write nodes to file") } case <-ticker1h.C: - go handleDNSDiscovery(&server, dnsLock) + go handleDNSDiscovery(ctx, &server, dnsLock) case <-signals: // This gracefully stops the sensor so that the peers can be written to // the nodes file. @@ -325,10 +329,20 @@ var SensorCmd = &cobra.Command{ // handlePprof starts a server for performance profiling using pprof on the // specified port. This allows for real-time monitoring and analysis of the // sensor's performance. The port number is configured through -// inputSensorParams.PprofPort. An error is logged if the server fails to start. -func handlePprof() { +// inputSensorParams.PprofPort. The server gracefully shuts down when the +// context is cancelled. +func handlePprof(ctx context.Context) { addr := fmt.Sprintf(":%d", inputSensorParams.PprofPort) - if err := http.ListenAndServe(addr, nil); err != nil { + server := &http.Server{Addr: addr} + + go func() { + <-ctx.Done() + if err := server.Shutdown(context.Background()); err != nil { + log.Error().Err(err).Msg("Failed to shutdown pprof server") + } + }() + + if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { log.Error().Err(err).Msg("Failed to start pprof") } } @@ -336,12 +350,23 @@ func handlePprof() { // handlePrometheus starts a server to expose Prometheus metrics at the /metrics // endpoint. This enables Prometheus to scrape and collect metrics data for // monitoring purposes. The port number is configured through -// inputSensorParams.PrometheusPort. An error is logged if the server fails to -// start. -func handlePrometheus() { - http.Handle("/metrics", promhttp.Handler()) +// inputSensorParams.PrometheusPort. The server gracefully shuts down when the +// context is cancelled. 
+func handlePrometheus(ctx context.Context) { + mux := http.NewServeMux() + mux.Handle("/metrics", promhttp.Handler()) + addr := fmt.Sprintf(":%d", inputSensorParams.PrometheusPort) - if err := http.ListenAndServe(addr, nil); err != nil { + server := &http.Server{Addr: addr, Handler: mux} + + go func() { + <-ctx.Done() + if err := server.Shutdown(context.Background()); err != nil { + log.Error().Err(err).Msg("Failed to shutdown Prometheus server") + } + }() + + if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { log.Error().Err(err).Msg("Failed to start Prometheus handler") } } @@ -349,7 +374,8 @@ func handlePrometheus() { // handleDNSDiscovery performs DNS-based peer discovery and adds new peers to // the p2p server. It uses an iterator to discover peers incrementally rather // than loading all nodes at once. The lock channel prevents concurrent runs. -func handleDNSDiscovery(server *ethp2p.Server, lock chan struct{}) { +// Discovery stops when the context is cancelled. +func handleDNSDiscovery(ctx context.Context, server *ethp2p.Server, lock chan struct{}) { if len(inputSensorParams.DiscoveryDNS) == 0 { return } @@ -357,6 +383,8 @@ func handleDNSDiscovery(server *ethp2p.Server, lock chan struct{}) { select { case lock <- struct{}{}: defer func() { <-lock }() + case <-ctx.Done(): + return default: log.Warn().Msg("DNS discovery already running, skipping") return @@ -377,6 +405,16 @@ func handleDNSDiscovery(server *ethp2p.Server, lock chan struct{}) { // Add DNS-discovered peers using the iterator. count := 0 for iter.Next() { + // Check for context cancellation to stop discovery promptly. + select { + case <-ctx.Done(): + log.Info(). + Int("discovered_peers", count). + Msg("DNS discovery interrupted") + return + default: + } + node := iter.Node() log.Debug(). Str("enode", node.URLv4()). 
From 5409e0b9b137387811e58e1284a7ad15a86ca891 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Thu, 19 Feb 2026 18:23:42 -0500 Subject: [PATCH 08/48] fix: handle disconnects for peer conns --- p2p/protocol.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/p2p/protocol.go b/p2p/protocol.go index d869ae2a3..0fcc547da 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -138,10 +138,27 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { ctx := opts.Context + // Disconnect peer when context is cancelled to unblock ReadMsg. + go func() { + <-ctx.Done() + p.Disconnect(ethp2p.DiscQuitting) + }() + // Handle all the of the messages here. for { + // Check for context cancellation before processing next message. + select { + case <-ctx.Done(): + return nil + default: + } + msg, err := rw.ReadMsg() if err != nil { + // Return nil on context cancellation to avoid error logging. + if ctx.Err() != nil { + return nil + } return err } From c474e29e1d7a76b4a405bea6f4a28cf65d5d6780 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Thu, 19 Feb 2026 19:16:49 -0500 Subject: [PATCH 09/48] fix: call cancel() --- cmd/p2p/sensor/sensor.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index b1a911a81..3bdb815bb 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -316,6 +316,7 @@ var SensorCmd = &cobra.Command{ // This gracefully stops the sensor so that the peers can be written to // the nodes file. 
log.Info().Msg("Stopping sensor...") + cancel() return nil case event := <-events: log.Debug().Any("event", event).Send() From 64932193643c8a119b7b2aa227cb52934770dc97 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Fri, 20 Feb 2026 14:16:09 -0500 Subject: [PATCH 10/48] fix: revert some graceful shutdown handling --- cmd/p2p/sensor/api.go | 15 +--- cmd/p2p/sensor/sensor.go | 158 ++++++++++++++++----------------------- p2p/protocol.go | 23 ------ 3 files changed, 65 insertions(+), 131 deletions(-) diff --git a/cmd/p2p/sensor/api.go b/cmd/p2p/sensor/api.go index ab195c42a..efba4b8c8 100644 --- a/cmd/p2p/sensor/api.go +++ b/cmd/p2p/sensor/api.go @@ -1,7 +1,6 @@ package sensor import ( - "context" "encoding/json" "fmt" "net/http" @@ -58,8 +57,7 @@ type apiData struct { // handleAPI sets up the API for interacting with the sensor. All endpoints // return information about the sensor node and all connected peers, including // the types and counts of eth packets sent and received by each peer. -// The server gracefully shuts down when the context is cancelled. 
-func handleAPI(ctx context.Context, server *ethp2p.Server, conns *p2p.Conns) { +func handleAPI(server *ethp2p.Server, conns *p2p.Conns) { mux := http.NewServeMux() mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { @@ -119,16 +117,7 @@ func handleAPI(ctx context.Context, server *ethp2p.Server, conns *p2p.Conns) { }) addr := fmt.Sprintf(":%d", inputSensorParams.APIPort) - httpServer := &http.Server{Addr: addr, Handler: mux} - - go func() { - <-ctx.Done() - if err := httpServer.Shutdown(context.Background()); err != nil { - log.Error().Err(err).Msg("Failed to shutdown API server") - } - }() - - if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { + if err := http.ListenAndServe(addr, mux); err != nil { log.Error().Err(err).Msg("Failed to start API handler") } } diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index 3bdb815bb..0032a079d 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -6,7 +6,6 @@ import ( _ "embed" "errors" "fmt" - "os" "os/signal" "syscall" "time" @@ -173,7 +172,10 @@ var SensorCmd = &cobra.Command{ return nil }, RunE: func(cmd *cobra.Command, args []string) error { - db, err := newDatabase(cmd.Context()) + ctx, stop := signal.NotifyContext(cmd.Context(), syscall.SIGINT, syscall.SIGTERM) + defer stop() + + db, err := newDatabase(ctx) if err != nil { return err } @@ -214,7 +216,7 @@ var SensorCmd = &cobra.Command{ }) opts := p2p.EthProtocolOptions{ - Context: cmd.Context(), + Context: ctx, Database: db, GenesisHash: common.HexToHash(inputSensorParams.GenesisHash), RPC: inputSensorParams.RPC, @@ -259,64 +261,36 @@ var SensorCmd = &cobra.Command{ if err = server.Start(); err != nil { return err } - defer server.Stop() + defer stopServer(&server) events := make(chan *ethp2p.PeerEvent) sub := server.SubscribeEvents(events) defer sub.Unsubscribe() - ticker := time.NewTicker(2 * time.Second) // Ticker for recurring tasks every 2 seconds. 
- ticker1h := time.NewTicker(time.Hour) // Ticker for running DNS discovery every hour. + ticker := time.NewTicker(2 * time.Second) defer ticker.Stop() - defer ticker1h.Stop() - - dnsLock := make(chan struct{}, 1) - signals := make(chan os.Signal, 1) - signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) - - // Create a cancellable context for graceful shutdown of background goroutines. - ctx, cancel := context.WithCancel(cmd.Context()) - defer cancel() if inputSensorParams.ShouldRunPprof { - go handlePprof(ctx) + go handlePprof() } if inputSensorParams.ShouldRunPrometheus { - go handlePrometheus(ctx) + go handlePrometheus() } - go handleAPI(ctx, &server, conns) - - // Start the RPC server for receiving transactions - go handleRPC(ctx, conns, inputSensorParams.NetworkID) - - // Run DNS discovery immediately at startup. - go handleDNSDiscovery(ctx, &server, dnsLock) + go handleAPI(&server, conns) + go handleRPC(conns, inputSensorParams.NetworkID) + go handleDNSDiscovery(&server) for { select { case <-ticker.C: peersGauge.Set(float64(server.PeerCount())) - db.WritePeers(cmd.Context(), server.Peers(), time.Now()) - + db.WritePeers(ctx, server.Peers(), time.Now()) metrics.Update(conns.HeadBlock().Block, conns.OldestBlock()) - - urls := []string{} - for _, peer := range server.Peers() { - urls = append(urls, peer.Node().URLv4()) - } - - if err := p2p.WritePeers(inputSensorParams.NodesFile, urls); err != nil { - log.Error().Err(err).Msg("Failed to write nodes to file") - } - case <-ticker1h.C: - go handleDNSDiscovery(ctx, &server, dnsLock) - case <-signals: - // This gracefully stops the sensor so that the peers can be written to - // the nodes file. 
+ writePeers(server.Peers()) + case <-ctx.Done(): log.Info().Msg("Stopping sensor...") - cancel() return nil case event := <-events: log.Debug().Any("event", event).Send() @@ -327,23 +301,41 @@ var SensorCmd = &cobra.Command{ }, } -// handlePprof starts a server for performance profiling using pprof on the -// specified port. This allows for real-time monitoring and analysis of the -// sensor's performance. The port number is configured through -// inputSensorParams.PprofPort. The server gracefully shuts down when the -// context is cancelled. -func handlePprof(ctx context.Context) { - addr := fmt.Sprintf(":%d", inputSensorParams.PprofPort) - server := &http.Server{Addr: addr} +// writePeers writes the enode URLs of connected peers to the nodes file. +func writePeers(peers []*ethp2p.Peer) { + urls := make([]string, 0, len(peers)) + for _, peer := range peers { + urls = append(urls, peer.Node().URLv4()) + } + + if err := p2p.WritePeers(inputSensorParams.NodesFile, urls); err != nil { + log.Error().Err(err).Msg("Failed to write nodes to file") + } +} + +// stopServer stops the p2p server with a timeout to avoid hanging on shutdown. +// This is necessary because go-ethereum's discovery shutdown can deadlock. +func stopServer(server *ethp2p.Server) { + done := make(chan struct{}) go func() { - <-ctx.Done() - if err := server.Shutdown(context.Background()); err != nil { - log.Error().Err(err).Msg("Failed to shutdown pprof server") - } + server.Stop() + close(done) }() - if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + select { + case <-done: + case <-time.After(5 * time.Second): + } +} + +// handlePprof starts a server for performance profiling using pprof on the +// specified port. This allows for real-time monitoring and analysis of the +// sensor's performance. The port number is configured through +// inputSensorParams.PprofPort. An error is logged if the server fails to start. 
+func handlePprof() { + addr := fmt.Sprintf(":%d", inputSensorParams.PprofPort) + if err := http.ListenAndServe(addr, nil); err != nil { log.Error().Err(err).Msg("Failed to start pprof") } } @@ -351,46 +343,36 @@ func handlePprof(ctx context.Context) { // handlePrometheus starts a server to expose Prometheus metrics at the /metrics // endpoint. This enables Prometheus to scrape and collect metrics data for // monitoring purposes. The port number is configured through -// inputSensorParams.PrometheusPort. The server gracefully shuts down when the -// context is cancelled. -func handlePrometheus(ctx context.Context) { - mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.Handler()) - +// inputSensorParams.PrometheusPort. An error is logged if the server fails to +// start. +func handlePrometheus() { + http.Handle("/metrics", promhttp.Handler()) addr := fmt.Sprintf(":%d", inputSensorParams.PrometheusPort) - server := &http.Server{Addr: addr, Handler: mux} - - go func() { - <-ctx.Done() - if err := server.Shutdown(context.Background()); err != nil { - log.Error().Err(err).Msg("Failed to shutdown Prometheus server") - } - }() - - if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + if err := http.ListenAndServe(addr, nil); err != nil { log.Error().Err(err).Msg("Failed to start Prometheus handler") } } // handleDNSDiscovery performs DNS-based peer discovery and adds new peers to // the p2p server. It uses an iterator to discover peers incrementally rather -// than loading all nodes at once. The lock channel prevents concurrent runs. -// Discovery stops when the context is cancelled. -func handleDNSDiscovery(ctx context.Context, server *ethp2p.Server, lock chan struct{}) { +// than loading all nodes at once. Runs immediately and then hourly. 
+func handleDNSDiscovery(server *ethp2p.Server) { if len(inputSensorParams.DiscoveryDNS) == 0 { return } - select { - case lock <- struct{}{}: - defer func() { <-lock }() - case <-ctx.Done(): - return - default: - log.Warn().Msg("DNS discovery already running, skipping") - return + discoverPeers(server) + + ticker := time.NewTicker(time.Hour) + defer ticker.Stop() + + for range ticker.C { + discoverPeers(server) } +} +// discoverPeers performs a single DNS discovery round. +func discoverPeers(server *ethp2p.Server) { log.Info(). Str("discovery-dns", inputSensorParams.DiscoveryDNS). Msg("Starting DNS discovery") @@ -403,27 +385,13 @@ func handleDNSDiscovery(ctx context.Context, server *ethp2p.Server, lock chan st } defer iter.Close() - // Add DNS-discovered peers using the iterator. count := 0 for iter.Next() { - // Check for context cancellation to stop discovery promptly. - select { - case <-ctx.Done(): - log.Info(). - Int("discovered_peers", count). - Msg("DNS discovery interrupted") - return - default: - } - node := iter.Node() log.Debug(). Str("enode", node.URLv4()). Msg("Discovered peer through DNS") - // Add the peer to the static node set. The server itself handles whether to - // connect to the peer if it's already connected. If a node is part of the - // static peer set, the server will handle reconnecting after disconnects. server.AddPeer(node) count++ } diff --git a/p2p/protocol.go b/p2p/protocol.go index 0fcc547da..1336bcc55 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -138,27 +138,10 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { ctx := opts.Context - // Disconnect peer when context is cancelled to unblock ReadMsg. - go func() { - <-ctx.Done() - p.Disconnect(ethp2p.DiscQuitting) - }() - // Handle all the of the messages here. for { - // Check for context cancellation before processing next message. 
- select { - case <-ctx.Done(): - return nil - default: - } - msg, err := rw.ReadMsg() if err != nil { - // Return nil on context cancellation to avoid error logging. - if ctx.Err() != nil { - return nil - } return err } @@ -262,12 +245,6 @@ func (c *conn) readStatus(packet *eth.StatusPacket68) error { return err } - defer func() { - if msgErr := msg.Discard(); msgErr != nil { - c.logger.Error().Err(msgErr).Msg("Failed to discard message") - } - }() - if msg.Code != eth.StatusMsg { return errors.New("expected status message code") } From 1ef011582859e965e5bf0a86f2cf15f7d9d1e80f Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Fri, 20 Feb 2026 14:18:35 -0500 Subject: [PATCH 11/48] fix: revert rpc.go --- cmd/p2p/sensor/rpc.go | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/cmd/p2p/sensor/rpc.go b/cmd/p2p/sensor/rpc.go index c05586ac1..f6b76dcbb 100644 --- a/cmd/p2p/sensor/rpc.go +++ b/cmd/p2p/sensor/rpc.go @@ -1,7 +1,6 @@ package sensor import ( - "context" "encoding/json" "fmt" "io" @@ -43,7 +42,7 @@ type rpcError struct { // and broadcasts valid transactions to all connected peers. // Supports both single requests and batch requests per JSON-RPC 2.0 specification. // The server gracefully shuts down when the context is cancelled. 
-func handleRPC(ctx context.Context, conns *p2p.Conns, networkID uint64) { +func handleRPC(conns *p2p.Conns, networkID uint64) { // Use network ID as chain ID for signature validation chainID := new(big.Int).SetUint64(networkID) @@ -89,15 +88,8 @@ func handleRPC(ctx context.Context, conns *p2p.Conns, networkID uint64) { addr := fmt.Sprintf(":%d", inputSensorParams.RPCPort) server := &http.Server{Addr: addr, Handler: mux} - go func() { - <-ctx.Done() - if err := server.Shutdown(context.Background()); err != nil { - log.Error().Err(err).Msg("Failed to shutdown RPC server") - } - }() - log.Info().Str("addr", addr).Msg("Starting JSON-RPC server") - if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + if err := server.ListenAndServe(); err != nil { log.Error().Err(err).Msg("Failed to start RPC server") } } From 7a25ede883f1408b0b96db49b4b28646f6741292 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Fri, 20 Feb 2026 14:19:22 -0500 Subject: [PATCH 12/48] fix: revert again --- cmd/p2p/sensor/rpc.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/p2p/sensor/rpc.go b/cmd/p2p/sensor/rpc.go index f6b76dcbb..2b51a6de6 100644 --- a/cmd/p2p/sensor/rpc.go +++ b/cmd/p2p/sensor/rpc.go @@ -41,7 +41,6 @@ type rpcError struct { // It handles eth_sendRawTransaction requests, validates transaction signatures, // and broadcasts valid transactions to all connected peers. // Supports both single requests and batch requests per JSON-RPC 2.0 specification. -// The server gracefully shuts down when the context is cancelled. 
func handleRPC(conns *p2p.Conns, networkID uint64) { // Use network ID as chain ID for signature validation chainID := new(big.Int).SetUint64(networkID) @@ -86,10 +85,9 @@ func handleRPC(conns *p2p.Conns, networkID uint64) { }) addr := fmt.Sprintf(":%d", inputSensorParams.RPCPort) - server := &http.Server{Addr: addr, Handler: mux} log.Info().Str("addr", addr).Msg("Starting JSON-RPC server") - if err := server.ListenAndServe(); err != nil { + if err := http.ListenAndServe(addr, mux); err != nil { log.Error().Err(err).Msg("Failed to start RPC server") } } From b33386ca3f782986dddffdbe093dc8f7cfbc7b05 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Fri, 20 Feb 2026 14:19:38 -0500 Subject: [PATCH 13/48] fix: one more revert --- cmd/p2p/sensor/rpc.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/p2p/sensor/rpc.go b/cmd/p2p/sensor/rpc.go index 2b51a6de6..f7dde01ee 100644 --- a/cmd/p2p/sensor/rpc.go +++ b/cmd/p2p/sensor/rpc.go @@ -85,7 +85,6 @@ func handleRPC(conns *p2p.Conns, networkID uint64) { }) addr := fmt.Sprintf(":%d", inputSensorParams.RPCPort) - log.Info().Str("addr", addr).Msg("Starting JSON-RPC server") if err := http.ListenAndServe(addr, mux); err != nil { log.Error().Err(err).Msg("Failed to start RPC server") From 5f6fd24de0be7f0886ce5037e38427f060c29c3e Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Fri, 20 Feb 2026 14:25:37 -0500 Subject: [PATCH 14/48] fix: lint --- p2p/conns.go | 38 ++------------------------------------ p2p/protocol.go | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 36 deletions(-) diff --git a/p2p/conns.go b/p2p/conns.go index 4b85b2fff..7c291ae03 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -299,44 +299,10 @@ func (c *Conns) BroadcastBlockHashes(hashes []common.Hash, numbers []uint64) int } count := 0 - for _, cn := range c.conns { - // Filter hashes this peer doesn't know about - unknownHashes := make([]common.Hash, 0, len(hashes)) - unknownNumbers := make([]uint64, 0, len(numbers)) - - for i, hash 
:= range hashes { - if !cn.hasKnownBlock(hash) { - unknownHashes = append(unknownHashes, hash) - unknownNumbers = append(unknownNumbers, numbers[i]) - } - } - - if len(unknownHashes) == 0 { - continue - } - - // Send NewBlockHashesPacket - packet := make(eth.NewBlockHashesPacket, len(unknownHashes)) - for i := range unknownHashes { - packet[i].Hash = unknownHashes[i] - packet[i].Number = unknownNumbers[i] + if cn.sendBlockHashes(hashes, numbers) { + count++ } - - cn.countMsgSent(packet.Name(), float64(len(unknownHashes))) - if err := ethp2p.Send(cn.rw, eth.NewBlockHashesMsg, packet); err != nil { - cn.logger.Debug(). - Err(err). - Msg("Failed to send block hashes") - continue - } - - // Mark hashes as known for this peer - for _, hash := range unknownHashes { - cn.addKnownBlock(hash) - } - - count++ } if count > 0 { diff --git a/p2p/protocol.go b/p2p/protocol.go index 1336bcc55..f74510aaf 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -440,6 +440,45 @@ func (c *conn) hasKnownBlock(hash common.Hash) bool { return c.knownBlocks.Contains(hash) } +// broadcastBlockHashes sends block hashes to a peer, filtering out hashes the peer +// already knows about. Returns true if the send was successful. 
+func (c *conn) broadcastBlockHashes(hashes []common.Hash, numbers []uint64) bool { + // Filter hashes this peer doesn't know about + unknownHashes := make([]common.Hash, 0, len(hashes)) + unknownNumbers := make([]uint64, 0, len(numbers)) + + for i, hash := range hashes { + if !c.hasKnownBlock(hash) { + unknownHashes = append(unknownHashes, hash) + unknownNumbers = append(unknownNumbers, numbers[i]) + } + } + + if len(unknownHashes) == 0 { + return false + } + + // Send NewBlockHashesPacket + packet := make(eth.NewBlockHashesPacket, len(unknownHashes)) + for i := range unknownHashes { + packet[i].Hash = unknownHashes[i] + packet[i].Number = unknownNumbers[i] + } + + c.countMsgSent(packet.Name(), float64(len(unknownHashes))) + if err := ethp2p.Send(c.rw, eth.NewBlockHashesMsg, packet); err != nil { + c.logger.Debug().Err(err).Msg("Failed to send block hashes") + return false + } + + // Mark hashes as known for this peer + for _, hash := range unknownHashes { + c.addKnownBlock(hash) + } + + return true +} + func (c *conn) handleTransactions(ctx context.Context, msg ethp2p.Msg) error { payload, err := io.ReadAll(msg.Payload) if err != nil { From 0aec08029ad8a46cf4eb649709614d3607b8ea1c Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Fri, 20 Feb 2026 14:25:57 -0500 Subject: [PATCH 15/48] fix: rename --- p2p/protocol.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/p2p/protocol.go b/p2p/protocol.go index f74510aaf..7262ae0fb 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -440,9 +440,9 @@ func (c *conn) hasKnownBlock(hash common.Hash) bool { return c.knownBlocks.Contains(hash) } -// broadcastBlockHashes sends block hashes to a peer, filtering out hashes the peer +// sendBlockHashes sends block hashes to a peer, filtering out hashes the peer // already knows about. Returns true if the send was successful. 
-func (c *conn) broadcastBlockHashes(hashes []common.Hash, numbers []uint64) bool { +func (c *conn) sendBlockHashes(hashes []common.Hash, numbers []uint64) bool { // Filter hashes this peer doesn't know about unknownHashes := make([]common.Hash, 0, len(hashes)) unknownNumbers := make([]uint64, 0, len(numbers)) From 7fa8aaed8866105ceba154f55e80e74cc68b1fd8 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 24 Feb 2026 20:41:30 -0500 Subject: [PATCH 16/48] fix: async tx announcements and goroutine cleanup --- p2p/conns.go | 101 +++++++++--------------- p2p/protocol.go | 201 +++++++++++++++++++++++++++++++++++++++--------- 2 files changed, 200 insertions(+), 102 deletions(-) diff --git a/p2p/conns.go b/p2p/conns.go index 7c291ae03..1a2624214 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -167,63 +167,33 @@ func (c *Conns) BroadcastTxs(txs types.Transactions) int { return count } -// BroadcastTxHashes broadcasts transaction hashes to peers that don't already -// know about them and returns the number of peers the hashes were successfully -// sent to. If broadcast flags are disabled, this is a no-op. +// BroadcastTxHashes enqueues transaction hashes to per-peer broadcast queues. +// Each peer has a dedicated goroutine that drains the queue and batches sends. +// Returns the number of peers the hashes were enqueued to. +// If broadcast flags are disabled, this is a no-op. 
func (c *Conns) BroadcastTxHashes(hashes []common.Hash) int { - if !c.shouldBroadcastTxHashes { + if !c.shouldBroadcastTxHashes || len(hashes) == 0 { return 0 } + // Copy peers to avoid holding lock during sends c.mu.RLock() - defer c.mu.RUnlock() - - if len(hashes) == 0 { - return 0 - } - - count := 0 + peers := make([]*conn, 0, len(c.conns)) for _, cn := range c.conns { - // Filter hashes this peer doesn't know about - unknownHashes := make([]common.Hash, 0, len(hashes)) - for _, hash := range hashes { - if !cn.hasKnownTx(hash) { - unknownHashes = append(unknownHashes, hash) - } - } - - if len(unknownHashes) == 0 { - continue + if cn.txAnnounce != nil { + peers = append(peers, cn) } - - // Send NewPooledTransactionHashesPacket - packet := eth.NewPooledTransactionHashesPacket{ - Types: make([]byte, len(unknownHashes)), - Sizes: make([]uint32, len(unknownHashes)), - Hashes: unknownHashes, - } - - cn.countMsgSent(packet.Name(), float64(len(unknownHashes))) - if err := ethp2p.Send(cn.rw, eth.NewPooledTransactionHashesMsg, packet); err != nil { - cn.logger.Debug(). - Err(err). - Msg("Failed to send transaction hashes") - continue - } - - // Mark hashes as known for this peer - for _, hash := range unknownHashes { - cn.addKnownTx(hash) - } - - count++ } + c.mu.RUnlock() - if count > 0 { - log.Debug(). - Int("peers", count). - Int("hashes", len(hashes)). - Msg("Broadcasted transaction hashes") + count := 0 + for _, cn := range peers { + select { + case cn.txAnnounce <- hashes: + count++ + case <-cn.closeCh: + // Peer closing, skip + } } return count @@ -283,35 +253,38 @@ func (c *Conns) BroadcastBlock(block *types.Block, td *big.Int) int { return count } -// BroadcastBlockHashes broadcasts block hashes with their corresponding block -// numbers to peers that don't already know about them and returns the number -// of peers the hashes were successfully sent to. If broadcast flags are disabled, this is a no-op. 
+// BroadcastBlockHashes enqueues block hashes to per-peer broadcast queues. +// Each peer has a dedicated goroutine that drains the queue and sends. +// Returns the number of peers the hashes were enqueued to. +// If broadcast flags are disabled, this is a no-op. func (c *Conns) BroadcastBlockHashes(hashes []common.Hash, numbers []uint64) int { - if !c.shouldBroadcastBlockHashes { + if !c.shouldBroadcastBlockHashes || len(hashes) == 0 || len(hashes) != len(numbers) { return 0 } + // Build packet once, share across all peers + packet := make(eth.NewBlockHashesPacket, len(hashes)) + for i := range hashes { + packet[i].Hash = hashes[i] + packet[i].Number = numbers[i] + } + c.mu.RLock() defer c.mu.RUnlock() - if len(hashes) == 0 || len(hashes) != len(numbers) { - return 0 - } - count := 0 for _, cn := range c.conns { - if cn.sendBlockHashes(hashes, numbers) { + if cn.blockAnnounce == nil { + continue + } + // Non-blocking send, drop if queue full (matches Bor) + select { + case cn.blockAnnounce <- packet: count++ + default: } } - if count > 0 { - log.Debug(). - Int("peers", count). - Int("hashes", len(hashes)). - Msg("Broadcasted block hashes") - } - return count } diff --git a/p2p/protocol.go b/p2p/protocol.go index 7262ae0fb..037ba508e 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -22,6 +22,20 @@ import ( "github.com/0xPolygon/polygon-cli/p2p/database" ) +const ( + // maxTxPacketSize is the target size for transaction announcement packets. + // Matches Bor's limit of 100KB. + maxTxPacketSize = 100 * 1024 + + // maxQueuedTxAnns is the maximum number of transaction announcements to + // queue before dropping oldest. Matches Bor. + maxQueuedTxAnns = 4096 + + // maxQueuedBlockAnns is the maximum number of block announcements to queue + // before dropping. Matches Bor. + maxQueuedBlockAnns = 4 +) + // conn represents an individual connection with a peer. 
type conn struct { sensorID string @@ -63,6 +77,13 @@ type conn struct { // messages tracks per-peer message counts for API visibility. messages *PeerMessages + + // Broadcast queues for per-peer rate limiting. These decouple message + // reception from broadcasting to prevent flooding peers with immediate + // broadcasts. + txAnnounce chan []common.Hash + blockAnnounce chan eth.NewBlockHashesPacket + closeCh chan struct{} } // EthProtocolOptions is the options used when creating a new eth protocol. @@ -116,6 +137,23 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { knownTxs: NewCache[common.Hash, struct{}](opts.Conns.KnownTxsOpts()), knownBlocks: NewCache[common.Hash, struct{}](opts.Conns.KnownBlocksOpts()), messages: NewPeerMessages(), + txAnnounce: make(chan []common.Hash), + blockAnnounce: make(chan eth.NewBlockHashesPacket, maxQueuedBlockAnns), + closeCh: make(chan struct{}), + } + + // Ensure cleanup happens on any exit path (including statusExchange failure) + defer func() { + close(c.closeCh) + opts.Conns.Remove(c) + }() + + // Start broadcast loops for per-peer queued broadcasting + if opts.ShouldBroadcastTxHashes { + go c.txAnnouncementLoop() + } + if opts.ShouldBroadcastBlockHashes { + go c.blockAnnouncementLoop() } head := c.conns.HeadBlock() @@ -134,7 +172,6 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { // Send the connection object to the conns manager for RPC broadcasting opts.Conns.Add(c) - defer opts.Conns.Remove(c) ctx := opts.Context @@ -398,8 +435,8 @@ func (c *conn) handleNewBlockHashes(ctx context.Context, msg ethp2p.Msg) error { c.db.WriteBlockHashes(ctx, c.node, uniqueHashes, tfs) - // Broadcast block hashes to other peers - c.conns.BroadcastBlockHashes(uniqueHashes, uniqueNumbers) + // Broadcast block hashes to other peers asynchronously + go c.conns.BroadcastBlockHashes(uniqueHashes, uniqueNumbers) return nil } @@ -440,43 +477,131 @@ func (c *conn) hasKnownBlock(hash 
common.Hash) bool { return c.knownBlocks.Contains(hash) } -// sendBlockHashes sends block hashes to a peer, filtering out hashes the peer -// already knows about. Returns true if the send was successful. -func (c *conn) sendBlockHashes(hashes []common.Hash, numbers []uint64) bool { - // Filter hashes this peer doesn't know about - unknownHashes := make([]common.Hash, 0, len(hashes)) - unknownNumbers := make([]uint64, 0, len(numbers)) - - for i, hash := range hashes { - if !c.hasKnownBlock(hash) { - unknownHashes = append(unknownHashes, hash) - unknownNumbers = append(unknownNumbers, numbers[i]) +// txAnnouncementLoop schedules transaction hash announcements to the peer. +// Matches Bor's announceTransactions pattern: async sends with internal queue. +func (c *conn) txAnnouncementLoop() { + var ( + queue []common.Hash // Queue of hashes to announce + done chan struct{} // Non-nil if background announcer is running + fail = make(chan error, 1) // Channel used to receive network error + failed bool // Flag whether a send failed + ) + + for { + // If there's no in-flight announce running, check if a new one is needed + if done == nil && len(queue) > 0 { + // Pile transaction hashes until we reach our allowed network limit + var ( + count int + pending []common.Hash + size int + ) + for count = 0; count < len(queue) && size < maxTxPacketSize; count++ { + hash := queue[count] + if !c.hasKnownTx(hash) { + pending = append(pending, hash) + size += common.HashLength + } + } + // Shift and trim queue + queue = queue[:copy(queue, queue[count:])] + + // If there's anything available to transfer, fire up an async writer + if len(pending) > 0 { + done = make(chan struct{}) + go func() { + if err := c.sendTxAnnouncements(pending); err != nil { + fail <- err + return + } + close(done) + }() + } + } + + // Transfer goroutine may or may not have been started, listen for events + select { + case hashes := <-c.txAnnounce: + // If the connection failed, discard all transaction events + 
if failed { + continue + } + // New batch of transactions to be broadcast, queue them (with cap) + queue = append(queue, hashes...) + if len(queue) > maxQueuedTxAnns { + // Drop oldest to keep newest + queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])] + } + + case <-done: + done = nil + + case <-fail: + failed = true + + case <-c.closeCh: + return } } +} - if len(unknownHashes) == 0 { - return false +// sendTxAnnouncements sends a batch of transaction hashes to the peer. +func (c *conn) sendTxAnnouncements(hashes []common.Hash) error { + packet := eth.NewPooledTransactionHashesPacket{ + Types: make([]byte, len(hashes)), + Sizes: make([]uint32, len(hashes)), + Hashes: hashes, + } + c.countMsgSent(packet.Name(), float64(len(hashes))) + if err := ethp2p.Send(c.rw, eth.NewPooledTransactionHashesMsg, packet); err != nil { + c.logger.Debug().Err(err).Msg("Failed to send tx announcements") + return err + } + for _, hash := range hashes { + c.addKnownTx(hash) } + return nil +} - // Send NewBlockHashesPacket - packet := make(eth.NewBlockHashesPacket, len(unknownHashes)) - for i := range unknownHashes { - packet[i].Hash = unknownHashes[i] - packet[i].Number = unknownNumbers[i] +// blockAnnouncementLoop drains the blockAnnounce queue and sends block +// announcements. Matches Bor's broadcastBlocks pattern. +func (c *conn) blockAnnouncementLoop() { + for { + select { + case packet := <-c.blockAnnounce: + if err := c.sendBlockAnnouncements(packet); err != nil { + return + } + case <-c.closeCh: + return + } } +} - c.countMsgSent(packet.Name(), float64(len(unknownHashes))) - if err := ethp2p.Send(c.rw, eth.NewBlockHashesMsg, packet); err != nil { - c.logger.Debug().Err(err).Msg("Failed to send block hashes") - return false +// sendBlockAnnouncements sends a batch of block hashes to the peer, +// filtering out blocks the peer already knows about. 
+func (c *conn) sendBlockAnnouncements(packet eth.NewBlockHashesPacket) error { + // Filter to only unknown blocks + var filtered eth.NewBlockHashesPacket + for _, entry := range packet { + if !c.hasKnownBlock(entry.Hash) { + filtered = append(filtered, entry) + } } - // Mark hashes as known for this peer - for _, hash := range unknownHashes { - c.addKnownBlock(hash) + if len(filtered) == 0 { + return nil } - return true + c.countMsgSent(filtered.Name(), float64(len(filtered))) + if err := ethp2p.Send(c.rw, eth.NewBlockHashesMsg, filtered); err != nil { + c.logger.Debug().Err(err).Msg("Failed to send block announcements") + return err + } + for _, entry := range filtered { + c.addKnownBlock(entry.Hash) + } + return nil } func (c *conn) handleTransactions(ctx context.Context, msg ethp2p.Msg) error { @@ -512,9 +637,9 @@ func (c *conn) handleTransactions(ctx context.Context, msg ethp2p.Msg) error { hashes[i] = tx.Hash() } - // Broadcast transactions or hashes to other peers - c.conns.BroadcastTxs(types.Transactions(txs)) - c.conns.BroadcastTxHashes(hashes) + // Broadcast transactions or hashes to other peers asynchronously + go c.conns.BroadcastTxs(types.Transactions(txs)) + go c.conns.BroadcastTxHashes(hashes) return nil } @@ -710,9 +835,9 @@ func (c *conn) handleNewBlock(ctx context.Context, msg ethp2p.Msg) error { TD: packet.TD, }) - // Broadcast block or block hash to other peers - c.conns.BroadcastBlock(packet.Block, packet.TD) - c.conns.BroadcastBlockHashes( + // Broadcast block or block hash to other peers asynchronously + go c.conns.BroadcastBlock(packet.Block, packet.TD) + go c.conns.BroadcastBlockHashes( []common.Hash{hash}, []uint64{packet.Block.Number().Uint64()}, ) @@ -807,9 +932,9 @@ func (c *conn) handlePooledTransactions(ctx context.Context, msg ethp2p.Msg) err hashes[i] = tx.Hash() } - // Broadcast transactions or hashes to other peers - c.conns.BroadcastTxs(types.Transactions(packet.PooledTransactionsResponse)) - c.conns.BroadcastTxHashes(hashes) + 
// Broadcast transactions or hashes to other peers asynchronously + go c.conns.BroadcastTxs(types.Transactions(packet.PooledTransactionsResponse)) + go c.conns.BroadcastTxHashes(hashes) return nil } From 5d4ce45ad49c2a4da199dd85aea161e9db1f7922 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Wed, 25 Feb 2026 08:38:28 -0500 Subject: [PATCH 17/48] fix: remove comment --- p2p/cache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/p2p/cache.go b/p2p/cache.go index b73309743..2c0bf1a62 100644 --- a/p2p/cache.go +++ b/p2p/cache.go @@ -24,7 +24,7 @@ type Cache[K comparable, V any] struct { type entry[K comparable, V any] struct { key K value V - expiresAt *time.Time // nil when TTL=0, saves 16 bytes per entry + expiresAt *time.Time } // NewCache creates a new cache with the given options. From 865141a9c40a0812d0e471c9a852c2902ffd0b7a Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Fri, 20 Feb 2026 12:09:45 -0500 Subject: [PATCH 18/48] feat(sensor): support more rpc methods --- cmd/p2p/sensor/rpc.go | 508 +++++++++++++++++++++++++++++++++++----- cmd/p2p/sensor/usage.md | 28 +++ p2p/conns.go | 84 +++++++ 3 files changed, 556 insertions(+), 64 deletions(-) diff --git a/cmd/p2p/sensor/rpc.go b/cmd/p2p/sensor/rpc.go index f7dde01ee..1812dd2f5 100644 --- a/cmd/p2p/sensor/rpc.go +++ b/cmd/p2p/sensor/rpc.go @@ -9,8 +9,10 @@ import ( "strings" "github.com/0xPolygon/polygon-cli/p2p" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/rs/zerolog/log" ) @@ -74,14 +76,22 @@ func handleRPC(conns *p2p.Conns, networkID uint64) { return } - // Handle eth_sendRawTransaction - if req.Method == "eth_sendRawTransaction" { - handleSendRawTransaction(w, req, conns, chainID) - return + // Process request (reuse same logic as batch) + var txs types.Transactions + resp := processRequest(req, conns, chainID, &txs) + + // Broadcast any 
transactions + if len(txs) > 0 { + log.Info().Str("hash", txs[0].Hash().Hex()).Msg("Broadcasting transaction") + count := conns.BroadcastTxs(txs) + log.Info().Str("hash", txs[0].Hash().Hex()).Int("peers", count).Msg("Transaction broadcast complete") } - // Method not found - writeError(w, -32601, "Method not found", req.ID) + // Write response + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(resp); err != nil { + log.Error().Err(err).Msg("Failed to encode response") + } }) addr := fmt.Sprintf(":%d", inputSensorParams.RPCPort) @@ -107,22 +117,9 @@ func writeError(w http.ResponseWriter, code int, message string, id any) { } } -// writeResult writes a JSON-RPC 2.0 success response with the specified result and request ID. -func writeResult(w http.ResponseWriter, result any, id any) { - w.Header().Set("Content-Type", "application/json") - response := rpcResponse{ - JSONRPC: "2.0", - Result: result, - ID: id, - } - if err := json.NewEncoder(w).Encode(response); err != nil { - log.Error().Err(err).Msg("Failed to encode result response") - } -} - -// handleBatchRequest processes JSON-RPC 2.0 batch requests, validates all transactions, -// and broadcasts valid transactions to connected peers. Returns a batch response with -// results or errors for each request in the batch. +// handleBatchRequest processes JSON-RPC 2.0 batch requests. +// For eth_sendRawTransaction requests, it collects valid transactions for batch broadcasting. +// Returns a batch response with results or errors for each request. 
func handleBatchRequest(w http.ResponseWriter, body []byte, conns *p2p.Conns, chainID *big.Int) { // Parse batch of requests var requests []rpcRequest @@ -137,35 +134,13 @@ func handleBatchRequest(w http.ResponseWriter, body []byte, conns *p2p.Conns, ch return } - // Process all requests and collect valid transactions for batch broadcasting + // Process all requests responses := make([]rpcResponse, 0, len(requests)) txs := make(types.Transactions, 0) for _, req := range requests { - if req.Method != "eth_sendRawTransaction" { - responses = append(responses, rpcResponse{ - JSONRPC: "2.0", - Error: &rpcError{ - Code: -32601, - Message: "Method not found", - }, - ID: req.ID, - }) - continue - } - - tx, response := validateTx(req, chainID) - if tx == nil { - responses = append(responses, response) - continue - } - - txs = append(txs, tx) - responses = append(responses, rpcResponse{ - JSONRPC: "2.0", - Result: tx.Hash().Hex(), - ID: req.ID, - }) + resp := processRequest(req, conns, chainID, &txs) + responses = append(responses, resp) } // Broadcast all valid transactions in a single batch if there are any @@ -190,6 +165,78 @@ func handleBatchRequest(w http.ResponseWriter, body []byte, conns *p2p.Conns, ch } } +// newResultResponse creates a success response. +func newResultResponse(result any, id any) rpcResponse { + return rpcResponse{JSONRPC: "2.0", Result: result, ID: id} +} + +// newErrorResponse creates an error response. +func newErrorResponse(err *rpcError, id any) rpcResponse { + return rpcResponse{JSONRPC: "2.0", Error: err, ID: id} +} + +// processRequest handles a single RPC request and returns a response. +// For eth_sendRawTransaction, valid transactions are appended to txs for batch broadcasting. 
+func processRequest(req rpcRequest, conns *p2p.Conns, chainID *big.Int, txs *types.Transactions) rpcResponse { + switch req.Method { + case "eth_sendRawTransaction": + tx, resp := validateTx(req, chainID) + if tx == nil { + return resp + } + *txs = append(*txs, tx) + return newResultResponse(tx.Hash().Hex(), req.ID) + + case "eth_chainId": + return newResultResponse(hexutil.EncodeBig(chainID), req.ID) + + case "eth_blockNumber": + head := conns.HeadBlock() + if head.Block == nil { + return newResultResponse(nil, req.ID) + } + return newResultResponse(hexutil.EncodeUint64(head.Block.NumberU64()), req.ID) + + case "eth_gasPrice": + return newResultResponse(hexutil.EncodeBig(conns.SuggestGasPrice()), req.ID) + + case "eth_getBlockByHash": + result, err := getBlockByHash(req, conns) + return handleMethodResult(result, err, req.ID) + + case "eth_getBlockByNumber": + result, err := getBlockByNumber(req, conns) + return handleMethodResult(result, err, req.ID) + + case "eth_getTransactionByHash": + result, err := getTransactionByHash(req, conns) + return handleMethodResult(result, err, req.ID) + + case "eth_getTransactionByBlockHashAndIndex": + result, err := getTransactionByBlockHashAndIndex(req, conns) + return handleMethodResult(result, err, req.ID) + + case "eth_getBlockTransactionCountByHash": + result, err := getBlockTransactionCountByHash(req, conns) + return handleMethodResult(result, err, req.ID) + + case "eth_getUncleCountByBlockHash": + result, err := getUncleCountByBlockHash(req, conns) + return handleMethodResult(result, err, req.ID) + + default: + return newErrorResponse(&rpcError{Code: -32601, Message: "Method not found"}, req.ID) + } +} + +// handleMethodResult converts a method's result and error into an rpcResponse. 
+func handleMethodResult(result any, err *rpcError, id any) rpcResponse { + if err != nil { + return newErrorResponse(err, id) + } + return newResultResponse(result, id) +} + // validateTx validates a transaction from a JSON-RPC request by decoding the raw // transaction hex, unmarshaling it, and verifying the signature. Returns the transaction if valid // (with an empty response), or nil transaction with an error response if validation fails. @@ -276,26 +323,359 @@ func validateTx(req rpcRequest, chainID *big.Int) (*types.Transaction, rpcRespon return tx, rpcResponse{} } -// handleSendRawTransaction processes eth_sendRawTransaction requests, validates the -// transaction, broadcasts it to all connected peers, and writes the transaction hash -// as a JSON-RPC response. -func handleSendRawTransaction(w http.ResponseWriter, req rpcRequest, conns *p2p.Conns, chainID *big.Int) { - tx, response := validateTx(req, chainID) - if tx == nil { - writeError(w, response.Error.Code, response.Error.Message, response.ID) - return +// parseFullTxParam extracts the fullTx boolean from params[1], defaulting to false. +func parseFullTxParam(params []any) bool { + if len(params) >= 2 { + if fullTx, ok := params[1].(bool); ok { + return fullTx + } } + return false +} - log.Info(). - Str("hash", tx.Hash().Hex()). - Msg("Broadcasting transaction") +// getBlockByHash retrieves a block by its hash from the cache. +func getBlockByHash(req rpcRequest, conns *p2p.Conns) (any, *rpcError) { + if len(req.Params) < 1 { + return nil, &rpcError{Code: -32602, Message: "missing block hash parameter"} + } - count := conns.BroadcastTx(tx) + hashStr, ok := req.Params[0].(string) + if !ok { + return nil, &rpcError{Code: -32602, Message: "invalid block hash parameter"} + } - log.Info(). - Str("hash", tx.Hash().Hex()). - Int("peers", count). 
- Msg("Transaction broadcast complete") + hash := common.HexToHash(hashStr) + cache, ok := conns.Blocks().Get(hash) + if !ok { + return nil, nil // Return null for not found (per spec) + } + + return formatBlockResponse(hash, cache, parseFullTxParam(req.Params)), nil +} + +// getBlockByNumber retrieves a block by its number from the cache. +func getBlockByNumber(req rpcRequest, conns *p2p.Conns) (any, *rpcError) { + if len(req.Params) < 1 { + return nil, &rpcError{Code: -32602, Message: "missing block number parameter"} + } + + blockNumParam, ok := req.Params[0].(string) + if !ok { + return nil, &rpcError{Code: -32602, Message: "invalid block number parameter"} + } + + var hash common.Hash + var cache p2p.BlockCache + var found bool + + switch blockNumParam { + case "latest", "pending": + head := conns.HeadBlock() + if head.Block == nil { + return nil, nil + } + hash = head.Block.Hash() + cache, found = conns.Blocks().Get(hash) + if !found { + // Construct cache from head block + cache = p2p.BlockCache{ + Header: head.Block.Header(), + Body: ð.BlockBody{ + Transactions: head.Block.Transactions(), + Uncles: head.Block.Uncles(), + }, + TD: head.TD, + } + found = true + } + case "earliest": + hash, cache, found = conns.GetBlockByNumber(0) + default: + num, err := hexutil.DecodeUint64(blockNumParam) + if err != nil { + return nil, &rpcError{Code: -32602, Message: "invalid block number: " + err.Error()} + } + hash, cache, found = conns.GetBlockByNumber(num) + } + + if !found { + return nil, nil + } + + return formatBlockResponse(hash, cache, parseFullTxParam(req.Params)), nil +} + +// getTransactionByHash retrieves a transaction by its hash from the cache. 
+func getTransactionByHash(req rpcRequest, conns *p2p.Conns) (any, *rpcError) { + if len(req.Params) < 1 { + return nil, &rpcError{Code: -32602, Message: "missing transaction hash parameter"} + } + + hashStr, ok := req.Params[0].(string) + if !ok { + return nil, &rpcError{Code: -32602, Message: "invalid transaction hash parameter"} + } + + hash := common.HexToHash(hashStr) + + // First check the transactions cache + tx, ok := conns.GetTx(hash) + if ok { + return formatTransactionResponse(tx, common.Hash{}, nil, 0), nil + } + + // Search in blocks for the transaction + for _, blockHash := range conns.Blocks().Keys() { + cache, ok := conns.Blocks().Peek(blockHash) + if !ok || cache.Body == nil { + continue + } + for i, tx := range cache.Body.Transactions { + if tx.Hash() == hash { + return formatTransactionResponse(tx, blockHash, cache.Header, uint64(i)), nil + } + } + } + + return nil, nil +} + +// getTransactionByBlockHashAndIndex retrieves a transaction by block hash and index. +func getTransactionByBlockHashAndIndex(req rpcRequest, conns *p2p.Conns) (any, *rpcError) { + if len(req.Params) < 2 { + return nil, &rpcError{Code: -32602, Message: "missing block hash or index parameter"} + } + + hashStr, ok := req.Params[0].(string) + if !ok { + return nil, &rpcError{Code: -32602, Message: "invalid block hash parameter"} + } + + indexStr, ok := req.Params[1].(string) + if !ok { + return nil, &rpcError{Code: -32602, Message: "invalid index parameter"} + } + + index, err := hexutil.DecodeUint64(indexStr) + if err != nil { + return nil, &rpcError{Code: -32602, Message: "invalid index: " + err.Error()} + } + + blockHash := common.HexToHash(hashStr) + cache, ok := conns.Blocks().Get(blockHash) + if !ok || cache.Body == nil { + return nil, nil + } + + if int(index) >= len(cache.Body.Transactions) { + return nil, nil + } + + tx := cache.Body.Transactions[index] + return formatTransactionResponse(tx, blockHash, cache.Header, index), nil +} + +// getBlockCacheByHashParam parses 
a block hash from params[0] and returns the block cache. +// Returns the cache and nil error on success, or nil cache and error on parse failure. +// If the block is not found, returns nil cache with nil error (per JSON-RPC spec). +func getBlockCacheByHashParam(req rpcRequest, conns *p2p.Conns) (p2p.BlockCache, *rpcError) { + if len(req.Params) < 1 { + return p2p.BlockCache{}, &rpcError{Code: -32602, Message: "missing block hash parameter"} + } + + hashStr, ok := req.Params[0].(string) + if !ok { + return p2p.BlockCache{}, &rpcError{Code: -32602, Message: "invalid block hash parameter"} + } + + hash := common.HexToHash(hashStr) + cache, ok := conns.Blocks().Get(hash) + if !ok || cache.Body == nil { + return p2p.BlockCache{}, nil + } + + return cache, nil +} + +// getBlockTransactionCountByHash returns the transaction count in a block. +func getBlockTransactionCountByHash(req rpcRequest, conns *p2p.Conns) (any, *rpcError) { + cache, err := getBlockCacheByHashParam(req, conns) + if err != nil || cache.Body == nil { + return nil, err + } + return hexutil.EncodeUint64(uint64(len(cache.Body.Transactions))), nil +} + +// getUncleCountByBlockHash returns the uncle count in a block. +func getUncleCountByBlockHash(req rpcRequest, conns *p2p.Conns) (any, *rpcError) { + cache, err := getBlockCacheByHashParam(req, conns) + if err != nil || cache.Body == nil { + return nil, err + } + return hexutil.EncodeUint64(uint64(len(cache.Body.Uncles))), nil +} + +// formatBlockResponse formats a block cache into the Ethereum JSON-RPC block format. 
+func formatBlockResponse(hash common.Hash, cache p2p.BlockCache, fullTx bool) map[string]any { + header := cache.Header + if header == nil { + return nil + } + + result := map[string]any{ + "hash": hash.Hex(), + "number": hexutil.EncodeUint64(header.Number.Uint64()), + "parentHash": header.ParentHash.Hex(), + "nonce": hexutil.EncodeUint64(header.Nonce.Uint64()), + "sha3Uncles": header.UncleHash.Hex(), + "logsBloom": hexutil.Encode(header.Bloom.Bytes()), + "transactionsRoot": header.TxHash.Hex(), + "stateRoot": header.Root.Hex(), + "receiptsRoot": header.ReceiptHash.Hex(), + "miner": header.Coinbase.Hex(), + "difficulty": hexutil.EncodeBig(header.Difficulty), + "extraData": hexutil.Encode(header.Extra), + "gasLimit": hexutil.EncodeUint64(header.GasLimit), + "gasUsed": hexutil.EncodeUint64(header.GasUsed), + "timestamp": hexutil.EncodeUint64(header.Time), + "mixHash": header.MixDigest.Hex(), + } + + if header.BaseFee != nil { + result["baseFeePerGas"] = hexutil.EncodeBig(header.BaseFee) + } + + if header.WithdrawalsHash != nil { + result["withdrawalsRoot"] = header.WithdrawalsHash.Hex() + } + + if header.BlobGasUsed != nil { + result["blobGasUsed"] = hexutil.EncodeUint64(*header.BlobGasUsed) + } + + if header.ExcessBlobGas != nil { + result["excessBlobGas"] = hexutil.EncodeUint64(*header.ExcessBlobGas) + } + + if header.ParentBeaconRoot != nil { + result["parentBeaconBlockRoot"] = header.ParentBeaconRoot.Hex() + } + + // Add total difficulty if available + if cache.TD != nil { + result["totalDifficulty"] = hexutil.EncodeBig(cache.TD) + } + + // Add transactions + if cache.Body != nil && cache.Body.Transactions != nil { + if fullTx { + txs := make([]map[string]any, len(cache.Body.Transactions)) + for i, tx := range cache.Body.Transactions { + txs[i] = formatTransactionResponse(tx, hash, header, uint64(i)) + } + result["transactions"] = txs + } else { + txHashes := make([]string, len(cache.Body.Transactions)) + for i, tx := range cache.Body.Transactions { + 
txHashes[i] = tx.Hash().Hex() + } + result["transactions"] = txHashes + } + } else { + result["transactions"] = []string{} + } + + // Add uncles + if cache.Body != nil && cache.Body.Uncles != nil { + uncleHashes := make([]string, len(cache.Body.Uncles)) + for i, uncle := range cache.Body.Uncles { + uncleHashes[i] = uncle.Hash().Hex() + } + result["uncles"] = uncleHashes + } else { + result["uncles"] = []string{} + } + + // Add size (approximate based on header + body) + result["size"] = hexutil.EncodeUint64(0) // We don't have exact size; use 0 + + return result +} + +// formatTransactionResponse formats a transaction into the Ethereum JSON-RPC format. +// If blockHash is empty, the transaction is considered pending. +func formatTransactionResponse(tx *types.Transaction, blockHash common.Hash, header *types.Header, index uint64) map[string]any { + v, r, s := tx.RawSignatureValues() + + result := map[string]any{ + "hash": tx.Hash().Hex(), + "nonce": hexutil.EncodeUint64(tx.Nonce()), + "gas": hexutil.EncodeUint64(tx.Gas()), + "value": hexutil.EncodeBig(tx.Value()), + "input": hexutil.Encode(tx.Data()), + "v": hexutil.EncodeBig(v), + "r": hexutil.EncodeBig(r), + "s": hexutil.EncodeBig(s), + "type": hexutil.EncodeUint64(uint64(tx.Type())), + } + + if tx.To() != nil { + result["to"] = tx.To().Hex() + } else { + result["to"] = nil + } + + // Add from address if we can derive it + signer := types.LatestSignerForChainID(tx.ChainId()) + if from, err := types.Sender(signer, tx); err == nil { + result["from"] = from.Hex() + } + + // Set gas price fields based on transaction type + switch tx.Type() { + case types.LegacyTxType, types.AccessListTxType: + result["gasPrice"] = hexutil.EncodeBig(tx.GasPrice()) + case types.DynamicFeeTxType, types.BlobTxType: + result["maxFeePerGas"] = hexutil.EncodeBig(tx.GasFeeCap()) + result["maxPriorityFeePerGas"] = hexutil.EncodeBig(tx.GasTipCap()) + // For EIP-1559 txs, also set gasPrice to effective gas price if in a block + if header != nil 
&& header.BaseFee != nil { + effectiveGasPrice := new(big.Int).Add(header.BaseFee, tx.GasTipCap()) + if effectiveGasPrice.Cmp(tx.GasFeeCap()) > 0 { + effectiveGasPrice = tx.GasFeeCap() + } + result["gasPrice"] = hexutil.EncodeBig(effectiveGasPrice) + } else { + result["gasPrice"] = hexutil.EncodeBig(tx.GasFeeCap()) + } + } + + // Add chain ID if present + if tx.ChainId() != nil { + result["chainId"] = hexutil.EncodeBig(tx.ChainId()) + } + + // Add access list if present + if tx.AccessList() != nil { + result["accessList"] = tx.AccessList() + } + + // Add blob-specific fields + if tx.Type() == types.BlobTxType { + result["maxFeePerBlobGas"] = hexutil.EncodeBig(tx.BlobGasFeeCap()) + result["blobVersionedHashes"] = tx.BlobHashes() + } + + // Add block info if transaction is in a block + if blockHash != (common.Hash{}) && header != nil { + result["blockHash"] = blockHash.Hex() + result["blockNumber"] = hexutil.EncodeUint64(header.Number.Uint64()) + result["transactionIndex"] = hexutil.EncodeUint64(index) + } else { + result["blockHash"] = nil + result["blockNumber"] = nil + result["transactionIndex"] = nil + } - writeResult(w, tx.Hash().Hex(), req.ID) + return result } diff --git a/cmd/p2p/sensor/usage.md b/cmd/p2p/sensor/usage.md index c45a77c54..371095525 100644 --- a/cmd/p2p/sensor/usage.md +++ b/cmd/p2p/sensor/usage.md @@ -9,6 +9,34 @@ created automatically. The bootnodes may change, so refer to the [Polygon Knowledge Layer][bootnodes] if the sensor is not discovering peers. +## JSON-RPC Server + +The sensor runs a JSON-RPC server on port 8545 (configurable via `--rpc-port`) +that supports a subset of Ethereum JSON-RPC methods using cached data. 
+ +### Supported Methods + +| Method | Description | +|--------|-------------| +| `eth_chainId` | Returns the chain ID | +| `eth_blockNumber` | Returns the current head block number | +| `eth_gasPrice` | Returns suggested gas price based on recent blocks | +| `eth_getBlockByHash` | Returns block by hash | +| `eth_getBlockByNumber` | Returns block by number (if cached) | +| `eth_getTransactionByHash` | Returns transaction by hash | +| `eth_getTransactionByBlockHashAndIndex` | Returns transaction at index in block | +| `eth_getBlockTransactionCountByHash` | Returns transaction count in block | +| `eth_getUncleCountByBlockHash` | Returns uncle count in block | +| `eth_sendRawTransaction` | Broadcasts signed transaction to peers | + +### Limitations + +Methods requiring state or receipts are not supported: +- `eth_getBalance`, `eth_getCode`, `eth_call`, `eth_estimateGas` +- `eth_getTransactionReceipt`, `eth_getLogs` + +Data is served from an LRU cache, so older blocks/transactions may not be available. + ## Metrics The sensor exposes Prometheus metrics at `http://localhost:2112/metrics` diff --git a/p2p/conns.go b/p2p/conns.go index 1a2624214..28c75cf1f 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -2,6 +2,7 @@ package p2p import ( "math/big" + "sort" "sync" "time" @@ -408,3 +409,86 @@ func (c *Conns) GetPeerName(peerID string) string { return "" } + +// GetBlockByNumber iterates through the cache to find a block by its number. +// Returns the hash, block cache, and true if found; empty values and false otherwise. +func (c *Conns) GetBlockByNumber(number uint64) (common.Hash, BlockCache, bool) { + for _, hash := range c.blocks.Keys() { + if cache, ok := c.blocks.Peek(hash); ok && cache.Header != nil { + if cache.Header.Number.Uint64() == number { + return hash, cache, true + } + } + } + return common.Hash{}, BlockCache{}, false +} + +// SuggestGasPrice estimates the gas price based on recent blocks in the cache. 
+// Follows a geth-style gas price oracle approach using the 60th percentile. +func (c *Conns) SuggestGasPrice() *big.Int { + defaultGasPrice := big.NewInt(1e9) // 1 gwei + + keys := c.blocks.Keys() + if len(keys) == 0 { + return defaultGasPrice + } + + // Collect effective gas prices from transactions in cached blocks + var prices []*big.Int + for _, hash := range keys { + cache, ok := c.blocks.Peek(hash) + if !ok || cache.Body == nil || cache.Header == nil { + continue + } + + baseFee := cache.Header.BaseFee + if baseFee == nil { + baseFee = big.NewInt(0) + } + + for _, tx := range cache.Body.Transactions { + price := calculateEffectiveGasPrice(tx, baseFee) + if price != nil && price.Sign() > 0 { + prices = append(prices, price) + } + } + } + + if len(prices) == 0 { + // Fallback to head block base fee + 1 gwei tip + head := c.HeadBlock() + if head.Block != nil && head.Block.BaseFee() != nil { + return new(big.Int).Add(head.Block.BaseFee(), big.NewInt(1e9)) + } + return defaultGasPrice + } + + // Sort and return 60th percentile (geth default) + sort.Slice(prices, func(i, j int) bool { + return prices[i].Cmp(prices[j]) < 0 + }) + return prices[len(prices)*60/100] +} + +// calculateEffectiveGasPrice returns the effective gas price for a transaction. +// For EIP-1559 transactions, this is min(maxFeePerGas, baseFee + maxPriorityFeePerGas). +// For legacy transactions, this is the gas price directly. +// Returns nil if the price cannot be determined. 
+func calculateEffectiveGasPrice(tx *types.Transaction, baseFee *big.Int) *big.Int { + if tx.Type() == types.DynamicFeeTxType { + tip := tx.GasTipCap() + if tip == nil { + return nil + } + effectiveGasPrice := new(big.Int).Add(baseFee, tip) + if tx.GasFeeCap() != nil && effectiveGasPrice.Cmp(tx.GasFeeCap()) > 0 { + return new(big.Int).Set(tx.GasFeeCap()) + } + return effectiveGasPrice + } + // Legacy transactions: use gas price directly + if price := tx.GasPrice(); price != nil { + return new(big.Int).Set(price) + } + return nil +} From 3b66e34f3185602ac11ac8873a227fa62cefc09c Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Fri, 27 Feb 2026 22:20:05 -0500 Subject: [PATCH 19/48] fix: lint --- p2p/protocol.go | 54 ++++++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/p2p/protocol.go b/p2p/protocol.go index f804998bb..1686ab6ae 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -593,21 +593,8 @@ func (c *conn) txAnnouncementLoop() { for { // If there's no in-flight announce running, check if a new one is needed if done == nil && len(queue) > 0 { - // Pile transaction hashes until we reach our allowed network limit - var ( - count int - pending []common.Hash - size int - ) - for count = 0; count < len(queue) && size < maxTxPacketSize; count++ { - hash := queue[count] - if !c.hasKnownTx(hash) { - pending = append(pending, hash) - size += common.HashLength - } - } - // Shift and trim queue - queue = queue[:copy(queue, queue[count:])] + var pending []common.Hash + pending, queue = c.prepareTxAnnouncements(queue) // If there's anything available to transfer, fire up an async writer if len(pending) > 0 { @@ -625,15 +612,8 @@ func (c *conn) txAnnouncementLoop() { // Transfer goroutine may or may not have been started, listen for events select { case hashes := <-c.txAnnounce: - // If the connection failed, discard all transaction events - if failed { - continue - } - // New batch of transactions to be broadcast, queue 
them (with cap) - queue = append(queue, hashes...) - if len(queue) > maxQueuedTxAnns { - // Drop oldest to keep newest - queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])] + if !failed { + queue = c.enqueueTxHashes(queue, hashes) } case <-done: @@ -648,6 +628,30 @@ func (c *conn) txAnnouncementLoop() { } } +// prepareTxAnnouncements extracts a batch of unknown tx hashes from the queue +// up to maxTxPacketSize bytes. Returns the pending hashes and remaining queue. +func (c *conn) prepareTxAnnouncements(queue []common.Hash) (pending, remaining []common.Hash) { + var size int + var count int + for count = 0; count < len(queue) && size < maxTxPacketSize; count++ { + if hash := queue[count]; !c.hasKnownTx(hash) { + pending = append(pending, hash) + size += common.HashLength + } + } + remaining = queue[:copy(queue, queue[count:])] + return pending, remaining +} + +// enqueueTxHashes adds hashes to the queue, dropping oldest if over capacity. +func (c *conn) enqueueTxHashes(queue []common.Hash, hashes []common.Hash) []common.Hash { + queue = append(queue, hashes...) + if len(queue) > maxQueuedTxAnns { + queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])] + } + return queue +} + // sendTxAnnouncements sends a batch of transaction hashes to the peer. 
func (c *conn) sendTxAnnouncements(hashes []common.Hash) error { packet := eth.NewPooledTransactionHashesPacket{ @@ -672,7 +676,7 @@ func (c *conn) blockAnnouncementLoop() { for { select { case packet := <-c.blockAnnounce: - if err := c.sendBlockAnnouncements(packet); err != nil { + if c.sendBlockAnnouncements(packet) != nil { return } case <-c.closeCh: From 97b6e01648010c545a3ba9a05a97a40bd2406201 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Fri, 27 Feb 2026 22:23:15 -0500 Subject: [PATCH 20/48] docs: make gen --- doc/polycli_p2p_sensor.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/doc/polycli_p2p_sensor.md b/doc/polycli_p2p_sensor.md index eed8195cc..c4fd7c356 100644 --- a/doc/polycli_p2p_sensor.md +++ b/doc/polycli_p2p_sensor.md @@ -30,6 +30,34 @@ created automatically. The bootnodes may change, so refer to the [Polygon Knowledge Layer][bootnodes] if the sensor is not discovering peers. +## JSON-RPC Server + +The sensor runs a JSON-RPC server on port 8545 (configurable via `--rpc-port`) +that supports a subset of Ethereum JSON-RPC methods using cached data. 
+ +### Supported Methods + +| Method | Description | +|--------|-------------| +| `eth_chainId` | Returns the chain ID | +| `eth_blockNumber` | Returns the current head block number | +| `eth_gasPrice` | Returns suggested gas price based on recent blocks | +| `eth_getBlockByHash` | Returns block by hash | +| `eth_getBlockByNumber` | Returns block by number (if cached) | +| `eth_getTransactionByHash` | Returns transaction by hash | +| `eth_getTransactionByBlockHashAndIndex` | Returns transaction at index in block | +| `eth_getBlockTransactionCountByHash` | Returns transaction count in block | +| `eth_getUncleCountByBlockHash` | Returns uncle count in block | +| `eth_sendRawTransaction` | Broadcasts signed transaction to peers | + +### Limitations + +Methods requiring state or receipts are not supported: +- `eth_getBalance`, `eth_getCode`, `eth_call`, `eth_estimateGas` +- `eth_getTransactionReceipt`, `eth_getLogs` + +Data is served from an LRU cache, so older blocks/transactions may not be available. + ## Metrics The sensor exposes Prometheus metrics at `http://localhost:2112/metrics` From da98090852d88544ea947e1162a2e2f3bae4e37b Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Mon, 2 Mar 2026 10:56:36 -0500 Subject: [PATCH 21/48] fix: lint --- p2p/protocol.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/p2p/protocol.go b/p2p/protocol.go index 1686ab6ae..7839fe388 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -644,7 +644,7 @@ func (c *conn) prepareTxAnnouncements(queue []common.Hash) (pending, remaining [ } // enqueueTxHashes adds hashes to the queue, dropping oldest if over capacity. -func (c *conn) enqueueTxHashes(queue []common.Hash, hashes []common.Hash) []common.Hash { +func (c *conn) enqueueTxHashes(queue, hashes []common.Hash) []common.Hash { queue = append(queue, hashes...) 
if len(queue) > maxQueuedTxAnns { queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])] From 15494bdd6de5914aae342598b18ebb388a649cf6 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Mon, 2 Mar 2026 11:08:20 -0500 Subject: [PATCH 22/48] fix: lint --- cmd/p2p/sensor/rpc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/p2p/sensor/rpc.go b/cmd/p2p/sensor/rpc.go index 1812dd2f5..bf18838bc 100644 --- a/cmd/p2p/sensor/rpc.go +++ b/cmd/p2p/sensor/rpc.go @@ -166,7 +166,7 @@ func handleBatchRequest(w http.ResponseWriter, body []byte, conns *p2p.Conns, ch } // newResultResponse creates a success response. -func newResultResponse(result any, id any) rpcResponse { +func newResultResponse(result, id any) rpcResponse { return rpcResponse{JSONRPC: "2.0", Result: result, ID: id} } From 743f953e8ef2fdbf4a35bead18b9defeee1e92ee Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Mon, 2 Mar 2026 18:29:20 -0500 Subject: [PATCH 23/48] fix: protocol lengths --- p2p/protocol.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/p2p/protocol.go b/p2p/protocol.go index 7839fe388..b675e7f91 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -36,6 +36,14 @@ const ( maxQueuedBlockAnns = 4 ) +// protocolLengths maps protocol versions to their message counts. +var protocolLengths = map[uint]uint64{ + 66: 17, + 67: 17, + 68: 17, + 69: 18, +} + // conn represents an individual connection with a peer. 
type conn struct { sensorID string @@ -114,7 +122,7 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { return ethp2p.Protocol{ Name: "eth", Version: version, - Length: 17, + Length: protocolLengths[version], Run: func(p *ethp2p.Peer, rw ethp2p.MsgReadWriter) error { peerURL := p.Node().URLv4() c := &conn{ From 9ccc61db610407c0b7cc24ecb621c6e84271cb16 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Mon, 2 Mar 2026 18:53:14 -0500 Subject: [PATCH 24/48] fix: use bor status packet --- p2p/protocol.go | 11 ++++++----- p2p/types.go | 17 +++++++++++++++++ 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/p2p/protocol.go b/p2p/protocol.go index b675e7f91..52418104a 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -231,9 +231,10 @@ func (c *conn) statusExchange(version uint, opts EthProtocolOptions) error { head := c.conns.HeadBlock() if version >= eth.ETH69 { - status := eth.StatusPacket69{ + status := BorStatusPacket69{ ProtocolVersion: uint32(version), NetworkID: opts.NetworkID, + TD: head.TD, Genesis: opts.GenesisHash, ForkID: opts.ForkID, EarliestBlock: head.Block.NumberU64(), @@ -287,11 +288,11 @@ func (c *conn) statusExchange68(packet *eth.StatusPacket68) error { } // statusExchange69 will exchange status message for ETH69. 
-func (c *conn) statusExchange69(packet *eth.StatusPacket69) error { +func (c *conn) statusExchange69(packet *BorStatusPacket69) error { errc := make(chan error, 2) go func() { - c.countMsgSent((ð.StatusPacket69{}).Name(), 1) + c.countMsgSent(packet.Name(), 1) errc <- ethp2p.Send(c.rw, eth.StatusMsg, packet) }() @@ -369,7 +370,7 @@ func (c *conn) readStatus68(packet *eth.StatusPacket68) error { return nil } -func (c *conn) readStatus69(packet *eth.StatusPacket69) error { +func (c *conn) readStatus69(packet *BorStatusPacket69) error { msg, err := c.rw.ReadMsg() if err != nil { return err @@ -379,7 +380,7 @@ func (c *conn) readStatus69(packet *eth.StatusPacket69) error { return errors.New("expected status message code") } - var status eth.StatusPacket69 + var status BorStatusPacket69 if err := msg.Decode(&status); err != nil { return err } diff --git a/p2p/types.go b/p2p/types.go index f89cec2e0..924fbce1a 100644 --- a/p2p/types.go +++ b/p2p/types.go @@ -8,6 +8,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/stateless" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -86,6 +87,22 @@ type Status eth.StatusPacket68 func (msg Status) Code() int { return 16 } func (msg Status) ReqID() uint64 { return 0 } +// BorStatusPacket69 is the Bor-compatible status packet for ETH69. +// Bor's implementation includes the TD field which upstream go-ethereum removed. +type BorStatusPacket69 struct { + ProtocolVersion uint32 + NetworkID uint64 + TD *big.Int + Genesis common.Hash + ForkID forkid.ID + EarliestBlock uint64 + LatestBlock uint64 + LatestBlockHash common.Hash +} + +func (*BorStatusPacket69) Name() string { return "Status" } +func (*BorStatusPacket69) Kind() byte { return eth.StatusMsg } + // NewBlockHashes is the network packet for the block announcements. 
type NewBlockHashes eth.NewBlockHashesPacket From 6584163b613bf0e5f0f4e30391ca86a98440c04d Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Mon, 2 Mar 2026 22:01:18 -0500 Subject: [PATCH 25/48] feat: enable pprof lock profiling --- cmd/p2p/sensor/sensor.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index c3c9703a7..7aadbf375 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "os/signal" + "runtime" "syscall" "time" @@ -335,6 +336,10 @@ func stopServer(server *ethp2p.Server) { // sensor's performance. The port number is configured through // inputSensorParams.PprofPort. An error is logged if the server fails to start. func handlePprof() { + // Enable mutex and block profiling to detect lock contention. + runtime.SetMutexProfileFraction(1) + runtime.SetBlockProfileRate(1) + addr := fmt.Sprintf(":%d", inputSensorParams.PprofPort) if err := http.ListenAndServe(addr, nil); err != nil { log.Error().Err(err).Msg("Failed to start pprof") From 832a4639a12d42f2a8b4e67fb34374db556e3a65 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Mon, 2 Mar 2026 22:34:21 -0500 Subject: [PATCH 26/48] fix: lock contention --- p2p/cache.go | 108 ++++++++++++++++++++++++++++++++++++++++++++++++ p2p/conns.go | 5 +++ p2p/protocol.go | 44 ++++++++++++-------- 3 files changed, 140 insertions(+), 17 deletions(-) diff --git a/p2p/cache.go b/p2p/cache.go index 2c0bf1a62..35608ef44 100644 --- a/p2p/cache.go +++ b/p2p/cache.go @@ -100,6 +100,41 @@ func (c *Cache[K, V]) Get(key K) (V, bool) { return e.value, true } +// GetMany retrieves multiple values from the cache and updates LRU ordering. +// Uses a single write lock for all lookups, reducing lock contention compared +// to calling Get in a loop. Returns a slice of values for keys that were found. 
+func (c *Cache[K, V]) GetMany(keys []K) []V { + if len(keys) == 0 { + return nil + } + + c.mu.Lock() + defer c.mu.Unlock() + + now := time.Now() + result := make([]V, 0, len(keys)) + + for _, key := range keys { + elem, ok := c.items[key] + if !ok { + continue + } + + e := elem.Value.(*entry[K, V]) + + if e.expiresAt != nil && now.After(*e.expiresAt) { + c.list.Remove(elem) + delete(c.items, key) + continue + } + + c.list.MoveToFront(elem) + result = append(result, e.value) + } + + return result +} + // Peek retrieves a value from the cache without updating LRU ordering. // Uses a read lock for better concurrency. func (c *Cache[K, V]) Peek(key K) (V, bool) { @@ -238,3 +273,76 @@ func (c *Cache[K, V]) Keys() []K { } return keys } + +// FilterNotContained returns the subset of keys that are not in the cache. +// Uses a single read lock for all lookups, reducing lock contention compared +// to calling Contains in a loop. +func (c *Cache[K, V]) FilterNotContained(keys []K) []K { + c.mu.RLock() + defer c.mu.RUnlock() + + now := time.Now() + result := make([]K, 0, len(keys)) + + for _, key := range keys { + elem, ok := c.items[key] + if !ok { + result = append(result, key) + continue + } + + e := elem.Value.(*entry[K, V]) + if e.expiresAt != nil && now.After(*e.expiresAt) { + result = append(result, key) + } + } + + return result +} + +// AddMany adds multiple keys with the same value to the cache. +// Uses a single write lock for all additions, reducing lock contention +// compared to calling Add in a loop. 
+func (c *Cache[K, V]) AddMany(keys []K, value V) { + if len(keys) == 0 { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + + var expiresAt *time.Time + if c.ttl > 0 { + t := time.Now().Add(c.ttl) + expiresAt = &t + } + + for _, key := range keys { + if elem, ok := c.items[key]; ok { + c.list.MoveToFront(elem) + e := elem.Value.(*entry[K, V]) + e.value = value + e.expiresAt = expiresAt + continue + } + + e := &entry[K, V]{ + key: key, + value: value, + expiresAt: expiresAt, + } + elem := c.list.PushFront(e) + c.items[key] = elem + } + + // Enforce size limit after all additions + for c.maxSize > 0 && c.list.Len() > c.maxSize { + back := c.list.Back() + if back == nil { + break + } + c.list.Remove(back) + e := back.Value.(*entry[K, V]) + delete(c.items, e.key) + } +} diff --git a/p2p/conns.go b/p2p/conns.go index 1a2624214..0713c6fa4 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -324,6 +324,11 @@ func (c *Conns) GetTx(hash common.Hash) (*types.Transaction, bool) { return c.txs.Get(hash) } +// GetTxs retrieves multiple transactions from the shared cache in a single lock operation. +func (c *Conns) GetTxs(hashes []common.Hash) []*types.Transaction { + return c.txs.GetMany(hashes) +} + // Blocks returns the global blocks cache. func (c *Conns) Blocks() *Cache[common.Hash, BlockCache] { return c.blocks diff --git a/p2p/protocol.go b/p2p/protocol.go index 52418104a..358305cf0 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -640,15 +640,28 @@ func (c *conn) txAnnouncementLoop() { // prepareTxAnnouncements extracts a batch of unknown tx hashes from the queue // up to maxTxPacketSize bytes. Returns the pending hashes and remaining queue. 
func (c *conn) prepareTxAnnouncements(queue []common.Hash) (pending, remaining []common.Hash) { - var size int - var count int - for count = 0; count < len(queue) && size < maxTxPacketSize; count++ { - if hash := queue[count]; !c.hasKnownTx(hash) { - pending = append(pending, hash) - size += common.HashLength - } + if !c.shouldBroadcastTx && !c.shouldBroadcastTxHashes { + return nil, nil + } + + // Calculate max hashes we can send based on packet size limit + maxHashes := maxTxPacketSize / common.HashLength + if maxHashes > len(queue) { + maxHashes = len(queue) + } + + // Filter out known hashes in a single lock operation + batch := queue[:maxHashes] + pending = c.knownTxs.FilterNotContained(batch) + + // If we got fewer pending than the batch size, we processed some known hashes. + // Limit pending to maxTxPacketSize worth of hashes. + maxPending := maxTxPacketSize / common.HashLength + if len(pending) > maxPending { + pending = pending[:maxPending] } - remaining = queue[:copy(queue, queue[count:])] + + remaining = queue[:copy(queue, queue[maxHashes:])] return pending, remaining } @@ -673,8 +686,10 @@ func (c *conn) sendTxAnnouncements(hashes []common.Hash) error { c.logger.Debug().Err(err).Msg("Failed to send tx announcements") return err } - for _, hash := range hashes { - c.addKnownTx(hash) + + // Mark all hashes as known in a single lock operation + if c.shouldBroadcastTx || c.shouldBroadcastTxHashes { + c.knownTxs.AddMany(hashes, struct{}{}) } return nil } @@ -969,13 +984,8 @@ func (c *conn) handleGetPooledTransactions(msg ethp2p.Msg) error { c.countMsgReceived(request.Name(), float64(len(request.GetPooledTransactionsRequest))) - // Try to serve from cache - var txs []*types.Transaction - for _, hash := range request.GetPooledTransactionsRequest { - if tx, ok := c.conns.GetTx(hash); ok { - txs = append(txs, tx) - } - } + // Try to serve from cache using batch lookup (single lock operation) + txs := c.conns.GetTxs(request.GetPooledTransactionsRequest) response 
:= ð.PooledTransactionsPacket{ RequestId: request.RequestId, From 2eed8ea79cc2155ab071db60b1f6983a3a0619ca Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Mon, 2 Mar 2026 22:40:37 -0500 Subject: [PATCH 27/48] fix: use min --- p2p/protocol.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/p2p/protocol.go b/p2p/protocol.go index 358305cf0..52d2884bf 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -645,10 +645,7 @@ func (c *conn) prepareTxAnnouncements(queue []common.Hash) (pending, remaining [ } // Calculate max hashes we can send based on packet size limit - maxHashes := maxTxPacketSize / common.HashLength - if maxHashes > len(queue) { - maxHashes = len(queue) - } + maxHashes := min(maxTxPacketSize/common.HashLength, len(queue)) // Filter out known hashes in a single lock operation batch := queue[:maxHashes] From dfcabbddc33533728bc0af9f79658178a94af8d4 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Mon, 2 Mar 2026 23:15:46 -0500 Subject: [PATCH 28/48] fix: increase tx cache size --- cmd/p2p/sensor/sensor.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index 7aadbf375..18100b5b4 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -517,9 +517,9 @@ will result in less chance of missing data but can significantly increase memory f.DurationVar(&inputSensorParams.ParentsCache.TTL, "parents-cache-ttl", 5*time.Minute, "time to live for parent hash cache entries (0 for no expiration)") f.IntVar(&inputSensorParams.BlocksCache.MaxSize, "max-blocks", 1024, "maximum blocks to track across all peers (0 for no limit)") f.DurationVar(&inputSensorParams.BlocksCache.TTL, "blocks-cache-ttl", 10*time.Minute, "time to live for block cache entries (0 for no expiration)") - f.IntVar(&inputSensorParams.TxsCache.MaxSize, "max-txs", 8192, "maximum transactions to cache for serving to peers (0 for no limit)") + f.IntVar(&inputSensorParams.TxsCache.MaxSize, "max-txs", 
32768, "maximum transactions to cache for serving to peers (0 for no limit)") f.DurationVar(&inputSensorParams.TxsCache.TTL, "txs-cache-ttl", 10*time.Minute, "time to live for transaction cache entries (0 for no expiration)") - f.IntVar(&inputSensorParams.KnownTxsCache.MaxSize, "max-known-txs", 8192, "maximum transaction hashes to track per peer (0 for no limit)") + f.IntVar(&inputSensorParams.KnownTxsCache.MaxSize, "max-known-txs", 32768, "maximum transaction hashes to track per peer (0 for no limit)") f.DurationVar(&inputSensorParams.KnownTxsCache.TTL, "known-txs-cache-ttl", 5*time.Minute, "time to live for known transaction cache entries (0 for no expiration)") f.IntVar(&inputSensorParams.KnownBlocksCache.MaxSize, "max-known-blocks", 1024, "maximum block hashes to track per peer (0 for no limit)") f.DurationVar(&inputSensorParams.KnownBlocksCache.TTL, "known-blocks-cache-ttl", 5*time.Minute, "time to live for known block cache entries (0 for no expiration)") From 25ef16c4b3d42f6ceee23fc4a06df5bf8676fcbb Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Mon, 2 Mar 2026 23:57:38 -0500 Subject: [PATCH 29/48] feat: add protocol version --- cmd/p2p/sensor/api.go | 2 ++ p2p/conns.go | 13 +++++++++++++ p2p/protocol.go | 4 ++++ 3 files changed, 19 insertions(+) diff --git a/cmd/p2p/sensor/api.go b/cmd/p2p/sensor/api.go index efba4b8c8..fb68eb208 100644 --- a/cmd/p2p/sensor/api.go +++ b/cmd/p2p/sensor/api.go @@ -17,6 +17,7 @@ import ( // (number of p2p messages), along with connection timing information. 
type peerData struct { Name string `json:"name"` + ProtocolVersion uint `json:"protocol_version"` Received p2p.MessageCount `json:"received"` Sent p2p.MessageCount `json:"sent"` PacketsReceived p2p.MessageCount `json:"packets_received"` @@ -85,6 +86,7 @@ func handleAPI(server *ethp2p.Server, conns *p2p.Conns) { peers[url] = peerData{ Name: conns.GetPeerName(peerID), + ProtocolVersion: conns.GetPeerVersion(peerID), Received: messages.Received, Sent: messages.Sent, PacketsReceived: messages.PacketsReceived, diff --git a/p2p/conns.go b/p2p/conns.go index 0713c6fa4..d022a8e4b 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -413,3 +413,16 @@ func (c *Conns) GetPeerName(peerID string) string { return "" } + +// GetPeerVersion returns the negotiated eth protocol version for a specific peer. +// Returns 0 if the peer is not found. +func (c *Conns) GetPeerVersion(peerID string) uint { + c.mu.RLock() + defer c.mu.RUnlock() + + if cn, ok := c.conns[peerID]; ok { + return cn.version + } + + return 0 +} diff --git a/p2p/protocol.go b/p2p/protocol.go index 52d2884bf..fb6ea0f32 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -92,6 +92,9 @@ type conn struct { txAnnounce chan []common.Hash blockAnnounce chan eth.NewBlockHashesPacket closeCh chan struct{} + + // version stores the negotiated eth protocol version (e.g., 68 or 69). + version uint } // EthProtocolOptions is the options used when creating a new eth protocol. 
@@ -148,6 +151,7 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { txAnnounce: make(chan []common.Hash), blockAnnounce: make(chan eth.NewBlockHashesPacket, maxQueuedBlockAnns), closeCh: make(chan struct{}), + version: version, } // Ensure cleanup happens on any exit path (including statusExchange failure) From abf1c2b10dfca8484e026548c0f2b40e3dbb2397 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 00:26:10 -0500 Subject: [PATCH 30/48] fix: protocol --- p2p/protocol.go | 40 +++++++++++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 11 deletions(-) diff --git a/p2p/protocol.go b/p2p/protocol.go index fb6ea0f32..0f84f5c99 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -644,10 +644,6 @@ func (c *conn) txAnnouncementLoop() { // prepareTxAnnouncements extracts a batch of unknown tx hashes from the queue // up to maxTxPacketSize bytes. Returns the pending hashes and remaining queue. func (c *conn) prepareTxAnnouncements(queue []common.Hash) (pending, remaining []common.Hash) { - if !c.shouldBroadcastTx && !c.shouldBroadcastTxHashes { - return nil, nil - } - // Calculate max hashes we can send based on packet size limit maxHashes := min(maxTxPacketSize/common.HashLength, len(queue)) @@ -676,22 +672,44 @@ func (c *conn) enqueueTxHashes(queue, hashes []common.Hash) []common.Hash { } // sendTxAnnouncements sends a batch of transaction hashes to the peer. +// It looks up each transaction from the cache to populate Types and Sizes +// as required by the ETH68 protocol. func (c *conn) sendTxAnnouncements(hashes []common.Hash) error { + // Build packet with actual Types and Sizes from cached transactions. + // Skip hashes where the transaction is no longer in cache. 
+ var ( + pending []common.Hash + pendingTypes []byte + pendingSizes []uint32 + ) + + for _, hash := range hashes { + tx, ok := c.conns.GetTx(hash) + if !ok || tx == nil { + continue + } + pending = append(pending, hash) + pendingTypes = append(pendingTypes, tx.Type()) + pendingSizes = append(pendingSizes, uint32(tx.Size())) + } + + if len(pending) == 0 { + return nil + } + packet := eth.NewPooledTransactionHashesPacket{ - Types: make([]byte, len(hashes)), - Sizes: make([]uint32, len(hashes)), - Hashes: hashes, + Types: pendingTypes, + Sizes: pendingSizes, + Hashes: pending, } - c.countMsgSent(packet.Name(), float64(len(hashes))) + c.countMsgSent(packet.Name(), float64(len(pending))) if err := ethp2p.Send(c.rw, eth.NewPooledTransactionHashesMsg, packet); err != nil { c.logger.Debug().Err(err).Msg("Failed to send tx announcements") return err } // Mark all hashes as known in a single lock operation - if c.shouldBroadcastTx || c.shouldBroadcastTxHashes { - c.knownTxs.AddMany(hashes, struct{}{}) - } + c.knownTxs.AddMany(pending, struct{}{}) return nil } From 0b8751370d5f3acd242c5ea04c01525dca9387ce Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 05:33:56 -0500 Subject: [PATCH 31/48] feat: improve logging --- p2p/protocol.go | 67 ++++++++++++++++++++++++++++++++++++++++++++++--- p2p/types.go | 50 ------------------------------------ 2 files changed, 63 insertions(+), 54 deletions(-) diff --git a/p2p/protocol.go b/p2p/protocol.go index 0f84f5c99..0c5a3fb19 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/protocols/eth" ethp2p "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" @@ -172,6 +173,9 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { return err } + // 
Update logger with peer name now that status exchange is complete + c.logger = log.With().Str("peer", peerURL).Str("peer_name", c.peer.Fullname()).Logger() + // Send the connection object to the conns manager for RPC broadcasting opts.Conns.Add(c) @@ -754,6 +758,61 @@ func (c *conn) sendBlockAnnouncements(packet eth.NewBlockHashesPacket) error { return nil } +// decodeTx attempts to decode a transaction from an RLP-encoded raw value. +func (c *conn) decodeTx(raw []byte) *types.Transaction { + if len(raw) == 0 { + return nil + } + + // Try decoding as RLP-wrapped bytes first (legacy format) + var bytes []byte + if rlp.DecodeBytes(raw, &bytes) == nil { + tx := new(types.Transaction) + err := tx.UnmarshalBinary(bytes) + if err == nil { + return tx + } + + c.logger.Warn(). + Err(err). + Uint8("type", bytes[0]). + Int("size", len(bytes)). + Str("hash", crypto.Keccak256Hash(bytes).Hex()). + Msg("Failed to decode transaction") + + return nil + } + + // Try decoding as raw binary (typed transaction format) + tx := new(types.Transaction) + err := tx.UnmarshalBinary(raw) + if err == nil { + return tx + } + + c.logger.Warn(). + Err(err). + Uint8("prefix", raw[0]). + Int("size", len(raw)). + Str("hash", crypto.Keccak256Hash(raw).Hex()). + Msg("Failed to decode transaction") + + return nil +} + +// decodeTxs decodes a list of transactions, returning only successfully decoded ones. 
+func (c *conn) decodeTxs(rawTxs []rlp.RawValue) []*types.Transaction { + var txs []*types.Transaction + + for _, raw := range rawTxs { + if tx := c.decodeTx(raw); tx != nil { + txs = append(txs, tx) + } + } + + return txs +} + func (c *conn) handleTransactions(ctx context.Context, msg ethp2p.Msg) error { payload, err := io.ReadAll(msg.Payload) if err != nil { @@ -766,7 +825,7 @@ func (c *conn) handleTransactions(ctx context.Context, msg ethp2p.Msg) error { return nil } - txs := decodeTxs(rawTxs) + txs := c.decodeTxs(rawTxs) tfs := time.Now() c.countMsgReceived((ð.TransactionsPacket{}).Name(), float64(len(txs))) @@ -911,7 +970,7 @@ func (c *conn) handleBlockBodies(ctx context.Context, msg ethp2p.Msg) error { } body := ð.BlockBody{ - Transactions: decodeTxs(decoded.Transactions), + Transactions: c.decodeTxs(decoded.Transactions), Uncles: decoded.Uncles, Withdrawals: decoded.Withdrawals, } @@ -940,7 +999,7 @@ func (c *conn) handleNewBlock(ctx context.Context, msg ethp2p.Msg) error { } block := types.NewBlockWithHeader(raw.Block.Header).WithBody(types.Body{ - Transactions: decodeTxs(raw.Block.Txs), + Transactions: c.decodeTxs(raw.Block.Txs), Uncles: raw.Block.Uncles, Withdrawals: raw.Block.Withdrawals, }) @@ -1054,7 +1113,7 @@ func (c *conn) handlePooledTransactions(ctx context.Context, msg ethp2p.Msg) err } packet := ð.PooledTransactionsPacket{ - PooledTransactionsResponse: decodeTxs(raw.Txs), + PooledTransactionsResponse: c.decodeTxs(raw.Txs), } tfs := time.Now() diff --git a/p2p/types.go b/p2p/types.go index 924fbce1a..56b25f16e 100644 --- a/p2p/types.go +++ b/p2p/types.go @@ -11,7 +11,6 @@ import ( "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/stateless" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/p2p" @@ -19,7 +18,6 @@ import ( 
"github.com/ethereum/go-ethereum/p2p/rlpx" "github.com/ethereum/go-ethereum/rlp" "github.com/rs/zerolog" - "github.com/rs/zerolog/log" ) type Message interface { @@ -451,51 +449,3 @@ type rawPooledTransactionsPacket struct { Txs []rlp.RawValue } -// decodeTx attempts to decode a transaction from an RLP-encoded raw value. -func decodeTx(raw []byte) *types.Transaction { - if len(raw) == 0 { - return nil - } - - var bytes []byte - if rlp.DecodeBytes(raw, &bytes) == nil { - tx := new(types.Transaction) - if tx.UnmarshalBinary(bytes) == nil { - return tx - } - - log.Warn(). - Uint8("type", bytes[0]). - Int("size", len(bytes)). - Str("hash", crypto.Keccak256Hash(bytes).Hex()). - Msg("Failed to decode transaction") - - return nil - } - - tx := new(types.Transaction) - if tx.UnmarshalBinary(raw) == nil { - return tx - } - - log.Warn(). - Uint8("prefix", raw[0]). - Int("size", len(raw)). - Str("hash", crypto.Keccak256Hash(raw).Hex()). - Msg("Failed to decode transaction") - - return nil -} - -// decodeTxs decodes a list of transactions, returning only successfully decoded ones. 
-func decodeTxs(rawTxs []rlp.RawValue) []*types.Transaction { - var txs []*types.Transaction - - for _, raw := range rawTxs { - if tx := decodeTx(raw); tx != nil { - txs = append(txs, tx) - } - } - - return txs -} From 1af2505667339b16051cf6efe6832a99cb154502 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 06:08:23 -0500 Subject: [PATCH 32/48] docs: make gen --- doc/polycli_p2p_sensor.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/polycli_p2p_sensor.md b/doc/polycli_p2p_sensor.md index eed8195cc..7a8a62549 100644 --- a/doc/polycli_p2p_sensor.md +++ b/doc/polycli_p2p_sensor.md @@ -117,11 +117,11 @@ polycli p2p sensor amoy-nodes.json \ -D, --max-db-concurrency int maximum number of concurrent database operations to perform (increasing this will result in less chance of missing data but can significantly increase memory usage) (default 10000) --max-known-blocks int maximum block hashes to track per peer (0 for no limit) (default 1024) - --max-known-txs int maximum transaction hashes to track per peer (0 for no limit) (default 8192) + --max-known-txs int maximum transaction hashes to track per peer (0 for no limit) (default 32768) --max-parents int maximum parent block hashes to track per peer (0 for no limit) (default 1024) -m, --max-peers int maximum number of peers to connect to (default 2000) --max-requests int maximum request IDs to track per peer (0 for no limit) (default 2048) - --max-txs int maximum transactions to cache for serving to peers (0 for no limit) (default 8192) + --max-txs int maximum transactions to cache for serving to peers (0 for no limit) (default 32768) --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") -n, --network-id uint filter discovered nodes by this network ID --no-discovery disable P2P peer discovery From cf1dae5b356a96e44ccd455c97038e44b918273a Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 07:28:13 -0500 Subject: [PATCH 33/48] fix: merge 
conflicts --- p2p/conns.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/p2p/conns.go b/p2p/conns.go index d0c010977..36c3c4c6a 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -496,6 +496,8 @@ func calculateEffectiveGasPrice(tx *types.Transaction, baseFee *big.Int) *big.In return new(big.Int).Set(price) } return nil +} + // GetPeerVersion returns the negotiated eth protocol version for a specific peer. // Returns 0 if the peer is not found. func (c *Conns) GetPeerVersion(peerID string) uint { From 5d89c671b2da0fb0162ac83a00f1cce96a42c7f1 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 09:21:01 -0500 Subject: [PATCH 34/48] feat: bloomset --- cmd/p2p/sensor/sensor.go | 17 +-- doc/polycli_p2p_sensor.md | 108 +++++++++--------- go.mod | 4 +- p2p/bloomset.go | 173 +++++++++++++++++++++++++++++ p2p/bloomset_test.go | 224 ++++++++++++++++++++++++++++++++++++++ p2p/conns.go | 25 +++-- p2p/protocol.go | 58 +++++++--- 7 files changed, 516 insertions(+), 93 deletions(-) create mode 100644 p2p/bloomset.go create mode 100644 p2p/bloomset_test.go diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index 18100b5b4..712669612 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -79,8 +79,8 @@ type ( ParentsCache p2p.CacheOptions BlocksCache p2p.CacheOptions TxsCache p2p.CacheOptions - KnownTxsCache p2p.CacheOptions - KnownBlocksCache p2p.CacheOptions + KnownTxsBloom p2p.BloomSetOptions + KnownBlocksMax int bootnodes []*enode.Node staticNodes []*enode.Node @@ -207,8 +207,8 @@ var SensorCmd = &cobra.Command{ conns := p2p.NewConns(p2p.ConnsOptions{ BlocksCache: inputSensorParams.BlocksCache, TxsCache: inputSensorParams.TxsCache, - KnownTxsCache: inputSensorParams.KnownTxsCache, - KnownBlocksCache: inputSensorParams.KnownBlocksCache, + KnownTxsBloom: inputSensorParams.KnownTxsBloom, + KnownBlocksMax: inputSensorParams.KnownBlocksMax, Head: head, ShouldBroadcastTx: inputSensorParams.ShouldBroadcastTx, ShouldBroadcastTxHashes: 
inputSensorParams.ShouldBroadcastTxHashes, @@ -519,8 +519,9 @@ will result in less chance of missing data but can significantly increase memory f.DurationVar(&inputSensorParams.BlocksCache.TTL, "blocks-cache-ttl", 10*time.Minute, "time to live for block cache entries (0 for no expiration)") f.IntVar(&inputSensorParams.TxsCache.MaxSize, "max-txs", 32768, "maximum transactions to cache for serving to peers (0 for no limit)") f.DurationVar(&inputSensorParams.TxsCache.TTL, "txs-cache-ttl", 10*time.Minute, "time to live for transaction cache entries (0 for no expiration)") - f.IntVar(&inputSensorParams.KnownTxsCache.MaxSize, "max-known-txs", 32768, "maximum transaction hashes to track per peer (0 for no limit)") - f.DurationVar(&inputSensorParams.KnownTxsCache.TTL, "known-txs-cache-ttl", 5*time.Minute, "time to live for known transaction cache entries (0 for no expiration)") - f.IntVar(&inputSensorParams.KnownBlocksCache.MaxSize, "max-known-blocks", 1024, "maximum block hashes to track per peer (0 for no limit)") - f.DurationVar(&inputSensorParams.KnownBlocksCache.TTL, "known-blocks-cache-ttl", 5*time.Minute, "time to live for known block cache entries (0 for no expiration)") + f.UintVar(&inputSensorParams.KnownTxsBloom.Size, "known-txs-bloom-size", 327680, + `bloom filter size in bits for tracking known transactions per peer (default ~40KB per filter, +optimized for ~32K elements with ~1% false positive rate)`) + f.UintVar(&inputSensorParams.KnownTxsBloom.HashCount, "known-txs-bloom-hashes", 7, "number of hash functions for known txs bloom filter") + f.IntVar(&inputSensorParams.KnownBlocksMax, "max-known-blocks", 1024, "maximum block hashes to track per peer (0 for no limit)") } diff --git a/doc/polycli_p2p_sensor.md b/doc/polycli_p2p_sensor.md index 7a8a62549..0561950e4 100644 --- a/doc/polycli_p2p_sensor.md +++ b/doc/polycli_p2p_sensor.md @@ -91,60 +91,60 @@ polycli p2p sensor amoy-nodes.json \ ## Flags ```bash - --api-port uint port API server will listen on 
(default 8080) - --blocks-cache-ttl duration time to live for block cache entries (0 for no expiration) (default 10m0s) - -b, --bootnodes string comma separated nodes used for bootstrapping - --broadcast-block-hashes broadcast block hashes to peers - --broadcast-blocks broadcast full blocks to peers - --broadcast-tx-hashes broadcast transaction hashes to peers - --broadcast-txs broadcast full transactions to peers - --database string which database to persist data to, options are: - - datastore (GCP Datastore) - - json (output to stdout) - - none (no persistence) (default "none") - -d, --database-id string datastore database ID - --dial-ratio int ratio of inbound to dialed connections (dial ratio of 2 allows 1/2 of connections to be dialed, setting to 0 defaults to 3) - --discovery-dns string DNS discovery ENR tree URL - --discovery-port int UDP P2P discovery port (default 30303) - --fork-id bytesHex hex encoded fork ID (omit 0x) (default 22D523B2) - --genesis-hash string genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") - -h, --help help for sensor - --key string hex-encoded private key (cannot be set with --key-file) - -k, --key-file string private key file (cannot be set with --key) - --known-blocks-cache-ttl duration time to live for known block cache entries (0 for no expiration) (default 5m0s) - --known-txs-cache-ttl duration time to live for known transaction cache entries (0 for no expiration) (default 5m0s) - --max-blocks int maximum blocks to track across all peers (0 for no limit) (default 1024) - -D, --max-db-concurrency int maximum number of concurrent database operations to perform (increasing this - will result in less chance of missing data but can significantly increase memory usage) (default 10000) - --max-known-blocks int maximum block hashes to track per peer (0 for no limit) (default 1024) - --max-known-txs int maximum transaction hashes to track per peer (0 for no limit) (default 32768) - 
--max-parents int maximum parent block hashes to track per peer (0 for no limit) (default 1024) - -m, --max-peers int maximum number of peers to connect to (default 2000) - --max-requests int maximum request IDs to track per peer (0 for no limit) (default 2048) - --max-txs int maximum transactions to cache for serving to peers (0 for no limit) (default 32768) - --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") - -n, --network-id uint filter discovered nodes by this network ID - --no-discovery disable P2P peer discovery - --parents-cache-ttl duration time to live for parent hash cache entries (0 for no expiration) (default 5m0s) - --port int TCP network listening port (default 30303) - --pprof run pprof server - --pprof-port uint port pprof runs on (default 6060) - -p, --project-id string GCP project ID - --prom run Prometheus server (default true) - --prom-port uint port Prometheus runs on (default 2112) - --requests-cache-ttl duration time to live for requests cache entries (0 for no expiration) (default 5m0s) - --rpc string RPC endpoint used to fetch latest block (default "https://polygon-rpc.com") - --rpc-port uint port for JSON-RPC server to receive transactions (default 8545) - -s, --sensor-id string sensor ID when writing block/tx events - --static-nodes string static nodes file - --trusted-nodes string trusted nodes file - --ttl duration time to live (default 336h0m0s) - --txs-cache-ttl duration time to live for transaction cache entries (0 for no expiration) (default 10m0s) - --write-block-events write block events to database (default true) - -B, --write-blocks write blocks to database (default true) - --write-peers write peers to database (default true) - --write-tx-events write transaction events to database (this option can significantly increase CPU and memory usage) (default true) - -t, --write-txs write transactions to database (this option can significantly increase CPU and memory usage) (default true) + 
--api-port uint port API server will listen on (default 8080) + --blocks-cache-ttl duration time to live for block cache entries (0 for no expiration) (default 10m0s) + -b, --bootnodes string comma separated nodes used for bootstrapping + --broadcast-block-hashes broadcast block hashes to peers + --broadcast-blocks broadcast full blocks to peers + --broadcast-tx-hashes broadcast transaction hashes to peers + --broadcast-txs broadcast full transactions to peers + --database string which database to persist data to, options are: + - datastore (GCP Datastore) + - json (output to stdout) + - none (no persistence) (default "none") + -d, --database-id string datastore database ID + --dial-ratio int ratio of inbound to dialed connections (dial ratio of 2 allows 1/2 of connections to be dialed, setting to 0 defaults to 3) + --discovery-dns string DNS discovery ENR tree URL + --discovery-port int UDP P2P discovery port (default 30303) + --fork-id bytesHex hex encoded fork ID (omit 0x) (default 22D523B2) + --genesis-hash string genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") + -h, --help help for sensor + --key string hex-encoded private key (cannot be set with --key-file) + -k, --key-file string private key file (cannot be set with --key) + --known-txs-bloom-hashes uint number of hash functions for known txs bloom filter (default 7) + --known-txs-bloom-size uint bloom filter size in bits for tracking known transactions per peer (default ~40KB per filter, + optimized for ~32K elements with ~1% false positive rate) (default 327680) + --max-blocks int maximum blocks to track across all peers (0 for no limit) (default 1024) + -D, --max-db-concurrency int maximum number of concurrent database operations to perform (increasing this + will result in less chance of missing data but can significantly increase memory usage) (default 10000) + --max-known-blocks int maximum block hashes to track per peer (0 for no limit) (default 1024) 
+ --max-parents int maximum parent block hashes to track per peer (0 for no limit) (default 1024) + -m, --max-peers int maximum number of peers to connect to (default 2000) + --max-requests int maximum request IDs to track per peer (0 for no limit) (default 2048) + --max-txs int maximum transactions to cache for serving to peers (0 for no limit) (default 32768) + --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") + -n, --network-id uint filter discovered nodes by this network ID + --no-discovery disable P2P peer discovery + --parents-cache-ttl duration time to live for parent hash cache entries (0 for no expiration) (default 5m0s) + --port int TCP network listening port (default 30303) + --pprof run pprof server + --pprof-port uint port pprof runs on (default 6060) + -p, --project-id string GCP project ID + --prom run Prometheus server (default true) + --prom-port uint port Prometheus runs on (default 2112) + --requests-cache-ttl duration time to live for requests cache entries (0 for no expiration) (default 5m0s) + --rpc string RPC endpoint used to fetch latest block (default "https://polygon-rpc.com") + --rpc-port uint port for JSON-RPC server to receive transactions (default 8545) + -s, --sensor-id string sensor ID when writing block/tx events + --static-nodes string static nodes file + --trusted-nodes string trusted nodes file + --ttl duration time to live (default 336h0m0s) + --txs-cache-ttl duration time to live for transaction cache entries (0 for no expiration) (default 10m0s) + --write-block-events write block events to database (default true) + -B, --write-blocks write blocks to database (default true) + --write-peers write peers to database (default true) + --write-tx-events write transaction events to database (this option can significantly increase CPU and memory usage) (default true) + -t, --write-txs write transactions to database (this option can significantly increase CPU and memory usage) (default true) ``` The 
command also inherits flags from parent commands. diff --git a/go.mod b/go.mod index 41159a61c..507b6ff75 100644 --- a/go.mod +++ b/go.mod @@ -102,7 +102,7 @@ require ( github.com/cockroachdb/redact v1.1.5 // indirect github.com/consensys/gnark-crypto v0.19.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/deckarep/golang-set/v2 v2.6.0 // indirect + github.com/deckarep/golang-set/v2 v2.6.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/getsentry/sentry-go v0.29.1 // indirect @@ -116,7 +116,7 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect github.com/googleapis/gax-go/v2 v2.17.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect - github.com/holiman/bloomfilter/v2 v2.0.3 // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 github.com/holiman/uint256 v1.3.2 github.com/huin/goupnp v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect diff --git a/p2p/bloomset.go b/p2p/bloomset.go new file mode 100644 index 000000000..e64fd85f3 --- /dev/null +++ b/p2p/bloomset.go @@ -0,0 +1,173 @@ +package p2p + +import ( + "encoding/binary" + "sync" + + "github.com/ethereum/go-ethereum/common" + bloomfilter "github.com/holiman/bloomfilter/v2" +) + +// BloomSetOptions contains configuration for creating a BloomSet. +type BloomSetOptions struct { + // Size is the number of bits in the bloom filter. + // Larger size = lower false positive rate but more memory. + // Recommended: 10 * expected_elements for ~1% false positive rate. + Size uint + + // HashCount is the number of hash functions to use. + // Recommended: 7 for ~1% false positive rate. + HashCount uint +} + +// DefaultBloomSetOptions returns sensible defaults for tracking ~32K elements +// with approximately 1% false positive rate. +// Memory usage: ~80KB per BloomSet (2 filters of ~40KB each). 
+func DefaultBloomSetOptions() BloomSetOptions { + return BloomSetOptions{ + Size: 327680, // 32768 * 10 bits ≈ 40KB per filter + HashCount: 7, + } +} + +// BloomSet is a memory-efficient probabilistic set for tracking seen hashes. +// It uses a rotating dual-bloom-filter design: +// - "current" filter receives all new additions +// - "previous" filter is checked during lookups for recency +// - Rotate() moves current to previous and creates a fresh current +// +// Trade-offs vs LRU cache: +// - Pro: ~10x less memory, minimal GC pressure (fixed-size arrays) +// - Pro: O(1) add/lookup with very low constant factor +// - Con: False positives possible (~1% with default settings) +// - Con: No exact eviction control (use Rotate for approximate TTL) +// +// For knownTxs, false positives mean occasionally not broadcasting a tx +// to a peer that doesn't have it - acceptable since they'll get it elsewhere. +// +// This implementation wraps holiman/bloomfilter/v2, the same battle-tested +// bloom filter library used by geth for state pruning. +type BloomSet struct { + mu sync.RWMutex + current *bloomfilter.Filter + previous *bloomfilter.Filter + m uint64 // bits per filter + k uint64 // hash functions +} + +// NewBloomSet creates a new BloomSet with the given options. +// If options are zero-valued, defaults are applied. +func NewBloomSet(opts BloomSetOptions) *BloomSet { + defaults := DefaultBloomSetOptions() + if opts.Size == 0 { + opts.Size = defaults.Size + } + if opts.HashCount == 0 { + opts.HashCount = defaults.HashCount + } + + m := uint64(opts.Size) + k := uint64(opts.HashCount) + + current, _ := bloomfilter.New(m, k) + previous, _ := bloomfilter.New(m, k) + + return &BloomSet{ + current: current, + previous: previous, + m: m, + k: k, + } +} + +// bloomHash converts common.Hash to uint64 for the bloom filter. +// Uses first 8 bytes - sufficient since keccak256 hashes are already +// cryptographically distributed (same approach as geth). 
+func bloomHash(hash common.Hash) uint64 { + return binary.BigEndian.Uint64(hash[:8]) +} + +// Add adds a hash to the set. +func (b *BloomSet) Add(hash common.Hash) { + b.mu.Lock() + defer b.mu.Unlock() + + b.current.AddHash(bloomHash(hash)) +} + +// AddMany adds multiple hashes to the set efficiently. +func (b *BloomSet) AddMany(hashes []common.Hash) { + b.mu.Lock() + defer b.mu.Unlock() + + for _, hash := range hashes { + b.current.AddHash(bloomHash(hash)) + } +} + +// Contains checks if a hash might be in the set. +// Returns true if the hash is probably in the set (may have false positives). +// Returns false if the hash is definitely not in the set. +func (b *BloomSet) Contains(hash common.Hash) bool { + b.mu.RLock() + defer b.mu.RUnlock() + + h := bloomHash(hash) + return b.current.ContainsHash(h) || b.previous.ContainsHash(h) +} + +// FilterNotContained returns hashes that are definitely not in the set. +// Hashes that might be in the set (including false positives) are excluded. +func (b *BloomSet) FilterNotContained(hashes []common.Hash) []common.Hash { + b.mu.RLock() + defer b.mu.RUnlock() + + result := make([]common.Hash, 0, len(hashes)) + for _, hash := range hashes { + h := bloomHash(hash) + if !b.current.ContainsHash(h) && !b.previous.ContainsHash(h) { + result = append(result, hash) + } + } + return result +} + +// Rotate moves the current filter to previous and creates a fresh current. +// Call this periodically to maintain approximate recency (e.g., every N minutes). +// After rotation, lookups still check the previous filter, so recently-added +// items remain "known" for one more rotation period. +func (b *BloomSet) Rotate() { + b.mu.Lock() + defer b.mu.Unlock() + + b.previous = b.current + b.current, _ = bloomfilter.New(b.m, b.k) +} + +// Count returns the approximate number of elements added since last rotation. +// This uses the bloom filter's internal count of added elements. 
+func (b *BloomSet) Count() uint { + b.mu.RLock() + defer b.mu.RUnlock() + return uint(b.current.N()) +} + +// Reset clears both filters. +func (b *BloomSet) Reset() { + b.mu.Lock() + defer b.mu.Unlock() + + b.current, _ = bloomfilter.New(b.m, b.k) + b.previous, _ = bloomfilter.New(b.m, b.k) +} + +// MemoryUsage returns the approximate memory usage in bytes. +func (b *BloomSet) MemoryUsage() uint { + b.mu.RLock() + defer b.mu.RUnlock() + + // Two filters, each with m bits = m/8 bytes + // Round up to account for uint64 alignment + bytesPerFilter := (b.m + 63) / 64 * 8 + return uint(bytesPerFilter * 2) +} diff --git a/p2p/bloomset_test.go b/p2p/bloomset_test.go new file mode 100644 index 000000000..acb478cef --- /dev/null +++ b/p2p/bloomset_test.go @@ -0,0 +1,224 @@ +package p2p + +import ( + "encoding/binary" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +func TestBloomSet_AddAndContains(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) + + hash1 := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") + hash2 := common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890") + + // Initially should not contain anything + if b.Contains(hash1) { + t.Error("expected hash1 not to be contained initially") + } + + // Add hash1 + b.Add(hash1) + + // Should contain hash1 + if !b.Contains(hash1) { + t.Error("expected hash1 to be contained after add") + } + + // Should not contain hash2 + if b.Contains(hash2) { + t.Error("expected hash2 not to be contained") + } +} + +func TestBloomSet_AddMany(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) + + hashes := make([]common.Hash, 100) + for i := range hashes { + hashes[i] = common.BytesToHash([]byte{byte(i), byte(i + 1), byte(i + 2)}) + } + + b.AddMany(hashes) + + // All added hashes should be contained + for i, hash := range hashes { + if !b.Contains(hash) { + t.Errorf("expected hash %d to be 
contained", i) + } + } +} + +func TestBloomSet_FilterNotContained(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) + + // Add some hashes + known := []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"), + } + b.AddMany(known) + + // Create a mixed list + unknown := []common.Hash{ + common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333"), + common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444"), + } + mixed := append(known, unknown...) + + // Filter should return only unknown hashes + result := b.FilterNotContained(mixed) + + if len(result) != 2 { + t.Errorf("expected 2 unknown hashes, got %d", len(result)) + } + + for _, h := range result { + if h == known[0] || h == known[1] { + t.Errorf("known hash %s should not be in result", h.Hex()) + } + } +} + +func TestBloomSet_Rotate(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) + + hash1 := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + hash2 := common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") + + // Add hash1 to current + b.Add(hash1) + + // Rotate - hash1 moves to previous + b.Rotate() + + // hash1 should still be found (in previous) + if !b.Contains(hash1) { + t.Error("expected hash1 to be found after first rotation") + } + + // Add hash2 to current + b.Add(hash2) + + // Rotate again - hash1's filter is now cleared, hash2 moves to previous + b.Rotate() + + // hash1 should no longer be found + if b.Contains(hash1) { + t.Error("expected hash1 not to be found after second rotation") + } + + // hash2 should still be found + if !b.Contains(hash2) { + t.Error("expected hash2 to be found after second rotation") + } +} + +func TestBloomSet_MemoryUsage(t *testing.T) { + opts := DefaultBloomSetOptions() + b := 
NewBloomSet(opts) + + usage := b.MemoryUsage() + + // With default 327680 bits = 5120 words per filter * 8 bytes * 2 filters = 81920 bytes + expectedBytes := uint((327680+63)/64) * 8 * 2 + if usage != expectedBytes { + t.Errorf("expected memory usage %d bytes, got %d", expectedBytes, usage) + } +} + +// generateTestHash creates a deterministic hash from a seed using keccak256. +func generateTestHash(seed uint64) common.Hash { + var buf [8]byte + binary.BigEndian.PutUint64(buf[:], seed) + return crypto.Keccak256Hash(buf[:]) +} + +func TestBloomSet_FalsePositiveRate(t *testing.T) { + // Test that false positive rate is approximately as expected + b := NewBloomSet(DefaultBloomSetOptions()) + + // Add 32768 unique hashes (the design capacity) + // Use keccak256 to generate properly distributed hashes + for i := uint64(0); i < 32768; i++ { + b.Add(generateTestHash(i)) + } + + // Test 10000 hashes that were NOT added (different seed range) + falsePositives := 0 + for i := uint64(100000); i < 110000; i++ { + if b.Contains(generateTestHash(i)) { + falsePositives++ + } + } + + // Expected ~1% false positive rate, allow up to 3% for statistical variance + rate := float64(falsePositives) / 10000.0 + if rate > 0.03 { + t.Errorf("false positive rate too high: %.2f%% (expected ~1%%)", rate*100) + } + t.Logf("False positive rate: %.2f%%", rate*100) +} + +func BenchmarkBloomSet_Add(b *testing.B) { + bloom := NewBloomSet(DefaultBloomSetOptions()) + hash := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") + + for b.Loop() { + bloom.Add(hash) + } +} + +func BenchmarkBloomSet_Contains(b *testing.B) { + bloom := NewBloomSet(DefaultBloomSetOptions()) + hash := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") + bloom.Add(hash) + + for b.Loop() { + bloom.Contains(hash) + } +} + +func BenchmarkBloomSet_FilterNotContained(b *testing.B) { + bloom := NewBloomSet(DefaultBloomSetOptions()) + + // Add 1000 hashes + for i 
:= range 1000 { + hash := common.BytesToHash([]byte{byte(i >> 8), byte(i)}) + bloom.Add(hash) + } + + // Create a batch of 100 hashes (mix of known and unknown) + batch := make([]common.Hash, 100) + for i := range batch { + batch[i] = common.BytesToHash([]byte{byte(i >> 8), byte(i)}) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + bloom.FilterNotContained(batch) + } +} + +func BenchmarkCache_FilterNotContained(b *testing.B) { + cache := NewCache[common.Hash, struct{}](CacheOptions{MaxSize: 32768}) + + // Add 1000 hashes + for i := range 1000 { + hash := common.BytesToHash([]byte{byte(i >> 8), byte(i)}) + cache.Add(hash, struct{}{}) + } + + // Create a batch of 100 hashes (mix of known and unknown) + batch := make([]common.Hash, 100) + for i := range batch { + batch[i] = common.BytesToHash([]byte{byte(i >> 8), byte(i)}) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.FilterNotContained(batch) + } +} diff --git a/p2p/conns.go b/p2p/conns.go index d022a8e4b..818f29795 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -24,8 +24,8 @@ type BlockCache struct { type ConnsOptions struct { BlocksCache CacheOptions TxsCache CacheOptions - KnownTxsCache CacheOptions - KnownBlocksCache CacheOptions + KnownTxsBloom BloomSetOptions + KnownBlocksMax int Head eth.NewBlockPacket ShouldBroadcastTx bool ShouldBroadcastTxHashes bool @@ -46,9 +46,10 @@ type Conns struct { // txs caches transactions for serving to peers and duplicate detection txs *Cache[common.Hash, *types.Transaction] - // knownTxsOpts and knownBlocksOpts store cache options for per-peer caches - knownTxsOpts CacheOptions - knownBlocksOpts CacheOptions + // knownTxsOpts stores bloom filter options for per-peer known tx tracking + knownTxsOpts BloomSetOptions + // knownBlocksMax stores the maximum size for per-peer known block caches + knownBlocksMax int // oldest stores the first block the sensor has seen so when fetching // parent blocks, it does not request blocks older than this. 
@@ -76,8 +77,8 @@ func NewConns(opts ConnsOptions) *Conns { conns: make(map[string]*conn), blocks: NewCache[common.Hash, BlockCache](opts.BlocksCache), txs: NewCache[common.Hash, *types.Transaction](opts.TxsCache), - knownTxsOpts: opts.KnownTxsCache, - knownBlocksOpts: opts.KnownBlocksCache, + knownTxsOpts: opts.KnownTxsBloom, + knownBlocksMax: opts.KnownBlocksMax, oldest: oldest, head: head, shouldBroadcastTx: opts.ShouldBroadcastTx, @@ -357,14 +358,14 @@ func (c *Conns) UpdateHeadBlock(packet eth.NewBlockPacket) bool { }) } -// KnownTxsOpts returns the cache options for per-peer known tx caches. -func (c *Conns) KnownTxsOpts() CacheOptions { +// KnownTxsOpts returns the bloom filter options for per-peer known tx tracking. +func (c *Conns) KnownTxsOpts() BloomSetOptions { return c.knownTxsOpts } -// KnownBlocksOpts returns the cache options for per-peer known block caches. -func (c *Conns) KnownBlocksOpts() CacheOptions { - return c.knownBlocksOpts +// KnownBlocksMax returns the maximum size for per-peer known block caches. +func (c *Conns) KnownBlocksMax() int { + return c.knownBlocksMax } // ShouldBroadcastTx returns whether full transaction broadcasting is enabled. diff --git a/p2p/protocol.go b/p2p/protocol.go index 0c5a3fb19..6409c69c9 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -9,6 +9,7 @@ import ( "math/big" "time" + mapset "github.com/deckarep/golang-set/v2" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" @@ -45,6 +46,36 @@ var protocolLengths = map[uint]uint64{ 69: 18, } +// knownCache is a simple set-based cache for tracking known block hashes. +// Unlike the generic Cache type, this uses mapset for lower memory overhead +// per peer (~60% reduction). When the cache reaches capacity, the oldest +// element is evicted via Pop(). 
+type knownCache struct {
+	hashes mapset.Set[common.Hash]
+	max    int
+}
+
+// newKnownCache creates a new knownCache with the specified maximum size.
+func newKnownCache(max int) *knownCache {
+	return &knownCache{
+		max:    max,
+		hashes: mapset.NewSet[common.Hash](),
+	}
+}
+
+// Add adds a hash to the cache, evicting the oldest element if at capacity. NOTE(review): max must be > 0 — if max <= 0 this eviction loop never terminates (Cardinality() >= 0 always holds, and Pop() on an empty set removes nothing), yet --max-known-blocks documents 0 as "no limit"; guard max <= 0 before looping.
+func (k *knownCache) Add(hash common.Hash) {
+	for k.hashes.Cardinality() >= k.max {
+		k.hashes.Pop()
+	}
+	k.hashes.Add(hash)
+}
+
+// Contains returns true if the hash exists in the cache.
+func (k *knownCache) Contains(hash common.Hash) bool {
+	return k.hashes.Contains(hash)
+}
+
 // conn represents an individual connection with a peer.
 type conn struct {
 	sensorID string
@@ -81,8 +112,10 @@ type conn struct {
 	shouldBroadcastBlockHashes bool
 
 	// Known caches track what this peer has seen to avoid redundant sends.
-	knownTxs    *Cache[common.Hash, struct{}]
-	knownBlocks *Cache[common.Hash, struct{}]
+	// knownTxs uses a bloom filter for memory efficiency (~40KB vs ~4MB per peer).
+	// knownBlocks uses a simple mapset for lower memory overhead than the generic Cache.
+	knownTxs    *BloomSet
+	knownBlocks *knownCache
 
 	// messages tracks per-peer message counts for API visibility.
messages *PeerMessages @@ -146,8 +179,8 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { shouldBroadcastTxHashes: opts.ShouldBroadcastTxHashes, shouldBroadcastBlocks: opts.ShouldBroadcastBlocks, shouldBroadcastBlockHashes: opts.ShouldBroadcastBlockHashes, - knownTxs: NewCache[common.Hash, struct{}](opts.Conns.KnownTxsOpts()), - knownBlocks: NewCache[common.Hash, struct{}](opts.Conns.KnownBlocksOpts()), + knownTxs: NewBloomSet(opts.Conns.KnownTxsOpts()), + knownBlocks: newKnownCache(opts.Conns.KnownBlocksMax()), messages: NewPeerMessages(), txAnnounce: make(chan []common.Hash), blockAnnounce: make(chan eth.NewBlockHashesPacket, maxQueuedBlockAnns), @@ -567,7 +600,7 @@ func (c *conn) addKnownTx(hash common.Hash) { return } - c.knownTxs.Add(hash, struct{}{}) + c.knownTxs.Add(hash) } // addKnownBlock adds a block hash to the known block cache. @@ -576,7 +609,7 @@ func (c *conn) addKnownBlock(hash common.Hash) { return } - c.knownBlocks.Add(hash, struct{}{}) + c.knownBlocks.Add(hash) } // hasKnownTx checks if a transaction hash is in the known tx cache. @@ -652,16 +685,7 @@ func (c *conn) prepareTxAnnouncements(queue []common.Hash) (pending, remaining [ maxHashes := min(maxTxPacketSize/common.HashLength, len(queue)) // Filter out known hashes in a single lock operation - batch := queue[:maxHashes] - pending = c.knownTxs.FilterNotContained(batch) - - // If we got fewer pending than the batch size, we processed some known hashes. - // Limit pending to maxTxPacketSize worth of hashes. 
- maxPending := maxTxPacketSize / common.HashLength - if len(pending) > maxPending { - pending = pending[:maxPending] - } - + pending = c.knownTxs.FilterNotContained(queue[:maxHashes]) remaining = queue[:copy(queue, queue[maxHashes:])] return pending, remaining } @@ -713,7 +737,7 @@ func (c *conn) sendTxAnnouncements(hashes []common.Hash) error { } // Mark all hashes as known in a single lock operation - c.knownTxs.AddMany(pending, struct{}{}) + c.knownTxs.AddMany(pending) return nil } From 4408b34ee5eff2e8153b2900cf307dbae9c734c9 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 09:36:47 -0500 Subject: [PATCH 35/48] feat: datastructures package --- cmd/p2p/sensor/sensor.go | 11 ++--- p2p/conns.go | 30 +++++++------ p2p/{ => datastructures}/bloomset.go | 2 +- p2p/{ => datastructures}/bloomset_test.go | 6 +-- p2p/datastructures/boundedset.go | 43 ++++++++++++++++++ p2p/{ => datastructures}/locked.go | 2 +- p2p/{cache.go => datastructures/lru.go} | 40 ++++++++--------- p2p/protocol.go | 54 +++++------------------ 8 files changed, 102 insertions(+), 86 deletions(-) rename p2p/{ => datastructures}/bloomset.go (99%) rename p2p/{ => datastructures}/bloomset_test.go (97%) create mode 100644 p2p/datastructures/boundedset.go rename p2p/{ => datastructures}/locked.go (97%) rename p2p/{cache.go => datastructures/lru.go} (87%) diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index 712669612..5daa5f397 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -29,6 +29,7 @@ import ( "github.com/rs/zerolog/log" "github.com/spf13/cobra" + ds "github.com/0xPolygon/polygon-cli/p2p/datastructures" "github.com/0xPolygon/polygon-cli/flag" "github.com/0xPolygon/polygon-cli/p2p" "github.com/0xPolygon/polygon-cli/p2p/database" @@ -75,11 +76,11 @@ type ( DiscoveryDNS string Database string NoDiscovery bool - RequestsCache p2p.CacheOptions - ParentsCache p2p.CacheOptions - BlocksCache p2p.CacheOptions - TxsCache p2p.CacheOptions - KnownTxsBloom 
p2p.BloomSetOptions + RequestsCache ds.LRUOptions + ParentsCache ds.LRUOptions + BlocksCache ds.LRUOptions + TxsCache ds.LRUOptions + KnownTxsBloom ds.BloomSetOptions KnownBlocksMax int bootnodes []*enode.Node diff --git a/p2p/conns.go b/p2p/conns.go index 818f29795..b89d42e3d 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -11,6 +11,8 @@ import ( ethp2p "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/rs/zerolog/log" + + ds "github.com/0xPolygon/polygon-cli/p2p/datastructures" ) // BlockCache stores the actual block data to avoid duplicate fetches and database queries. @@ -22,9 +24,9 @@ type BlockCache struct { // ConnsOptions contains configuration options for creating a new Conns manager. type ConnsOptions struct { - BlocksCache CacheOptions - TxsCache CacheOptions - KnownTxsBloom BloomSetOptions + BlocksCache ds.LRUOptions + TxsCache ds.LRUOptions + KnownTxsBloom ds.BloomSetOptions KnownBlocksMax int Head eth.NewBlockPacket ShouldBroadcastTx bool @@ -41,22 +43,22 @@ type Conns struct { // blocks tracks blocks written to the database across all peers // to avoid duplicate writes and requests. - blocks *Cache[common.Hash, BlockCache] + blocks *ds.LRU[common.Hash, BlockCache] // txs caches transactions for serving to peers and duplicate detection - txs *Cache[common.Hash, *types.Transaction] + txs *ds.LRU[common.Hash, *types.Transaction] // knownTxsOpts stores bloom filter options for per-peer known tx tracking - knownTxsOpts BloomSetOptions + knownTxsOpts ds.BloomSetOptions // knownBlocksMax stores the maximum size for per-peer known block caches knownBlocksMax int // oldest stores the first block the sensor has seen so when fetching // parent blocks, it does not request blocks older than this. - oldest *Locked[*types.Header] + oldest *ds.Locked[*types.Header] // head keeps track of the current head block of the chain. 
- head *Locked[eth.NewBlockPacket] + head *ds.Locked[eth.NewBlockPacket] // Broadcast flags control what gets cached and rebroadcasted shouldBroadcastTx bool @@ -67,16 +69,16 @@ type Conns struct { // NewConns creates a new connection manager with a blocks cache. func NewConns(opts ConnsOptions) *Conns { - head := &Locked[eth.NewBlockPacket]{} + head := &ds.Locked[eth.NewBlockPacket]{} head.Set(opts.Head) - oldest := &Locked[*types.Header]{} + oldest := &ds.Locked[*types.Header]{} oldest.Set(opts.Head.Block.Header()) return &Conns{ conns: make(map[string]*conn), - blocks: NewCache[common.Hash, BlockCache](opts.BlocksCache), - txs: NewCache[common.Hash, *types.Transaction](opts.TxsCache), + blocks: ds.NewLRU[common.Hash, BlockCache](opts.BlocksCache), + txs: ds.NewLRU[common.Hash, *types.Transaction](opts.TxsCache), knownTxsOpts: opts.KnownTxsBloom, knownBlocksMax: opts.KnownBlocksMax, oldest: oldest, @@ -331,7 +333,7 @@ func (c *Conns) GetTxs(hashes []common.Hash) []*types.Transaction { } // Blocks returns the global blocks cache. -func (c *Conns) Blocks() *Cache[common.Hash, BlockCache] { +func (c *Conns) Blocks() *ds.LRU[common.Hash, BlockCache] { return c.blocks } @@ -359,7 +361,7 @@ func (c *Conns) UpdateHeadBlock(packet eth.NewBlockPacket) bool { } // KnownTxsOpts returns the bloom filter options for per-peer known tx tracking. 
-func (c *Conns) KnownTxsOpts() BloomSetOptions { +func (c *Conns) KnownTxsOpts() ds.BloomSetOptions { return c.knownTxsOpts } diff --git a/p2p/bloomset.go b/p2p/datastructures/bloomset.go similarity index 99% rename from p2p/bloomset.go rename to p2p/datastructures/bloomset.go index e64fd85f3..500d65d56 100644 --- a/p2p/bloomset.go +++ b/p2p/datastructures/bloomset.go @@ -1,4 +1,4 @@ -package p2p +package datastructures import ( "encoding/binary" diff --git a/p2p/bloomset_test.go b/p2p/datastructures/bloomset_test.go similarity index 97% rename from p2p/bloomset_test.go rename to p2p/datastructures/bloomset_test.go index acb478cef..23a386e25 100644 --- a/p2p/bloomset_test.go +++ b/p2p/datastructures/bloomset_test.go @@ -1,4 +1,4 @@ -package p2p +package datastructures import ( "encoding/binary" @@ -202,8 +202,8 @@ func BenchmarkBloomSet_FilterNotContained(b *testing.B) { } } -func BenchmarkCache_FilterNotContained(b *testing.B) { - cache := NewCache[common.Hash, struct{}](CacheOptions{MaxSize: 32768}) +func BenchmarkLRU_FilterNotContained(b *testing.B) { + cache := NewLRU[common.Hash, struct{}](LRUOptions{MaxSize: 32768}) // Add 1000 hashes for i := range 1000 { diff --git a/p2p/datastructures/boundedset.go b/p2p/datastructures/boundedset.go new file mode 100644 index 000000000..e63128010 --- /dev/null +++ b/p2p/datastructures/boundedset.go @@ -0,0 +1,43 @@ +package datastructures + +import mapset "github.com/deckarep/golang-set/v2" + +// BoundedSet is a simple set-based collection with a maximum size. +// When the set reaches capacity, the oldest element is evicted via Pop(). +// This provides lower memory overhead compared to a full LRU cache when +// only membership tracking is needed without value storage. +type BoundedSet[T comparable] struct { + set mapset.Set[T] + max int +} + +// NewBoundedSet creates a new BoundedSet with the specified maximum size. 
+func NewBoundedSet[T comparable](max int) *BoundedSet[T] {
+	return &BoundedSet[T]{
+		max: max,
+		set: mapset.NewSet[T](),
+	}
+}
+
+// Add adds an element to the set, evicting the oldest element if at capacity. NOTE(review): max must be > 0 — if max <= 0 this eviction loop never terminates (Cardinality() >= 0 always holds, and Pop() on an empty set removes nothing); guard max <= 0 before looping.
+func (b *BoundedSet[T]) Add(elem T) {
+	for b.set.Cardinality() >= b.max {
+		b.set.Pop()
+	}
+	b.set.Add(elem)
+}
+
+// Contains returns true if the element exists in the set.
+func (b *BoundedSet[T]) Contains(elem T) bool {
+	return b.set.Contains(elem)
+}
+
+// Len returns the number of elements in the set.
+func (b *BoundedSet[T]) Len() int {
+	return b.set.Cardinality()
+}
+
+// Clear removes all elements from the set.
+func (b *BoundedSet[T]) Clear() {
+	b.set.Clear()
+}
diff --git a/p2p/locked.go b/p2p/datastructures/locked.go
similarity index 97%
rename from p2p/locked.go
rename to p2p/datastructures/locked.go
index d2b8593e6..1489bd062 100644
--- a/p2p/locked.go
+++ b/p2p/datastructures/locked.go
@@ -1,4 +1,4 @@
-package p2p
+package datastructures
 
 import "sync"
 
diff --git a/p2p/cache.go b/p2p/datastructures/lru.go
similarity index 87%
rename from p2p/cache.go
rename to p2p/datastructures/lru.go
index 35608ef44..e5f4cec03 100644
--- a/p2p/cache.go
+++ b/p2p/datastructures/lru.go
@@ -1,4 +1,4 @@
-package p2p
+package datastructures
 
 import (
 	"container/list"
@@ -6,14 +6,14 @@ import (
 	"time"
 )
 
-// CacheOptions contains configuration for LRU caches with TTL.
-type CacheOptions struct {
+// LRUOptions contains configuration for LRU caches with TTL.
+type LRUOptions struct {
 	MaxSize int
 	TTL     time.Duration
 }
 
-// Cache is a thread-safe LRU cache with optional TTL-based expiration.
-type Cache[K comparable, V any] struct {
+// LRU is a thread-safe LRU cache with optional TTL-based expiration.
+type LRU[K comparable, V any] struct {
 	mu      sync.RWMutex
 	maxSize int
 	ttl     time.Duration
@@ -27,11 +27,11 @@ type entry[K comparable, V any] struct {
 	expiresAt *time.Time
 }
 
-// NewCache creates a new cache with the given options.
+// NewLRU creates a new LRU cache with the given options. // If opts.MaxSize <= 0, the cache has no size limit. // If opts.TTL is 0, entries never expire based on time. -func NewCache[K comparable, V any](opts CacheOptions) *Cache[K, V] { - return &Cache[K, V]{ +func NewLRU[K comparable, V any](opts LRUOptions) *LRU[K, V] { + return &LRU[K, V]{ maxSize: opts.MaxSize, ttl: opts.TTL, items: make(map[K]*list.Element), @@ -40,7 +40,7 @@ func NewCache[K comparable, V any](opts CacheOptions) *Cache[K, V] { } // Add adds or updates a value in the cache. -func (c *Cache[K, V]) Add(key K, value V) { +func (c *LRU[K, V]) Add(key K, value V) { c.mu.Lock() defer c.mu.Unlock() @@ -77,7 +77,7 @@ func (c *Cache[K, V]) Add(key K, value V) { } // Get retrieves a value from the cache and updates LRU ordering. -func (c *Cache[K, V]) Get(key K) (V, bool) { +func (c *LRU[K, V]) Get(key K) (V, bool) { c.mu.Lock() defer c.mu.Unlock() @@ -103,7 +103,7 @@ func (c *Cache[K, V]) Get(key K) (V, bool) { // GetMany retrieves multiple values from the cache and updates LRU ordering. // Uses a single write lock for all lookups, reducing lock contention compared // to calling Get in a loop. Returns a slice of values for keys that were found. -func (c *Cache[K, V]) GetMany(keys []K) []V { +func (c *LRU[K, V]) GetMany(keys []K) []V { if len(keys) == 0 { return nil } @@ -137,7 +137,7 @@ func (c *Cache[K, V]) GetMany(keys []K) []V { // Peek retrieves a value from the cache without updating LRU ordering. // Uses a read lock for better concurrency. -func (c *Cache[K, V]) Peek(key K) (V, bool) { +func (c *LRU[K, V]) Peek(key K) (V, bool) { c.mu.RLock() defer c.mu.RUnlock() @@ -161,7 +161,7 @@ func (c *Cache[K, V]) Peek(key K) (V, bool) { // The update function receives the current value (or zero value if not found) and // returns the new value to store. This is thread-safe and prevents race conditions // in get-modify-add patterns. 
-func (c *Cache[K, V]) Update(key K, updateFn func(V) V) { +func (c *LRU[K, V]) Update(key K, updateFn func(V) V) { c.mu.Lock() defer c.mu.Unlock() @@ -211,7 +211,7 @@ func (c *Cache[K, V]) Update(key K, updateFn func(V) V) { // Contains checks if a key exists in the cache and is not expired. // Uses a read lock and doesn't update LRU ordering. -func (c *Cache[K, V]) Contains(key K) bool { +func (c *LRU[K, V]) Contains(key K) bool { c.mu.RLock() defer c.mu.RUnlock() @@ -230,7 +230,7 @@ func (c *Cache[K, V]) Contains(key K) bool { } // Remove removes a key from the cache and returns the value if it existed. -func (c *Cache[K, V]) Remove(key K) (V, bool) { +func (c *LRU[K, V]) Remove(key K) (V, bool) { c.mu.Lock() defer c.mu.Unlock() @@ -246,14 +246,14 @@ func (c *Cache[K, V]) Remove(key K) (V, bool) { } // Len returns the number of items in the cache. -func (c *Cache[K, V]) Len() int { +func (c *LRU[K, V]) Len() int { c.mu.RLock() defer c.mu.RUnlock() return c.list.Len() } // Purge clears all items from the cache. -func (c *Cache[K, V]) Purge() { +func (c *LRU[K, V]) Purge() { c.mu.Lock() defer c.mu.Unlock() @@ -262,7 +262,7 @@ func (c *Cache[K, V]) Purge() { } // Keys returns all keys in the cache. -func (c *Cache[K, V]) Keys() []K { +func (c *LRU[K, V]) Keys() []K { c.mu.RLock() defer c.mu.RUnlock() @@ -277,7 +277,7 @@ func (c *Cache[K, V]) Keys() []K { // FilterNotContained returns the subset of keys that are not in the cache. // Uses a single read lock for all lookups, reducing lock contention compared // to calling Contains in a loop. -func (c *Cache[K, V]) FilterNotContained(keys []K) []K { +func (c *LRU[K, V]) FilterNotContained(keys []K) []K { c.mu.RLock() defer c.mu.RUnlock() @@ -303,7 +303,7 @@ func (c *Cache[K, V]) FilterNotContained(keys []K) []K { // AddMany adds multiple keys with the same value to the cache. // Uses a single write lock for all additions, reducing lock contention // compared to calling Add in a loop. 
-func (c *Cache[K, V]) AddMany(keys []K, value V) { +func (c *LRU[K, V]) AddMany(keys []K, value V) { if len(keys) == 0 { return } diff --git a/p2p/protocol.go b/p2p/protocol.go index 6409c69c9..7fb287c46 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -9,7 +9,6 @@ import ( "math/big" "time" - mapset "github.com/deckarep/golang-set/v2" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" @@ -21,6 +20,7 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" + ds "github.com/0xPolygon/polygon-cli/p2p/datastructures" "github.com/0xPolygon/polygon-cli/p2p/database" ) @@ -46,36 +46,6 @@ var protocolLengths = map[uint]uint64{ 69: 18, } -// knownCache is a simple set-based cache for tracking known block hashes. -// Unlike the generic Cache type, this uses mapset for lower memory overhead -// per peer (~60% reduction). When the cache reaches capacity, the oldest -// element is evicted via Pop(). -type knownCache struct { - hashes mapset.Set[common.Hash] - max int -} - -// newKnownCache creates a new knownCache with the specified maximum size. -func newKnownCache(max int) *knownCache { - return &knownCache{ - max: max, - hashes: mapset.NewSet[common.Hash](), - } -} - -// Add adds a hash to the cache, evicting the oldest element if at capacity. -func (k *knownCache) Add(hash common.Hash) { - for k.hashes.Cardinality() >= k.max { - k.hashes.Pop() - } - k.hashes.Add(hash) -} - -// Contains returns true if the hash exists in the cache. -func (k *knownCache) Contains(hash common.Hash) bool { - return k.hashes.Contains(hash) -} - // conn represents an individual connection with a peer. type conn struct { sensorID string @@ -88,12 +58,12 @@ type conn struct { // requests is used to store the request ID and the block hash. This is used // when fetching block bodies because the eth protocol block bodies do not // contain information about the block hash. 
- requests *Cache[uint64, common.Hash] + requests *ds.LRU[uint64, common.Hash] requestNum uint64 // parents tracks hashes of blocks requested as parents to mark them // with IsParent=true when writing to the database. - parents *Cache[common.Hash, struct{}] + parents *ds.LRU[common.Hash, struct{}] // conns provides access to the global connection manager, which includes // the blocks cache shared across all peers. @@ -113,9 +83,9 @@ type conn struct { // Known caches track what this peer has seen to avoid redundant sends. // knownTxs uses a bloom filter for memory efficiency (~40KB vs ~4MB per peer). - // knownBlocks uses a simple mapset for lower memory overhead than the generic Cache. - knownTxs *BloomSet - knownBlocks *knownCache + // knownBlocks uses a simple bounded set for lower memory overhead than the generic LRU. + knownTxs *ds.BloomSet + knownBlocks *ds.BoundedSet[common.Hash] // messages tracks per-peer message counts for API visibility. messages *PeerMessages @@ -143,8 +113,8 @@ type EthProtocolOptions struct { ForkID forkid.ID // Cache configurations - RequestsCache CacheOptions - ParentsCache CacheOptions + RequestsCache ds.LRUOptions + ParentsCache ds.LRUOptions // Broadcast flags control what gets rebroadcasted to other peers ShouldBroadcastTx bool @@ -168,9 +138,9 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { logger: log.With().Str("peer", peerURL).Logger(), rw: rw, db: opts.Database, - requests: NewCache[uint64, common.Hash](opts.RequestsCache), + requests: ds.NewLRU[uint64, common.Hash](opts.RequestsCache), requestNum: 0, - parents: NewCache[common.Hash, struct{}](opts.ParentsCache), + parents: ds.NewLRU[common.Hash, struct{}](opts.ParentsCache), peer: p, conns: opts.Conns, connectedAt: time.Now(), @@ -179,8 +149,8 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { shouldBroadcastTxHashes: opts.ShouldBroadcastTxHashes, shouldBroadcastBlocks: opts.ShouldBroadcastBlocks, 
shouldBroadcastBlockHashes: opts.ShouldBroadcastBlockHashes, - knownTxs: NewBloomSet(opts.Conns.KnownTxsOpts()), - knownBlocks: newKnownCache(opts.Conns.KnownBlocksMax()), + knownTxs: ds.NewBloomSet(opts.Conns.KnownTxsOpts()), + knownBlocks: ds.NewBoundedSet[common.Hash](opts.Conns.KnownBlocksMax()), messages: NewPeerMessages(), txAnnounce: make(chan []common.Hash), blockAnnounce: make(chan eth.NewBlockHashesPacket, maxQueuedBlockAnns), From 99a460c02a06aca5f8ddeb5901060d2b339ff468 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 09:47:40 -0500 Subject: [PATCH 36/48] fix: improve lru --- p2p/conns.go | 15 ++++++++++++++- p2p/datastructures/lru.go | 32 ++++++++++++++++++++++++++++++++ p2p/protocol.go | 6 +++--- 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/p2p/conns.go b/p2p/conns.go index b89d42e3d..a7f7d2138 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -322,16 +322,29 @@ func (c *Conns) AddTx(hash common.Hash, tx *types.Transaction) { c.txs.Add(hash, tx) } -// GetTx retrieves a transaction from the shared cache. +// GetTx retrieves a transaction from the shared cache and updates LRU ordering. func (c *Conns) GetTx(hash common.Hash) (*types.Transaction, bool) { return c.txs.Get(hash) } // GetTxs retrieves multiple transactions from the shared cache in a single lock operation. +// Updates LRU ordering for found transactions. func (c *Conns) GetTxs(hashes []common.Hash) []*types.Transaction { return c.txs.GetMany(hashes) } +// PeekTx retrieves a transaction from the shared cache without updating LRU ordering. +// Uses a read lock for better concurrency when LRU ordering is not needed. +func (c *Conns) PeekTx(hash common.Hash) (*types.Transaction, bool) { + return c.txs.Peek(hash) +} + +// PeekTxs retrieves multiple transactions from the shared cache without updating LRU ordering. +// Uses a single read lock for better concurrency when LRU ordering is not needed. 
+func (c *Conns) PeekTxs(hashes []common.Hash) []*types.Transaction { + return c.txs.PeekMany(hashes) +} + // Blocks returns the global blocks cache. func (c *Conns) Blocks() *ds.LRU[common.Hash, BlockCache] { return c.blocks diff --git a/p2p/datastructures/lru.go b/p2p/datastructures/lru.go index e5f4cec03..ec5157bef 100644 --- a/p2p/datastructures/lru.go +++ b/p2p/datastructures/lru.go @@ -157,6 +157,38 @@ func (c *LRU[K, V]) Peek(key K) (V, bool) { return e.value, true } +// PeekMany retrieves multiple values from the cache without updating LRU ordering. +// Uses a single read lock for all lookups, providing better concurrency than GetMany +// when LRU ordering updates are not needed. +func (c *LRU[K, V]) PeekMany(keys []K) []V { + if len(keys) == 0 { + return nil + } + + c.mu.RLock() + defer c.mu.RUnlock() + + now := time.Now() + result := make([]V, 0, len(keys)) + + for _, key := range keys { + elem, ok := c.items[key] + if !ok { + continue + } + + e := elem.Value.(*entry[K, V]) + + if e.expiresAt != nil && now.After(*e.expiresAt) { + continue + } + + result = append(result, e.value) + } + + return result +} + // Update atomically updates a value in the cache using the provided update function. // The update function receives the current value (or zero value if not found) and // returns the new value to store. 
This is thread-safe and prevents race conditions diff --git a/p2p/protocol.go b/p2p/protocol.go index 7fb287c46..27404803a 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -682,7 +682,7 @@ func (c *conn) sendTxAnnouncements(hashes []common.Hash) error { ) for _, hash := range hashes { - tx, ok := c.conns.GetTx(hash) + tx, ok := c.conns.PeekTx(hash) if !ok || tx == nil { continue } @@ -1056,8 +1056,8 @@ func (c *conn) handleGetPooledTransactions(msg ethp2p.Msg) error { c.countMsgReceived(request.Name(), float64(len(request.GetPooledTransactionsRequest))) - // Try to serve from cache using batch lookup (single lock operation) - txs := c.conns.GetTxs(request.GetPooledTransactionsRequest) + // Try to serve from cache using batch lookup (single read lock operation) + txs := c.conns.PeekTxs(request.GetPooledTransactionsRequest) response := ð.PooledTransactionsPacket{ RequestId: request.RequestId, From e647649e1cf3541ecb228256efe45bc62f6fd192 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 10:26:00 -0500 Subject: [PATCH 37/48] fix: lint --- p2p/datastructures/bloomset_test.go | 242 ++++++++++++++-------------- 1 file changed, 122 insertions(+), 120 deletions(-) diff --git a/p2p/datastructures/bloomset_test.go b/p2p/datastructures/bloomset_test.go index 23a386e25..f47c8ca4a 100644 --- a/p2p/datastructures/bloomset_test.go +++ b/p2p/datastructures/bloomset_test.go @@ -8,125 +8,153 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) -func TestBloomSet_AddAndContains(t *testing.T) { - b := NewBloomSet(DefaultBloomSetOptions()) +func TestBloomSet(t *testing.T) { + t.Run("AddAndContains", func(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) - hash1 := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") - hash2 := common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890") + hash1 := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") + hash2 := 
common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890") - // Initially should not contain anything - if b.Contains(hash1) { - t.Error("expected hash1 not to be contained initially") - } + // Initially should not contain anything + if b.Contains(hash1) { + t.Error("expected hash1 not to be contained initially") + } - // Add hash1 - b.Add(hash1) + // Add hash1 + b.Add(hash1) - // Should contain hash1 - if !b.Contains(hash1) { - t.Error("expected hash1 to be contained after add") - } + // Should contain hash1 + if !b.Contains(hash1) { + t.Error("expected hash1 to be contained after add") + } - // Should not contain hash2 - if b.Contains(hash2) { - t.Error("expected hash2 not to be contained") - } -} + // Should not contain hash2 + if b.Contains(hash2) { + t.Error("expected hash2 not to be contained") + } + }) -func TestBloomSet_AddMany(t *testing.T) { - b := NewBloomSet(DefaultBloomSetOptions()) + t.Run("AddMany", func(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) - hashes := make([]common.Hash, 100) - for i := range hashes { - hashes[i] = common.BytesToHash([]byte{byte(i), byte(i + 1), byte(i + 2)}) - } + hashes := make([]common.Hash, 100) + for i := range hashes { + hashes[i] = common.BytesToHash([]byte{byte(i), byte(i + 1), byte(i + 2)}) + } - b.AddMany(hashes) + b.AddMany(hashes) - // All added hashes should be contained - for i, hash := range hashes { - if !b.Contains(hash) { - t.Errorf("expected hash %d to be contained", i) + // All added hashes should be contained + for i, hash := range hashes { + if !b.Contains(hash) { + t.Errorf("expected hash %d to be contained", i) + } } - } -} + }) -func TestBloomSet_FilterNotContained(t *testing.T) { - b := NewBloomSet(DefaultBloomSetOptions()) + t.Run("FilterNotContained", func(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) - // Add some hashes - known := []common.Hash{ - common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), - 
common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"), - } - b.AddMany(known) + // Add some hashes + known := []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"), + } + b.AddMany(known) - // Create a mixed list - unknown := []common.Hash{ - common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333"), - common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444"), - } - mixed := append(known, unknown...) + // Create a mixed list + unknown := []common.Hash{ + common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333"), + common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444"), + } + mixed := append(known, unknown...) - // Filter should return only unknown hashes - result := b.FilterNotContained(mixed) + // Filter should return only unknown hashes + result := b.FilterNotContained(mixed) - if len(result) != 2 { - t.Errorf("expected 2 unknown hashes, got %d", len(result)) - } + if len(result) != 2 { + t.Errorf("expected 2 unknown hashes, got %d", len(result)) + } - for _, h := range result { - if h == known[0] || h == known[1] { - t.Errorf("known hash %s should not be in result", h.Hex()) + for _, h := range result { + if h == known[0] || h == known[1] { + t.Errorf("known hash %s should not be in result", h.Hex()) + } } - } -} + }) -func TestBloomSet_Rotate(t *testing.T) { - b := NewBloomSet(DefaultBloomSetOptions()) + t.Run("Rotate", func(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) - hash1 := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") - hash2 := common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") + hash1 := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + 
hash2 := common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") - // Add hash1 to current - b.Add(hash1) + // Add hash1 to current + b.Add(hash1) - // Rotate - hash1 moves to previous - b.Rotate() + // Rotate - hash1 moves to previous + b.Rotate() - // hash1 should still be found (in previous) - if !b.Contains(hash1) { - t.Error("expected hash1 to be found after first rotation") - } + // hash1 should still be found (in previous) + if !b.Contains(hash1) { + t.Error("expected hash1 to be found after first rotation") + } - // Add hash2 to current - b.Add(hash2) + // Add hash2 to current + b.Add(hash2) - // Rotate again - hash1's filter is now cleared, hash2 moves to previous - b.Rotate() + // Rotate again - hash1's filter is now cleared, hash2 moves to previous + b.Rotate() - // hash1 should no longer be found - if b.Contains(hash1) { - t.Error("expected hash1 not to be found after second rotation") - } + // hash1 should no longer be found + if b.Contains(hash1) { + t.Error("expected hash1 not to be found after second rotation") + } - // hash2 should still be found - if !b.Contains(hash2) { - t.Error("expected hash2 to be found after second rotation") - } -} + // hash2 should still be found + if !b.Contains(hash2) { + t.Error("expected hash2 to be found after second rotation") + } + }) -func TestBloomSet_MemoryUsage(t *testing.T) { - opts := DefaultBloomSetOptions() - b := NewBloomSet(opts) + t.Run("MemoryUsage", func(t *testing.T) { + opts := DefaultBloomSetOptions() + b := NewBloomSet(opts) - usage := b.MemoryUsage() + usage := b.MemoryUsage() - // With default 327680 bits = 5120 words per filter * 8 bytes * 2 filters = 81920 bytes - expectedBytes := uint((327680+63)/64) * 8 * 2 - if usage != expectedBytes { - t.Errorf("expected memory usage %d bytes, got %d", expectedBytes, usage) - } + // With default 327680 bits = 5120 words per filter * 8 bytes * 2 filters = 81920 bytes + expectedBytes := uint((327680+63)/64) * 8 * 2 + if usage != 
expectedBytes { + t.Errorf("expected memory usage %d bytes, got %d", expectedBytes, usage) + } + }) + + t.Run("FalsePositiveRate", func(t *testing.T) { + // Test that false positive rate is approximately as expected + b := NewBloomSet(DefaultBloomSetOptions()) + + // Add 32768 unique hashes (the design capacity) + // Use keccak256 to generate properly distributed hashes + for i := uint64(0); i < 32768; i++ { + b.Add(generateTestHash(i)) + } + + // Test 10000 hashes that were NOT added (different seed range) + falsePositives := 0 + for i := uint64(100000); i < 110000; i++ { + if b.Contains(generateTestHash(i)) { + falsePositives++ + } + } + + // Expected ~1% false positive rate, allow up to 3% for statistical variance + rate := float64(falsePositives) / 10000.0 + if rate > 0.03 { + t.Errorf("false positive rate too high: %.2f%% (expected ~1%%)", rate*100) + } + t.Logf("False positive rate: %.2f%%", rate*100) + }) } // generateTestHash creates a deterministic hash from a seed using keccak256. 
@@ -136,33 +164,7 @@ func generateTestHash(seed uint64) common.Hash { return crypto.Keccak256Hash(buf[:]) } -func TestBloomSet_FalsePositiveRate(t *testing.T) { - // Test that false positive rate is approximately as expected - b := NewBloomSet(DefaultBloomSetOptions()) - - // Add 32768 unique hashes (the design capacity) - // Use keccak256 to generate properly distributed hashes - for i := uint64(0); i < 32768; i++ { - b.Add(generateTestHash(i)) - } - - // Test 10000 hashes that were NOT added (different seed range) - falsePositives := 0 - for i := uint64(100000); i < 110000; i++ { - if b.Contains(generateTestHash(i)) { - falsePositives++ - } - } - - // Expected ~1% false positive rate, allow up to 3% for statistical variance - rate := float64(falsePositives) / 10000.0 - if rate > 0.03 { - t.Errorf("false positive rate too high: %.2f%% (expected ~1%%)", rate*100) - } - t.Logf("False positive rate: %.2f%%", rate*100) -} - -func BenchmarkBloomSet_Add(b *testing.B) { +func BenchmarkBloomSetAdd(b *testing.B) { bloom := NewBloomSet(DefaultBloomSetOptions()) hash := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") @@ -171,7 +173,7 @@ func BenchmarkBloomSet_Add(b *testing.B) { } } -func BenchmarkBloomSet_Contains(b *testing.B) { +func BenchmarkBloomSetContains(b *testing.B) { bloom := NewBloomSet(DefaultBloomSetOptions()) hash := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") bloom.Add(hash) @@ -181,7 +183,7 @@ func BenchmarkBloomSet_Contains(b *testing.B) { } } -func BenchmarkBloomSet_FilterNotContained(b *testing.B) { +func BenchmarkBloomSetFilterNotContained(b *testing.B) { bloom := NewBloomSet(DefaultBloomSetOptions()) // Add 1000 hashes @@ -202,7 +204,7 @@ func BenchmarkBloomSet_FilterNotContained(b *testing.B) { } } -func BenchmarkLRU_FilterNotContained(b *testing.B) { +func BenchmarkLRUFilterNotContained(b *testing.B) { cache := NewLRU[common.Hash, struct{}](LRUOptions{MaxSize: 32768}) // 
Add 1000 hashes From ea8cd0fcea995b4a3e7c775bfa98e2b113748e0c Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 10:28:17 -0500 Subject: [PATCH 38/48] fix: lint --- p2p/datastructures/bloomset_test.go | 234 ++++++++++++++-------------- 1 file changed, 116 insertions(+), 118 deletions(-) diff --git a/p2p/datastructures/bloomset_test.go b/p2p/datastructures/bloomset_test.go index f47c8ca4a..0a173144d 100644 --- a/p2p/datastructures/bloomset_test.go +++ b/p2p/datastructures/bloomset_test.go @@ -8,153 +8,125 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) -func TestBloomSet(t *testing.T) { - t.Run("AddAndContains", func(t *testing.T) { - b := NewBloomSet(DefaultBloomSetOptions()) +func TestBloomSetAddAndContains(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) - hash1 := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") - hash2 := common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890") + hash1 := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") + hash2 := common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890") - // Initially should not contain anything - if b.Contains(hash1) { - t.Error("expected hash1 not to be contained initially") - } + // Initially should not contain anything + if b.Contains(hash1) { + t.Error("expected hash1 not to be contained initially") + } - // Add hash1 - b.Add(hash1) + // Add hash1 + b.Add(hash1) - // Should contain hash1 - if !b.Contains(hash1) { - t.Error("expected hash1 to be contained after add") - } + // Should contain hash1 + if !b.Contains(hash1) { + t.Error("expected hash1 to be contained after add") + } - // Should not contain hash2 - if b.Contains(hash2) { - t.Error("expected hash2 not to be contained") - } - }) + // Should not contain hash2 + if b.Contains(hash2) { + t.Error("expected hash2 not to be contained") + } +} - t.Run("AddMany", func(t *testing.T) { - b := 
NewBloomSet(DefaultBloomSetOptions()) +func TestBloomSetAddMany(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) - hashes := make([]common.Hash, 100) - for i := range hashes { - hashes[i] = common.BytesToHash([]byte{byte(i), byte(i + 1), byte(i + 2)}) - } + hashes := make([]common.Hash, 100) + for i := range hashes { + hashes[i] = common.BytesToHash([]byte{byte(i), byte(i + 1), byte(i + 2)}) + } - b.AddMany(hashes) + b.AddMany(hashes) - // All added hashes should be contained - for i, hash := range hashes { - if !b.Contains(hash) { - t.Errorf("expected hash %d to be contained", i) - } + // All added hashes should be contained + for i, hash := range hashes { + if !b.Contains(hash) { + t.Errorf("expected hash %d to be contained", i) } - }) + } +} - t.Run("FilterNotContained", func(t *testing.T) { - b := NewBloomSet(DefaultBloomSetOptions()) +func TestBloomSetFilterNotContained(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) - // Add some hashes - known := []common.Hash{ - common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), - common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"), - } - b.AddMany(known) + // Add some hashes + known := []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"), + } + b.AddMany(known) - // Create a mixed list - unknown := []common.Hash{ - common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333"), - common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444"), - } - mixed := append(known, unknown...) 
+ // Create a mixed list + unknown := []common.Hash{ + common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333"), + common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444"), + } + mixed := append(known, unknown...) - // Filter should return only unknown hashes - result := b.FilterNotContained(mixed) + // Filter should return only unknown hashes + result := b.FilterNotContained(mixed) - if len(result) != 2 { - t.Errorf("expected 2 unknown hashes, got %d", len(result)) - } + if len(result) != 2 { + t.Errorf("expected 2 unknown hashes, got %d", len(result)) + } - for _, h := range result { - if h == known[0] || h == known[1] { - t.Errorf("known hash %s should not be in result", h.Hex()) - } + for _, h := range result { + if h == known[0] || h == known[1] { + t.Errorf("known hash %s should not be in result", h.Hex()) } - }) - - t.Run("Rotate", func(t *testing.T) { - b := NewBloomSet(DefaultBloomSetOptions()) - - hash1 := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") - hash2 := common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") - - // Add hash1 to current - b.Add(hash1) - - // Rotate - hash1 moves to previous - b.Rotate() + } +} - // hash1 should still be found (in previous) - if !b.Contains(hash1) { - t.Error("expected hash1 to be found after first rotation") - } +func TestBloomSetRotate(t *testing.T) { + b := NewBloomSet(DefaultBloomSetOptions()) - // Add hash2 to current - b.Add(hash2) + hash1 := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + hash2 := common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") - // Rotate again - hash1's filter is now cleared, hash2 moves to previous - b.Rotate() + // Add hash1 to current + b.Add(hash1) - // hash1 should no longer be found - if b.Contains(hash1) { - t.Error("expected hash1 not to be found after second rotation") - } 
+ // Rotate - hash1 moves to previous + b.Rotate() - // hash2 should still be found - if !b.Contains(hash2) { - t.Error("expected hash2 to be found after second rotation") - } - }) + // hash1 should still be found (in previous) + if !b.Contains(hash1) { + t.Error("expected hash1 to be found after first rotation") + } - t.Run("MemoryUsage", func(t *testing.T) { - opts := DefaultBloomSetOptions() - b := NewBloomSet(opts) + // Add hash2 to current + b.Add(hash2) - usage := b.MemoryUsage() + // Rotate again - hash1's filter is now cleared, hash2 moves to previous + b.Rotate() - // With default 327680 bits = 5120 words per filter * 8 bytes * 2 filters = 81920 bytes - expectedBytes := uint((327680+63)/64) * 8 * 2 - if usage != expectedBytes { - t.Errorf("expected memory usage %d bytes, got %d", expectedBytes, usage) - } - }) + // hash1 should no longer be found + if b.Contains(hash1) { + t.Error("expected hash1 not to be found after second rotation") + } - t.Run("FalsePositiveRate", func(t *testing.T) { - // Test that false positive rate is approximately as expected - b := NewBloomSet(DefaultBloomSetOptions()) + // hash2 should still be found + if !b.Contains(hash2) { + t.Error("expected hash2 to be found after second rotation") + } +} - // Add 32768 unique hashes (the design capacity) - // Use keccak256 to generate properly distributed hashes - for i := uint64(0); i < 32768; i++ { - b.Add(generateTestHash(i)) - } +func TestBloomSetMemoryUsage(t *testing.T) { + opts := DefaultBloomSetOptions() + b := NewBloomSet(opts) - // Test 10000 hashes that were NOT added (different seed range) - falsePositives := 0 - for i := uint64(100000); i < 110000; i++ { - if b.Contains(generateTestHash(i)) { - falsePositives++ - } - } + usage := b.MemoryUsage() - // Expected ~1% false positive rate, allow up to 3% for statistical variance - rate := float64(falsePositives) / 10000.0 - if rate > 0.03 { - t.Errorf("false positive rate too high: %.2f%% (expected ~1%%)", rate*100) - } - 
t.Logf("False positive rate: %.2f%%", rate*100) - }) + // With default 327680 bits = 5120 words per filter * 8 bytes * 2 filters = 81920 bytes + expectedBytes := uint((327680+63)/64) * 8 * 2 + if usage != expectedBytes { + t.Errorf("expected memory usage %d bytes, got %d", expectedBytes, usage) + } } // generateTestHash creates a deterministic hash from a seed using keccak256. @@ -164,6 +136,32 @@ func generateTestHash(seed uint64) common.Hash { return crypto.Keccak256Hash(buf[:]) } +func TestBloomSetFalsePositiveRate(t *testing.T) { + // Test that false positive rate is approximately as expected + b := NewBloomSet(DefaultBloomSetOptions()) + + // Add 32768 unique hashes (the design capacity) + // Use keccak256 to generate properly distributed hashes + for i := uint64(0); i < 32768; i++ { + b.Add(generateTestHash(i)) + } + + // Test 10000 hashes that were NOT added (different seed range) + falsePositives := 0 + for i := uint64(100000); i < 110000; i++ { + if b.Contains(generateTestHash(i)) { + falsePositives++ + } + } + + // Expected ~1% false positive rate, allow up to 3% for statistical variance + rate := float64(falsePositives) / 10000.0 + if rate > 0.03 { + t.Errorf("false positive rate too high: %.2f%% (expected ~1%%)", rate*100) + } + t.Logf("False positive rate: %.2f%%", rate*100) +} + func BenchmarkBloomSetAdd(b *testing.B) { bloom := NewBloomSet(DefaultBloomSetOptions()) hash := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") From 9c807afc966bda14e1fe1c7a6d5cd72e308b139d Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 12:39:35 -0500 Subject: [PATCH 39/48] fix: suggestions --- p2p/datastructures/bloomset_test.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/p2p/datastructures/bloomset_test.go b/p2p/datastructures/bloomset_test.go index 0a173144d..5682b3fec 100644 --- a/p2p/datastructures/bloomset_test.go +++ b/p2p/datastructures/bloomset_test.go @@ -142,7 +142,7 @@ func 
TestBloomSetFalsePositiveRate(t *testing.T) { // Add 32768 unique hashes (the design capacity) // Use keccak256 to generate properly distributed hashes - for i := uint64(0); i < 32768; i++ { + for i := range uint64(32768) { b.Add(generateTestHash(i)) } @@ -196,8 +196,7 @@ func BenchmarkBloomSetFilterNotContained(b *testing.B) { batch[i] = common.BytesToHash([]byte{byte(i >> 8), byte(i)}) } - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { bloom.FilterNotContained(batch) } } @@ -217,8 +216,7 @@ func BenchmarkLRUFilterNotContained(b *testing.B) { batch[i] = common.BytesToHash([]byte{byte(i >> 8), byte(i)}) } - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { cache.FilterNotContained(batch) } } From 65e07d1045800ce5f7337cfcc46564064577dcdc Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 12:55:58 -0500 Subject: [PATCH 40/48] fix: remove unused --- p2p/conns.go | 26 +++--- p2p/datastructures/bloomset_test.go | 19 ----- p2p/datastructures/lru.go | 122 ++-------------------------- p2p/protocol.go | 16 +--- 4 files changed, 23 insertions(+), 160 deletions(-) diff --git a/p2p/conns.go b/p2p/conns.go index a7f7d2138..525e6995a 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -317,20 +317,18 @@ func (c *Conns) PeerConnectedAt(peerID string) time.Time { return time.Time{} } -// AddTx adds a transaction to the shared cache for duplicate detection and serving. -func (c *Conns) AddTx(hash common.Hash, tx *types.Transaction) { - c.txs.Add(hash, tx) -} - -// GetTx retrieves a transaction from the shared cache and updates LRU ordering. -func (c *Conns) GetTx(hash common.Hash) (*types.Transaction, bool) { - return c.txs.Get(hash) -} - -// GetTxs retrieves multiple transactions from the shared cache in a single lock operation. -// Updates LRU ordering for found transactions. 
-func (c *Conns) GetTxs(hashes []common.Hash) []*types.Transaction { - return c.txs.GetMany(hashes) +// AddTxs adds multiple transactions to the shared cache in a single lock operation. +// Returns the computed hashes for reuse by the caller. +func (c *Conns) AddTxs(txs []*types.Transaction) []common.Hash { + if len(txs) == 0 { + return nil + } + hashes := make([]common.Hash, len(txs)) + for i, tx := range txs { + hashes[i] = tx.Hash() + } + c.txs.AddBatch(hashes, txs) + return hashes } // PeekTx retrieves a transaction from the shared cache without updating LRU ordering. diff --git a/p2p/datastructures/bloomset_test.go b/p2p/datastructures/bloomset_test.go index 5682b3fec..246b29add 100644 --- a/p2p/datastructures/bloomset_test.go +++ b/p2p/datastructures/bloomset_test.go @@ -201,22 +201,3 @@ func BenchmarkBloomSetFilterNotContained(b *testing.B) { } } -func BenchmarkLRUFilterNotContained(b *testing.B) { - cache := NewLRU[common.Hash, struct{}](LRUOptions{MaxSize: 32768}) - - // Add 1000 hashes - for i := range 1000 { - hash := common.BytesToHash([]byte{byte(i >> 8), byte(i)}) - cache.Add(hash, struct{}{}) - } - - // Create a batch of 100 hashes (mix of known and unknown) - batch := make([]common.Hash, 100) - for i := range batch { - batch[i] = common.BytesToHash([]byte{byte(i >> 8), byte(i)}) - } - - for b.Loop() { - cache.FilterNotContained(batch) - } -} diff --git a/p2p/datastructures/lru.go b/p2p/datastructures/lru.go index ec5157bef..aa3500267 100644 --- a/p2p/datastructures/lru.go +++ b/p2p/datastructures/lru.go @@ -100,41 +100,6 @@ func (c *LRU[K, V]) Get(key K) (V, bool) { return e.value, true } -// GetMany retrieves multiple values from the cache and updates LRU ordering. -// Uses a single write lock for all lookups, reducing lock contention compared -// to calling Get in a loop. Returns a slice of values for keys that were found. 
-func (c *LRU[K, V]) GetMany(keys []K) []V { - if len(keys) == 0 { - return nil - } - - c.mu.Lock() - defer c.mu.Unlock() - - now := time.Now() - result := make([]V, 0, len(keys)) - - for _, key := range keys { - elem, ok := c.items[key] - if !ok { - continue - } - - e := elem.Value.(*entry[K, V]) - - if e.expiresAt != nil && now.After(*e.expiresAt) { - c.list.Remove(elem) - delete(c.items, key) - continue - } - - c.list.MoveToFront(elem) - result = append(result, e.value) - } - - return result -} - // Peek retrieves a value from the cache without updating LRU ordering. // Uses a read lock for better concurrency. func (c *LRU[K, V]) Peek(key K) (V, bool) { @@ -241,26 +206,6 @@ func (c *LRU[K, V]) Update(key K, updateFn func(V) V) { } } -// Contains checks if a key exists in the cache and is not expired. -// Uses a read lock and doesn't update LRU ordering. -func (c *LRU[K, V]) Contains(key K) bool { - c.mu.RLock() - defer c.mu.RUnlock() - - elem, ok := c.items[key] - if !ok { - return false - } - - e := elem.Value.(*entry[K, V]) - - if e.expiresAt != nil && time.Now().After(*e.expiresAt) { - return false - } - - return true -} - // Remove removes a key from the cache and returns the value if it existed. func (c *LRU[K, V]) Remove(key K) (V, bool) { c.mu.Lock() @@ -277,66 +222,11 @@ func (c *LRU[K, V]) Remove(key K) (V, bool) { return zero, false } -// Len returns the number of items in the cache. -func (c *LRU[K, V]) Len() int { - c.mu.RLock() - defer c.mu.RUnlock() - return c.list.Len() -} - -// Purge clears all items from the cache. -func (c *LRU[K, V]) Purge() { - c.mu.Lock() - defer c.mu.Unlock() - - c.items = make(map[K]*list.Element) - c.list.Init() -} - -// Keys returns all keys in the cache. 
-func (c *LRU[K, V]) Keys() []K { - c.mu.RLock() - defer c.mu.RUnlock() - - keys := make([]K, 0, c.list.Len()) - for elem := c.list.Front(); elem != nil; elem = elem.Next() { - e := elem.Value.(*entry[K, V]) - keys = append(keys, e.key) - } - return keys -} - -// FilterNotContained returns the subset of keys that are not in the cache. -// Uses a single read lock for all lookups, reducing lock contention compared -// to calling Contains in a loop. -func (c *LRU[K, V]) FilterNotContained(keys []K) []K { - c.mu.RLock() - defer c.mu.RUnlock() - - now := time.Now() - result := make([]K, 0, len(keys)) - - for _, key := range keys { - elem, ok := c.items[key] - if !ok { - result = append(result, key) - continue - } - - e := elem.Value.(*entry[K, V]) - if e.expiresAt != nil && now.After(*e.expiresAt) { - result = append(result, key) - } - } - - return result -} - -// AddMany adds multiple keys with the same value to the cache. +// AddBatch adds multiple key-value pairs to the cache. // Uses a single write lock for all additions, reducing lock contention -// compared to calling Add in a loop. -func (c *LRU[K, V]) AddMany(keys []K, value V) { - if len(keys) == 0 { +// compared to calling Add in a loop. Keys and values must have the same length. 
+func (c *LRU[K, V]) AddBatch(keys []K, values []V) { + if len(keys) == 0 || len(keys) != len(values) { return } @@ -349,7 +239,9 @@ func (c *LRU[K, V]) AddMany(keys []K, value V) { expiresAt = &t } - for _, key := range keys { + for i, key := range keys { + value := values[i] + if elem, ok := c.items[key]; ok { c.list.MoveToFront(elem) e := elem.Value.(*entry[K, V]) diff --git a/p2p/protocol.go b/p2p/protocol.go index 27404803a..5f355eacb 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -833,12 +833,8 @@ func (c *conn) handleTransactions(ctx context.Context, msg ethp2p.Msg) error { c.db.WriteTransactions(ctx, c.node, txs, tfs) } - // Cache transactions for duplicate detection and serving to peers - hashes := make([]common.Hash, len(txs)) - for i, tx := range txs { - c.conns.AddTx(tx.Hash(), tx) - hashes[i] = tx.Hash() - } + // Cache transactions for duplicate detection and serving to peers (single lock) + hashes := c.conns.AddTxs(txs) // Broadcast transactions or hashes to other peers asynchronously go c.conns.BroadcastTxs(types.Transactions(txs)) @@ -1123,12 +1119,8 @@ func (c *conn) handlePooledTransactions(ctx context.Context, msg ethp2p.Msg) err c.db.WriteTransactions(ctx, c.node, packet.PooledTransactionsResponse, tfs) } - // Cache transactions for duplicate detection and serving to peers - hashes := make([]common.Hash, len(packet.PooledTransactionsResponse)) - for i, tx := range packet.PooledTransactionsResponse { - c.conns.AddTx(tx.Hash(), tx) - hashes[i] = tx.Hash() - } + // Cache transactions for duplicate detection and serving to peers (single lock) + hashes := c.conns.AddTxs(packet.PooledTransactionsResponse) // Broadcast transactions or hashes to other peers asynchronously go c.conns.BroadcastTxs(types.Transactions(packet.PooledTransactionsResponse)) From 3b82419a4471f3f254a720c42091af7572cc0552 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 13:22:35 -0500 Subject: [PATCH 41/48] feat: peek many with keys --- p2p/conns.go | 12 
++++++------ p2p/datastructures/lru.go | 37 ++++++++++++++++++++++++++++++++++++- p2p/protocol.go | 27 ++++++++++----------------- 3 files changed, 52 insertions(+), 24 deletions(-) diff --git a/p2p/conns.go b/p2p/conns.go index 525e6995a..c5ed5833c 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -331,18 +331,18 @@ func (c *Conns) AddTxs(txs []*types.Transaction) []common.Hash { return hashes } -// PeekTx retrieves a transaction from the shared cache without updating LRU ordering. -// Uses a read lock for better concurrency when LRU ordering is not needed. -func (c *Conns) PeekTx(hash common.Hash) (*types.Transaction, bool) { - return c.txs.Peek(hash) -} - // PeekTxs retrieves multiple transactions from the shared cache without updating LRU ordering. // Uses a single read lock for better concurrency when LRU ordering is not needed. func (c *Conns) PeekTxs(hashes []common.Hash) []*types.Transaction { return c.txs.PeekMany(hashes) } +// PeekTxsWithHashes retrieves multiple transactions with their hashes from the cache. +// Returns parallel slices of found hashes and transactions. Uses a single read lock. +func (c *Conns) PeekTxsWithHashes(hashes []common.Hash) ([]common.Hash, []*types.Transaction) { + return c.txs.PeekManyWithKeys(hashes) +} + // Blocks returns the global blocks cache. func (c *Conns) Blocks() *ds.LRU[common.Hash, BlockCache] { return c.blocks diff --git a/p2p/datastructures/lru.go b/p2p/datastructures/lru.go index aa3500267..f39edd1ca 100644 --- a/p2p/datastructures/lru.go +++ b/p2p/datastructures/lru.go @@ -124,7 +124,8 @@ func (c *LRU[K, V]) Peek(key K) (V, bool) { // PeekMany retrieves multiple values from the cache without updating LRU ordering. // Uses a single read lock for all lookups, providing better concurrency than GetMany -// when LRU ordering updates are not needed. +// when LRU ordering updates are not needed. Returns only found values (indices don't +// correspond to input keys). Use PeekManyWithKeys if you need key-value pairs. 
func (c *LRU[K, V]) PeekMany(keys []K) []V { if len(keys) == 0 { return nil @@ -154,6 +155,40 @@ func (c *LRU[K, V]) PeekMany(keys []K) []V { return result } +// PeekManyWithKeys retrieves multiple key-value pairs from the cache without updating +// LRU ordering. Returns parallel slices of found keys and values. Uses a single read +// lock for all lookups. +func (c *LRU[K, V]) PeekManyWithKeys(keys []K) ([]K, []V) { + if len(keys) == 0 { + return nil, nil + } + + c.mu.RLock() + defer c.mu.RUnlock() + + now := time.Now() + foundKeys := make([]K, 0, len(keys)) + foundValues := make([]V, 0, len(keys)) + + for _, key := range keys { + elem, ok := c.items[key] + if !ok { + continue + } + + e := elem.Value.(*entry[K, V]) + + if e.expiresAt != nil && now.After(*e.expiresAt) { + continue + } + + foundKeys = append(foundKeys, key) + foundValues = append(foundValues, e.value) + } + + return foundKeys, foundValues +} + // Update atomically updates a value in the cache using the provided update function. // The update function receives the current value (or zero value if not found) and // returns the new value to store. This is thread-safe and prevents race conditions diff --git a/p2p/protocol.go b/p2p/protocol.go index 5f355eacb..7721d0373 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -673,28 +673,21 @@ func (c *conn) enqueueTxHashes(queue, hashes []common.Hash) []common.Hash { // It looks up each transaction from the cache to populate Types and Sizes // as required by the ETH68 protocol. func (c *conn) sendTxAnnouncements(hashes []common.Hash) error { - // Build packet with actual Types and Sizes from cached transactions. + // Batch lookup all transactions in a single lock operation. // Skip hashes where the transaction is no longer in cache. 
- var ( - pending []common.Hash - pendingTypes []byte - pendingSizes []uint32 - ) - - for _, hash := range hashes { - tx, ok := c.conns.PeekTx(hash) - if !ok || tx == nil { - continue - } - pending = append(pending, hash) - pendingTypes = append(pendingTypes, tx.Type()) - pendingSizes = append(pendingSizes, uint32(tx.Size())) - } - + pending, txs := c.conns.PeekTxsWithHashes(hashes) if len(pending) == 0 { return nil } + // Build Types and Sizes from the found transactions. + pendingTypes := make([]byte, len(txs)) + pendingSizes := make([]uint32, len(txs)) + for i, tx := range txs { + pendingTypes[i] = tx.Type() + pendingSizes[i] = uint32(tx.Size()) + } + packet := eth.NewPooledTransactionHashesPacket{ Types: pendingTypes, Sizes: pendingSizes, From 73bbd2608b5e381be1e73bd7a09c33e7cc636048 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 14:00:11 -0500 Subject: [PATCH 42/48] fix: missing functions --- p2p/conns.go | 5 +++++ p2p/datastructures/lru.go | 13 +++++++++++++ 2 files changed, 18 insertions(+) diff --git a/p2p/conns.go b/p2p/conns.go index 6d69d14ae..a3181368e 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -332,6 +332,11 @@ func (c *Conns) AddTxs(txs []*types.Transaction) []common.Hash { return hashes } +// GetTx retrieves a transaction from the shared cache and updates LRU ordering. +func (c *Conns) GetTx(hash common.Hash) (*types.Transaction, bool) { + return c.txs.Get(hash) +} + // PeekTxs retrieves multiple transactions from the shared cache without updating LRU ordering. // Uses a single read lock for better concurrency when LRU ordering is not needed. func (c *Conns) PeekTxs(hashes []common.Hash) []*types.Transaction { diff --git a/p2p/datastructures/lru.go b/p2p/datastructures/lru.go index f39edd1ca..d0730c1e4 100644 --- a/p2p/datastructures/lru.go +++ b/p2p/datastructures/lru.go @@ -257,6 +257,19 @@ func (c *LRU[K, V]) Remove(key K) (V, bool) { return zero, false } +// Keys returns all keys in the cache in LRU order (most recent first). 
+func (c *LRU[K, V]) Keys() []K { + c.mu.RLock() + defer c.mu.RUnlock() + + keys := make([]K, 0, c.list.Len()) + for elem := c.list.Front(); elem != nil; elem = elem.Next() { + e := elem.Value.(*entry[K, V]) + keys = append(keys, e.key) + } + return keys +} + // AddBatch adds multiple key-value pairs to the cache. // Uses a single write lock for all additions, reducing lock contention // compared to calling Add in a loop. Keys and values must have the same length. From 00de53875ff90b4825f6af707046e7a48fcbb241 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 17:33:36 -0500 Subject: [PATCH 43/48] fix: goroutine leak --- p2p/conns.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/p2p/conns.go b/p2p/conns.go index c5ed5833c..7690cd629 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -191,11 +191,14 @@ func (c *Conns) BroadcastTxHashes(hashes []common.Hash) int { count := 0 for _, cn := range peers { + // Non-blocking send, drop if queue full (matches Bor behavior) select { case cn.txAnnounce <- hashes: count++ case <-cn.closeCh: // Peer closing, skip + default: + // Channel full, skip to avoid goroutine leak } } From c46e2e8bb095d4cf4a7db9cc4f9826d953f4c477 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 18:09:29 -0500 Subject: [PATCH 44/48] fix: gas methods --- cmd/p2p/sensor/rpc.go | 75 ++++-------- p2p/conns.go | 71 ------------ p2p/gasprice.go | 257 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 280 insertions(+), 123 deletions(-) create mode 100644 p2p/gasprice.go diff --git a/cmd/p2p/sensor/rpc.go b/cmd/p2p/sensor/rpc.go index 3465a8373..e29c6d40b 100644 --- a/cmd/p2p/sensor/rpc.go +++ b/cmd/p2p/sensor/rpc.go @@ -46,6 +46,7 @@ type rpcError struct { func handleRPC(conns *p2p.Conns, networkID uint64) { // Use network ID as chain ID for signature validation chainID := new(big.Int).SetUint64(networkID) + gpo := p2p.NewGasPriceOracle(conns) mux := http.NewServeMux() mux.HandleFunc("/", func(w http.ResponseWriter, r 
*http.Request) { @@ -69,7 +70,7 @@ func handleRPC(conns *p2p.Conns, networkID uint64) { trimmed := strings.TrimSpace(string(body)) if len(trimmed) > 0 && trimmed[0] == '[' { // Handle batch request - handleBatchRequest(w, body, conns, chainID) + handleBatchRequest(w, body, conns, chainID, gpo) return } @@ -82,7 +83,7 @@ func handleRPC(conns *p2p.Conns, networkID uint64) { // Process request (reuse same logic as batch) var txs types.Transactions - resp := processRequest(req, conns, chainID, &txs) + resp := processRequest(req, conns, chainID, gpo, &txs) // Broadcast any transactions if len(txs) > 0 { @@ -124,7 +125,7 @@ func writeError(w http.ResponseWriter, code int, message string, id any) { // handleBatchRequest processes JSON-RPC 2.0 batch requests. // For eth_sendRawTransaction requests, it collects valid transactions for batch broadcasting. // Returns a batch response with results or errors for each request. -func handleBatchRequest(w http.ResponseWriter, body []byte, conns *p2p.Conns, chainID *big.Int) { +func handleBatchRequest(w http.ResponseWriter, body []byte, conns *p2p.Conns, chainID *big.Int, gpo *p2p.GasPriceOracle) { // Parse batch of requests var requests []rpcRequest if err := json.Unmarshal(body, &requests); err != nil { @@ -143,7 +144,7 @@ func handleBatchRequest(w http.ResponseWriter, body []byte, conns *p2p.Conns, ch txs := make(types.Transactions, 0) for _, req := range requests { - resp := processRequest(req, conns, chainID, &txs) + resp := processRequest(req, conns, chainID, gpo, &txs) responses = append(responses, resp) } @@ -181,7 +182,7 @@ func newErrorResponse(err *rpcError, id any) rpcResponse { // processRequest handles a single RPC request and returns a response. // For eth_sendRawTransaction, valid transactions are appended to txs for batch broadcasting. 
-func processRequest(req rpcRequest, conns *p2p.Conns, chainID *big.Int, txs *types.Transactions) rpcResponse { +func processRequest(req rpcRequest, conns *p2p.Conns, chainID *big.Int, gpo *p2p.GasPriceOracle, txs *types.Transactions) rpcResponse { switch req.Method { case "eth_sendRawTransaction": tx, resp := validateTx(req, chainID) @@ -202,7 +203,14 @@ func processRequest(req rpcRequest, conns *p2p.Conns, chainID *big.Int, txs *typ return newResultResponse(hexutil.EncodeUint64(head.Block.NumberU64()), req.ID) case "eth_gasPrice": - return newResultResponse(hexutil.EncodeBig(conns.SuggestGasPrice()), req.ID) + return newResultResponse(hexutil.EncodeBig(gpo.SuggestGasPrice()), req.ID) + + case "eth_maxPriorityFeePerGas": + tip := gpo.SuggestGasTipCap() + if tip == nil { + tip = big.NewInt(1e9) // Default to 1 gwei + } + return newResultResponse(hexutil.EncodeBig(tip), req.ID) case "eth_getBlockByHash": result, err := getBlockByHash(req, conns) @@ -245,72 +253,35 @@ func handleMethodResult(result any, err *rpcError, id any) rpcResponse { // transaction hex, unmarshaling it, and verifying the signature. Returns the transaction if valid // (with an empty response), or nil transaction with an error response if validation fails. 
func validateTx(req rpcRequest, chainID *big.Int) (*types.Transaction, rpcResponse) { - // Check params + invalidParams := func(msg string) rpcResponse { + return newErrorResponse(&rpcError{Code: -32602, Message: msg}, req.ID) + } + if len(req.Params) == 0 { - return nil, rpcResponse{ - JSONRPC: "2.0", - Error: &rpcError{ - Code: -32602, - Message: "Invalid params: missing raw transaction", - }, - ID: req.ID, - } + return nil, invalidParams("Invalid params: missing raw transaction") } - // Extract raw transaction hex string hex, ok := req.Params[0].(string) if !ok { - return nil, rpcResponse{ - JSONRPC: "2.0", - Error: &rpcError{ - Code: -32602, - Message: "Invalid params: raw transaction must be a hex string", - }, - ID: req.ID, - } + return nil, invalidParams("Invalid params: raw transaction must be a hex string") } - // Decode hex string to bytes bytes, err := hexutil.Decode(hex) if err != nil { - return nil, rpcResponse{ - JSONRPC: "2.0", - Error: &rpcError{ - Code: -32602, - Message: fmt.Sprintf("Invalid transaction hex: %v", err), - }, - ID: req.ID, - } + return nil, invalidParams(fmt.Sprintf("Invalid transaction hex: %v", err)) } - // Unmarshal transaction tx := new(types.Transaction) if err = tx.UnmarshalBinary(bytes); err != nil { - return nil, rpcResponse{ - JSONRPC: "2.0", - Error: &rpcError{ - Code: -32602, - Message: fmt.Sprintf("Invalid transaction encoding: %v", err), - }, - ID: req.ID, - } + return nil, invalidParams(fmt.Sprintf("Invalid transaction encoding: %v", err)) } - // Validate transaction signature signer := types.LatestSignerForChainID(chainID) sender, err := types.Sender(signer, tx) if err != nil { - return nil, rpcResponse{ - JSONRPC: "2.0", - Error: &rpcError{ - Code: -32602, - Message: fmt.Sprintf("Invalid transaction signature: %v", err), - }, - ID: req.ID, - } + return nil, invalidParams(fmt.Sprintf("Invalid transaction signature: %v", err)) } - // Log the transaction to := "nil" if tx.To() != nil { to = tx.To().Hex() diff --git 
a/p2p/conns.go b/p2p/conns.go index 9c187c67a..803c3e716 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -2,7 +2,6 @@ package p2p import ( "math/big" - "sort" "sync" "time" @@ -450,76 +449,6 @@ func (c *Conns) GetBlockByNumber(number uint64) (common.Hash, BlockCache, bool) return common.Hash{}, BlockCache{}, false } -// SuggestGasPrice estimates the gas price based on recent blocks in the cache. -// Follows a geth-style gas price oracle approach using the 60th percentile. -func (c *Conns) SuggestGasPrice() *big.Int { - defaultGasPrice := big.NewInt(1e9) // 1 gwei - - keys := c.blocks.Keys() - if len(keys) == 0 { - return defaultGasPrice - } - - // Collect effective gas prices from transactions in cached blocks - var prices []*big.Int - for _, hash := range keys { - cache, ok := c.blocks.Peek(hash) - if !ok || cache.Body == nil || cache.Header == nil { - continue - } - - baseFee := cache.Header.BaseFee - if baseFee == nil { - baseFee = big.NewInt(0) - } - - for _, tx := range cache.Body.Transactions { - price := calculateEffectiveGasPrice(tx, baseFee) - if price != nil && price.Sign() > 0 { - prices = append(prices, price) - } - } - } - - if len(prices) == 0 { - // Fallback to head block base fee + 1 gwei tip - head := c.HeadBlock() - if head.Block != nil && head.Block.BaseFee() != nil { - return new(big.Int).Add(head.Block.BaseFee(), big.NewInt(1e9)) - } - return defaultGasPrice - } - - // Sort and return 60th percentile (geth default) - sort.Slice(prices, func(i, j int) bool { - return prices[i].Cmp(prices[j]) < 0 - }) - return prices[len(prices)*60/100] -} - -// calculateEffectiveGasPrice returns the effective gas price for a transaction. -// For EIP-1559 transactions, this is min(maxFeePerGas, baseFee + maxPriorityFeePerGas). -// For legacy transactions, this is the gas price directly. -// Returns nil if the price cannot be determined. 
-func calculateEffectiveGasPrice(tx *types.Transaction, baseFee *big.Int) *big.Int { - if tx.Type() == types.DynamicFeeTxType { - tip := tx.GasTipCap() - if tip == nil { - return nil - } - effectiveGasPrice := new(big.Int).Add(baseFee, tip) - if tx.GasFeeCap() != nil && effectiveGasPrice.Cmp(tx.GasFeeCap()) > 0 { - return new(big.Int).Set(tx.GasFeeCap()) - } - return effectiveGasPrice - } - // Legacy transactions: use gas price directly - if price := tx.GasPrice(); price != nil { - return new(big.Int).Set(price) - } - return nil -} - // GetPeerVersion returns the negotiated eth protocol version for a specific peer. // Returns 0 if the peer is not found. func (c *Conns) GetPeerVersion(peerID string) uint { diff --git a/p2p/gasprice.go b/p2p/gasprice.go new file mode 100644 index 000000000..1bc28e4ac --- /dev/null +++ b/p2p/gasprice.go @@ -0,0 +1,257 @@ +package p2p + +import ( + "math/big" + "sort" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// Gas price oracle constants (matching Bor/geth defaults) +const ( + // gpoSampleNumber is the number of transactions to sample per block + gpoSampleNumber = 3 + // gpoCheckBlocks is the number of blocks to check for gas price estimation + gpoCheckBlocks = 20 + // gpoPercentile is the percentile to use for gas price estimation + gpoPercentile = 60 +) + +var ( + // gpoMaxPrice is the maximum gas price to suggest (500 gwei) + gpoMaxPrice = big.NewInt(500_000_000_000) + // gpoIgnorePrice is the minimum tip to consider (2 gwei, lower than Bor's 25 gwei for broader network compatibility) + gpoIgnorePrice = big.NewInt(2_000_000_000) + // gpoDefaultPrice is the default gas price when no data is available (1 gwei) + gpoDefaultPrice = big.NewInt(1_000_000_000) +) + +// GasPriceOracle estimates gas prices based on recent block data. +// It follows Bor/geth's gas price oracle approach. 
+type GasPriceOracle struct { + conns *Conns + + mu sync.RWMutex + lastHead common.Hash + lastPrice *big.Int + lastTip *big.Int +} + +// NewGasPriceOracle creates a new gas price oracle that uses the given Conns for block data. +func NewGasPriceOracle(conns *Conns) *GasPriceOracle { + return &GasPriceOracle{ + conns: conns, + } +} + +// SuggestGasPrice estimates the gas price based on recent blocks. +// For EIP-1559 networks, this returns baseFee + suggestedTip. +// For legacy networks, this returns the 60th percentile of gas prices. +func (o *GasPriceOracle) SuggestGasPrice() *big.Int { + head := o.conns.HeadBlock() + if head.Block == nil { + return gpoDefaultPrice + } + + // For EIP-1559: return baseFee + suggested tip + if baseFee := head.Block.BaseFee(); baseFee != nil { + tip := o.SuggestGasTipCap() + if tip == nil { + tip = gpoDefaultPrice + } + return new(big.Int).Add(baseFee, tip) + } + + // Legacy: return percentile of gas prices + return o.suggestLegacyGasPrice() +} + +// suggestLegacyGasPrice estimates gas price for pre-EIP-1559 networks. +func (o *GasPriceOracle) suggestLegacyGasPrice() *big.Int { + keys := o.conns.blocks.Keys() + if len(keys) == 0 { + return gpoDefaultPrice + } + + if len(keys) > gpoCheckBlocks { + keys = keys[:gpoCheckBlocks] + } + + var prices []*big.Int + for _, hash := range keys { + cache, ok := o.conns.blocks.Peek(hash) + if !ok || cache.Body == nil { + continue + } + + for _, tx := range cache.Body.Transactions { + if price := tx.GasPrice(); price != nil && price.Sign() > 0 { + prices = append(prices, new(big.Int).Set(price)) + } + } + } + + if len(prices) == 0 { + return gpoDefaultPrice + } + + sort.Slice(prices, func(i, j int) bool { + return prices[i].Cmp(prices[j]) < 0 + }) + + price := prices[(len(prices)-1)*gpoPercentile/100] + if price.Cmp(gpoMaxPrice) > 0 { + return new(big.Int).Set(gpoMaxPrice) + } + return price +} + +// SuggestGasTipCap estimates a gas tip cap (priority fee) based on recent blocks. 
+// This implementation follows Bor/geth's gas price oracle approach: +// - Samples the lowest N tips from each of the last M blocks +// - Ignores tips below a threshold +// - Returns the configured percentile of collected tips +// - Caches results until head changes +func (o *GasPriceOracle) SuggestGasTipCap() *big.Int { + head := o.conns.HeadBlock() + if head.Block == nil { + return nil + } + headHash := head.Block.Hash() + + // Check cache first + o.mu.RLock() + if headHash == o.lastHead && o.lastTip != nil { + tip := new(big.Int).Set(o.lastTip) + o.mu.RUnlock() + return tip + } + lastTip := o.lastTip + o.mu.RUnlock() + + // Collect tips from recent blocks + keys := o.conns.blocks.Keys() + if len(keys) == 0 { + return lastTip + } + + // Limit to checkBlocks most recent + if len(keys) > gpoCheckBlocks { + keys = keys[:gpoCheckBlocks] + } + + var results []*big.Int + for _, hash := range keys { + tips := o.getBlockTips(hash, gpoSampleNumber, gpoIgnorePrice) + if len(tips) == 0 && lastTip != nil { + // Empty block or all tips below threshold, use last tip + tips = []*big.Int{lastTip} + } + results = append(results, tips...) + } + + if len(results) == 0 { + return lastTip + } + + // Sort and get percentile + sort.Slice(results, func(i, j int) bool { + return results[i].Cmp(results[j]) < 0 + }) + tip := results[(len(results)-1)*gpoPercentile/100] + + // Apply max price cap + if tip.Cmp(gpoMaxPrice) > 0 { + tip = new(big.Int).Set(gpoMaxPrice) + } + + // Cache result + o.mu.Lock() + o.lastHead = headHash + o.lastTip = tip + o.mu.Unlock() + + return new(big.Int).Set(tip) +} + +// getBlockTips returns the lowest N tips from a block that are above the ignore threshold. +// Transactions are sorted by effective tip ascending, and the first N valid tips are returned. 
+func (o *GasPriceOracle) getBlockTips(hash common.Hash, limit int, ignoreUnder *big.Int) []*big.Int { + cache, ok := o.conns.blocks.Peek(hash) + if !ok || cache.Body == nil || cache.Header == nil { + return nil + } + + baseFee := cache.Header.BaseFee + if baseFee == nil { + return nil // Pre-EIP-1559 block + } + + // Calculate tips for all transactions + var allTips []*big.Int + for _, tx := range cache.Body.Transactions { + tip := effectiveGasTip(tx, baseFee) + if tip != nil && tip.Sign() > 0 { + allTips = append(allTips, tip) + } + } + + if len(allTips) == 0 { + return nil + } + + // Sort by tip ascending (lowest first, like Bor) + sort.Slice(allTips, func(i, j int) bool { + return allTips[i].Cmp(allTips[j]) < 0 + }) + + // Collect tips above threshold, up to limit + var tips []*big.Int + for _, tip := range allTips { + if ignoreUnder != nil && tip.Cmp(ignoreUnder) < 0 { + continue + } + tips = append(tips, tip) + if len(tips) >= limit { + break + } + } + + return tips +} + +// effectiveGasTip returns the effective tip (priority fee) for a transaction. +// For EIP-1559 transactions: min(maxPriorityFeePerGas, maxFeePerGas - baseFee) +// For legacy transactions: gasPrice - baseFee (the implicit tip) +// Returns nil if the tip cannot be determined or is negative. 
+func effectiveGasTip(tx *types.Transaction, baseFee *big.Int) *big.Int { + switch tx.Type() { + case types.DynamicFeeTxType, types.BlobTxType: + tip := tx.GasTipCap() + if tip == nil { + return nil + } + // Effective tip is min(maxPriorityFeePerGas, maxFeePerGas - baseFee) + if tx.GasFeeCap() != nil { + effectiveTip := new(big.Int).Sub(tx.GasFeeCap(), baseFee) + if effectiveTip.Cmp(tip) < 0 { + tip = effectiveTip + } + } + if tip.Sign() <= 0 { + return nil + } + return new(big.Int).Set(tip) + default: + // Legacy/AccessList transactions: tip is gasPrice - baseFee + if price := tx.GasPrice(); price != nil { + tip := new(big.Int).Sub(price, baseFee) + if tip.Sign() > 0 { + return tip + } + } + return nil + } +} From a48a0feb5b40d7238dc8e3dcbcc5ec5b9903db0e Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 18:19:12 -0500 Subject: [PATCH 45/48] fix: cleanup --- p2p/log.go | 12 ++++++++---- p2p/nodeset.go | 4 ++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/p2p/log.go b/p2p/log.go index bfb26b8ee..4ba2caba1 100644 --- a/p2p/log.go +++ b/p2p/log.go @@ -89,6 +89,7 @@ func (c *MessageCount) IsEmpty() bool { return sum( c.BlockHeaders, c.BlockBodies, + c.Blocks, c.BlockHashes, c.BlockHeaderRequests, c.BlockBodiesRequests, @@ -98,16 +99,19 @@ func (c *MessageCount) IsEmpty() bool { c.Pings, c.Errors, c.Disconnects, + c.NewWitness, + c.NewWitnessHashes, + c.GetWitnessRequest, + c.Witness, ) == 0 } func sum(ints ...int64) int64 { - var sum int64 = 0 + var total int64 for _, i := range ints { - sum += i + total += i } - - return sum + return total } // IncrementByName increments the appropriate field based on message name. 
diff --git a/p2p/nodeset.go b/p2p/nodeset.go index 9572f8538..aa25870f3 100644 --- a/p2p/nodeset.go +++ b/p2p/nodeset.go @@ -97,7 +97,7 @@ func WriteURLs(file string, ns NodeSet) error { } } - urls := []string{} + var urls []string for url := range m { urls = append(urls, url) } @@ -130,7 +130,7 @@ func WritePeers(file string, urls []string) error { } func WriteDNSTreeNodes(file string, tree *dnsdisc.Tree) error { - urls := []string{} + var urls []string for _, node := range tree.Nodes() { urls = append(urls, node.URLv4()) } From 73a6c0aaea1968720096fabf23728e10500fefe2 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 22:10:41 -0500 Subject: [PATCH 46/48] fix: add space --- p2p/conns.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/p2p/conns.go b/p2p/conns.go index 196054131..803c3e716 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -338,6 +338,7 @@ func (c *Conns) AddTxs(txs []*types.Transaction) []common.Hash { func (c *Conns) GetTx(hash common.Hash) (*types.Transaction, bool) { return c.txs.Get(hash) } + // PeekTxs retrieves multiple transactions from the shared cache without updating LRU ordering. // Uses a single read lock for better concurrency when LRU ordering is not needed. func (c *Conns) PeekTxs(hashes []common.Hash) []*types.Transaction { @@ -447,6 +448,7 @@ func (c *Conns) GetBlockByNumber(number uint64) (common.Hash, BlockCache, bool) } return common.Hash{}, BlockCache{}, false } + // GetPeerVersion returns the negotiated eth protocol version for a specific peer. // Returns 0 if the peer is not found. 
func (c *Conns) GetPeerVersion(peerID string) uint { From b33b1626f0d6221ba099b75bc11f148e49e24445 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 3 Mar 2026 22:11:08 -0500 Subject: [PATCH 47/48] fix: add space in lru --- p2p/datastructures/lru.go | 1 + 1 file changed, 1 insertion(+) diff --git a/p2p/datastructures/lru.go b/p2p/datastructures/lru.go index dd08d2713..d0730c1e4 100644 --- a/p2p/datastructures/lru.go +++ b/p2p/datastructures/lru.go @@ -269,6 +269,7 @@ func (c *LRU[K, V]) Keys() []K { } return keys } + // AddBatch adds multiple key-value pairs to the cache. // Uses a single write lock for all additions, reducing lock contention // compared to calling Add in a loop. Keys and values must have the same length. From e58a58f3b7b3e670a5b86d1a13ea9920eb1f6a57 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Wed, 4 Mar 2026 13:02:44 -0500 Subject: [PATCH 48/48] fix: lint --- p2p/gasprice.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/p2p/gasprice.go b/p2p/gasprice.go index 1bc28e4ac..b7c349068 100644 --- a/p2p/gasprice.go +++ b/p2p/gasprice.go @@ -33,10 +33,9 @@ var ( type GasPriceOracle struct { conns *Conns - mu sync.RWMutex - lastHead common.Hash - lastPrice *big.Int - lastTip *big.Int + mu sync.RWMutex + lastHead common.Hash + lastTip *big.Int } // NewGasPriceOracle creates a new gas price oracle that uses the given Conns for block data.