diff --git a/block/internal/cache/generic_cache.go b/block/internal/cache/generic_cache.go index dc3e1b7d14..03b8042d03 100644 --- a/block/internal/cache/generic_cache.go +++ b/block/internal/cache/generic_cache.go @@ -113,6 +113,13 @@ func (c *Cache) setSeenBatch(hashes []string, height uint64) { } } +func (c *Cache) getHashByHeight(height uint64) (string, bool) { + c.mu.RLock() + defer c.mu.RUnlock() + h, ok := c.hashByHeight[height] + return h, ok +} + func (c *Cache) getDAIncluded(hash string) (uint64, bool) { c.mu.RLock() defer c.mu.RUnlock() diff --git a/block/internal/cache/generic_cache_test.go b/block/internal/cache/generic_cache_test.go index ec4e92e7f7..d756681acd 100644 --- a/block/internal/cache/generic_cache_test.go +++ b/block/internal/cache/generic_cache_test.go @@ -391,3 +391,26 @@ func TestCache_DeleteAllForHeight_CleansHashAndDA(t *testing.T) { _, ok = c.getDAIncludedByHeight(2) assert.True(t, ok) } + +func TestCache_getHashByHeight(t *testing.T) { + c := NewCache(nil, "") + + h, ok := c.getHashByHeight(42) + assert.False(t, ok) + assert.Empty(t, h) + + c.setSeen("abc", 42) + h, ok = c.getHashByHeight(42) + assert.True(t, ok) + assert.Equal(t, "abc", h) + + // setDAIncluded also maintains hashByHeight. 
+ c.setDAIncluded("def", 7, 100) + h, ok = c.getHashByHeight(100) + assert.True(t, ok) + assert.Equal(t, "def", h) + + c.deleteAllForHeight(42) + _, ok = c.getHashByHeight(42) + assert.False(t, ok) +} diff --git a/block/internal/cache/manager.go b/block/internal/cache/manager.go index 4d95a7d7e5..2a19f7104c 100644 --- a/block/internal/cache/manager.go +++ b/block/internal/cache/manager.go @@ -38,11 +38,17 @@ type CacheManager interface { // Header operations IsHeaderSeen(hash string) bool SetHeaderSeen(hash string, blockHeight uint64) + GetHeaderHashByHeight(blockHeight uint64) (string, bool) GetHeaderDAIncludedByHash(hash string) (uint64, bool) GetHeaderDAIncludedByHeight(blockHeight uint64) (uint64, bool) SetHeaderDAIncluded(hash string, daHeight uint64, blockHeight uint64) RemoveHeaderDAIncluded(hash string) + // Pending signed header operations (in-flight, pre-persistence) + SetPendingSignedHeader(h *types.SignedHeader, source string) + GetPendingSignedHeader(blockHeight uint64) (*types.SignedHeader, string, bool) + RemovePendingSignedHeader(blockHeight uint64) + // Data operations IsDataSeen(hash string) bool SetDataSeen(hash string, blockHeight uint64) @@ -92,17 +98,24 @@ type Manager interface { var _ Manager = (*implementation)(nil) type implementation struct { - headerCache *Cache - dataCache *Cache - txCache *Cache - txTimestamps *sync.Map // map[string]time.Time - pendingEvents map[uint64]*common.DAHeightEvent - pendingMu sync.Mutex - pendingHeaders *PendingHeaders - pendingData *PendingData - store store.Store - config config.Config - logger zerolog.Logger + headerCache *Cache + dataCache *Cache + txCache *Cache + txTimestamps *sync.Map // map[string]time.Time + pendingEvents map[uint64]*common.DAHeightEvent + pendingMu sync.Mutex + pendingHeaders *PendingHeaders + pendingData *PendingData + pendingSignedHeaders map[uint64]pendingSignedHeader + pendingSignedHeadersMu sync.RWMutex + store store.Store + config config.Config + logger zerolog.Logger +} + 
+type pendingSignedHeader struct { + header *types.SignedHeader + source string } // NewManager creates a new Manager, restoring or clearing persisted state as configured. @@ -122,16 +135,17 @@ func NewManager(cfg config.Config, st store.Store, logger zerolog.Logger) (Manag } impl := &implementation{ - headerCache: headerCache, - dataCache: dataCache, - txCache: txCache, - txTimestamps: new(sync.Map), - pendingEvents: make(map[uint64]*common.DAHeightEvent), - pendingHeaders: pendingHeaders, - pendingData: pendingData, - store: st, - config: cfg, - logger: logger, + headerCache: headerCache, + dataCache: dataCache, + txCache: txCache, + txTimestamps: new(sync.Map), + pendingEvents: make(map[uint64]*common.DAHeightEvent), + pendingHeaders: pendingHeaders, + pendingData: pendingData, + pendingSignedHeaders: make(map[uint64]pendingSignedHeader), + store: st, + config: cfg, + logger: logger, } if cfg.ClearCache { @@ -157,6 +171,11 @@ func (m *implementation) SetHeaderSeen(hash string, blockHeight uint64) { m.headerCache.setSeen(hash, blockHeight) } +// GetHeaderHashByHeight returns the first-seen header hash at the given height. +func (m *implementation) GetHeaderHashByHeight(blockHeight uint64) (string, bool) { + return m.headerCache.getHashByHeight(blockHeight) +} + func (m *implementation) GetHeaderDAIncludedByHash(hash string) (uint64, bool) { return m.headerCache.getDAIncluded(hash) } @@ -173,6 +192,42 @@ func (m *implementation) RemoveHeaderDAIncluded(hash string) { m.headerCache.removeDAIncluded(hash) } +// SetPendingSignedHeader records the first SignedHeader seen at this height. +// First-write-wins: later writes at the same height are ignored so the +// double-sign detector can match alternates against the original observation. 
+func (m *implementation) SetPendingSignedHeader(h *types.SignedHeader, source string) { + if h == nil { + return + } + height := h.Height() + m.pendingSignedHeadersMu.Lock() + defer m.pendingSignedHeadersMu.Unlock() + if _, exists := m.pendingSignedHeaders[height]; exists { + return + } + m.pendingSignedHeaders[height] = pendingSignedHeader{header: h, source: source} +} + +// GetPendingSignedHeader returns the first-seen SignedHeader and the source +// ("da" or "p2p") it was observed from. +func (m *implementation) GetPendingSignedHeader(blockHeight uint64) (*types.SignedHeader, string, bool) { + m.pendingSignedHeadersMu.RLock() + defer m.pendingSignedHeadersMu.RUnlock() + entry, ok := m.pendingSignedHeaders[blockHeight] + if !ok { + return nil, "", false + } + return entry.header, entry.source, true +} + +// RemovePendingSignedHeader evicts the entry once the height is persisted, so +// the store becomes the authoritative source for double-sign comparison. +func (m *implementation) RemovePendingSignedHeader(blockHeight uint64) { + m.pendingSignedHeadersMu.Lock() + delete(m.pendingSignedHeaders, blockHeight) + m.pendingSignedHeadersMu.Unlock() +} + // DaHeight returns the highest DA height seen across header and data caches. func (m *implementation) DaHeight() uint64 { return max(m.headerCache.daHeight(), m.dataCache.daHeight()) @@ -263,6 +318,7 @@ func (m *implementation) DeleteHeight(blockHeight uint64) { m.pendingMu.Lock() delete(m.pendingEvents, blockHeight) m.pendingMu.Unlock() + m.RemovePendingSignedHeader(blockHeight) // Note: txCache is intentionally NOT deleted here because: // 1. 
Transactions are tracked by hash, not by block height (they use height 0) @@ -408,6 +464,9 @@ func (m *implementation) ClearFromStore() error { m.dataCache = NewCache(m.store, DataDAIncludedPrefix) m.txCache = NewCache(nil, "") m.pendingEvents = make(map[uint64]*common.DAHeightEvent) + m.pendingSignedHeadersMu.Lock() + m.pendingSignedHeaders = make(map[uint64]pendingSignedHeader) + m.pendingSignedHeadersMu.Unlock() // Initialize DA height from store metadata to ensure DaHeight() is never 0. m.initDAHeightFromStore(ctx) diff --git a/block/internal/cache/manager_test.go b/block/internal/cache/manager_test.go index fa5aebf34b..2222ac6e6a 100644 --- a/block/internal/cache/manager_test.go +++ b/block/internal/cache/manager_test.go @@ -3,6 +3,8 @@ package cache import ( "context" "encoding/binary" + "sync" + "sync/atomic" "testing" "time" @@ -518,6 +520,205 @@ func TestManager_DaHeightAfterCacheClear(t *testing.T) { "DaHeight should be seeded from finalized-tip metadata even after ClearCache") } +// builds a minimal SignedHeader; variant differentiates hashes at the same height. 
+func signedHeaderForHeight(height uint64, variant byte) *types.SignedHeader { + return &types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ChainID: "pending-signed", Height: height, Time: 1}, + AppHash: []byte{variant, variant, variant}, + }, + } +} + +func TestManager_PendingSignedHeader_FirstWriteWins(t *testing.T) { + t.Parallel() + cfg := tempConfig(t) + st := testMemStore(t) + + m, err := NewManager(cfg, st, zerolog.Nop()) + require.NoError(t, err) + + first := signedHeaderForHeight(5, 0x01) + second := signedHeaderForHeight(5, 0x02) + require.NotEqual(t, first.Hash().String(), second.Hash().String()) + + m.SetPendingSignedHeader(first, "p2p") + m.SetPendingSignedHeader(second, "da") + + got, source, ok := m.GetPendingSignedHeader(5) + require.True(t, ok) + require.Equal(t, first.Hash().String(), got.Hash().String()) + require.Equal(t, "p2p", source) +} + +func TestManager_PendingSignedHeader_NilHeaderIgnored(t *testing.T) { + t.Parallel() + cfg := tempConfig(t) + st := testMemStore(t) + + m, err := NewManager(cfg, st, zerolog.Nop()) + require.NoError(t, err) + + m.SetPendingSignedHeader(nil, "p2p") + _, _, ok := m.GetPendingSignedHeader(5) + require.False(t, ok) + + real := signedHeaderForHeight(5, 0x01) + m.SetPendingSignedHeader(real, "p2p") + got, _, ok := m.GetPendingSignedHeader(5) + require.True(t, ok) + require.Equal(t, real.Hash().String(), got.Hash().String()) +} + +func TestManager_GetPendingSignedHeader_Miss(t *testing.T) { + t.Parallel() + cfg := tempConfig(t) + st := testMemStore(t) + + m, err := NewManager(cfg, st, zerolog.Nop()) + require.NoError(t, err) + + hdr, source, ok := m.GetPendingSignedHeader(99) + require.False(t, ok) + require.Nil(t, hdr) + require.Empty(t, source) +} + +func TestManager_RemovePendingSignedHeader_Idempotent(t *testing.T) { + t.Parallel() + cfg := tempConfig(t) + st := testMemStore(t) + + m, err := NewManager(cfg, st, zerolog.Nop()) + require.NoError(t, err) + + require.NotPanics(t, func() { 
m.RemovePendingSignedHeader(123) }) + + hdr := signedHeaderForHeight(5, 0x01) + m.SetPendingSignedHeader(hdr, "p2p") + m.RemovePendingSignedHeader(5) + m.RemovePendingSignedHeader(5) + _, _, ok := m.GetPendingSignedHeader(5) + require.False(t, ok) +} + +func TestManager_DeleteHeight_EvictsPendingSignedHeader(t *testing.T) { + t.Parallel() + cfg := tempConfig(t) + st := testMemStore(t) + + m, err := NewManager(cfg, st, zerolog.Nop()) + require.NoError(t, err) + + hdr := signedHeaderForHeight(5, 0x01) + m.SetPendingSignedHeader(hdr, "p2p") + _, _, ok := m.GetPendingSignedHeader(5) + require.True(t, ok) + + m.DeleteHeight(5) + + _, _, ok = m.GetPendingSignedHeader(5) + require.False(t, ok) +} + +func TestManager_ClearFromStore_ResetsPendingSignedHeaders(t *testing.T) { + t.Parallel() + cfg := tempConfig(t) + st := testMemStore(t) + + m, err := NewManager(cfg, st, zerolog.Nop()) + require.NoError(t, err) + + m.SetPendingSignedHeader(signedHeaderForHeight(5, 0x01), "p2p") + m.SetPendingSignedHeader(signedHeaderForHeight(6, 0x02), "da") + + impl, ok := m.(*implementation) + require.True(t, ok) + require.NoError(t, impl.ClearFromStore()) + + for _, h := range []uint64{5, 6} { + _, _, present := m.GetPendingSignedHeader(h) + require.False(t, present, "pending entry at %d must be cleared", h) + } +} + +// Race-detector coverage for the pending-signed-header map. Run with -race. 
+func TestManager_PendingSignedHeader_Concurrency(t *testing.T) { + t.Parallel() + cfg := tempConfig(t) + st := testMemStore(t) + + m, err := NewManager(cfg, st, zerolog.Nop()) + require.NoError(t, err) + + const ( + writers = 8 + readers = 8 + removers = 4 + heightsPerRun = 200 + ) + + headers := make([]*types.SignedHeader, heightsPerRun) + for i := range headers { + headers[i] = signedHeaderForHeight(uint64(i+1), byte(i&0xff)) + } + + var ( + wg sync.WaitGroup + startCh = make(chan struct{}) + writerHit atomic.Int64 + readerHit atomic.Int64 + ) + + for w := range writers { + wg.Add(1) + go func(seed int) { + defer wg.Done() + <-startCh + for i := range heightsPerRun { + idx := (seed*7 + i) % heightsPerRun + m.SetPendingSignedHeader(headers[idx], "p2p") + writerHit.Add(1) + } + }(w) + } + + for r := range readers { + wg.Add(1) + go func(seed int) { + defer wg.Done() + <-startCh + for i := range heightsPerRun { + h := uint64((seed*11+i)%heightsPerRun + 1) + _, _, _ = m.GetPendingSignedHeader(h) + readerHit.Add(1) + } + }(r) + } + + for d := range removers { + wg.Add(1) + go func(seed int) { + defer wg.Done() + <-startCh + for i := range heightsPerRun { + h := uint64((seed*13+i)%heightsPerRun + 1) + if i%2 == 0 { + m.RemovePendingSignedHeader(h) + } else { + m.DeleteHeight(h) + } + } + }(d) + } + + close(startCh) + wg.Wait() + + require.Equal(t, int64(writers*heightsPerRun), writerHit.Load()) + require.Equal(t, int64(readers*heightsPerRun), readerHit.Load()) +} + func TestManager_DaHeightFromStoreOnRestore(t *testing.T) { t.Parallel() diff --git a/block/internal/common/metrics.go b/block/internal/common/metrics.go index 179157eb7e..a817af4be0 100644 --- a/block/internal/common/metrics.go +++ b/block/internal/common/metrics.go @@ -68,6 +68,9 @@ type Metrics struct { ForcedInclusionTxsInGracePeriod metrics.Gauge // Number of forced inclusion txs currently in grace period ForcedInclusionTxsMalicious metrics.Counter // Total number of forced inclusion txs marked as 
malicious + // Double-sign detection + DoubleSignsDetected metrics.Counter // Distinct (height, alternate-hash) pairs observed + // Syncer metrics BlocksSynchronized map[EventSource]metrics.Counter // Blocks synchronized by source (P2P or DA) } @@ -189,6 +192,13 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Help: "Total number of forced inclusion transactions marked as malicious (past grace boundary)", }, labels).With(labelsAndValues...) + m.DoubleSignsDetected = prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "double_signs_detected_total", + Help: "Total number of distinct (height, alternate-hash) double-sign events observed", + }, labels).With(labelsAndValues...) + // DA Submitter metrics m.DASubmitterPendingBlobs = prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, @@ -269,6 +279,9 @@ func NopMetrics() *Metrics { ForcedInclusionTxsInGracePeriod: discard.NewGauge(), ForcedInclusionTxsMalicious: discard.NewCounter(), + // Double-sign detection + DoubleSignsDetected: discard.NewCounter(), + // Syncer metrics BlocksSynchronized: make(map[EventSource]metrics.Counter), } diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index d4fa93ce04..ca488e07b7 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -15,6 +15,7 @@ import ( "github.com/evstack/ev-node/block/internal/da" datypes "github.com/evstack/ev-node/pkg/da/types" "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/store" "github.com/evstack/ev-node/types" pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) @@ -34,6 +35,8 @@ type daRetriever struct { cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger + store store.Store + onDoubleSign doubleSignHandler // nil disables detection; the retriever aborts the batch on a positive mu sync.Mutex // transient cache, 
only full event need to be passed to the syncer @@ -46,18 +49,23 @@ type daRetriever struct { strictMode bool } -// NewDARetriever creates a new DA retriever +// NewDARetriever creates a new DA retriever. Double-sign detection is disabled +// when st or onDoubleSign is nil. func NewDARetriever( client da.Client, cache cache.CacheManager, genesis genesis.Genesis, logger zerolog.Logger, + st store.Store, + onDoubleSign doubleSignHandler, ) *daRetriever { return &daRetriever{ client: client, cache: cache, genesis: genesis, logger: logger.With().Str("component", "da_retriever").Logger(), + store: st, + onDoubleSign: onDoubleSign, pendingHeaders: make(map[uint64]*types.SignedHeader), pendingData: make(map[uint64]*types.Data), strictMode: false, @@ -172,9 +180,18 @@ func (r *daRetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight } if header := r.tryDecodeHeader(bz, daHeight); header != nil { + // Catches both in-batch alternates and alternates of already-persisted heights. + if r.store != nil && r.onDoubleSign != nil { + if ev, err := detectDoubleSign(ctx, r.store, r.cache, header, types.EvidenceSourceDA); err == nil && ev != nil { + r.onDoubleSign(ctx, ev) + return nil + } else if err != nil { + r.logger.Warn().Err(err).Uint64("height", header.Height()).Msg("double-sign detection error") + } + r.cache.SetPendingSignedHeader(header, types.EvidenceSourceDA) + } + if _, ok := r.pendingHeaders[header.Height()]; ok { - // a (malicious) node may have re-published valid header to another da height (should never happen) - // we can already discard it, only the first one is valid r.logger.Debug().Uint64("height", header.Height()).Uint64("da_height", daHeight).Msg("header blob already exists for height, discarding") continue } @@ -304,6 +321,15 @@ func (r *daRetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedH return nil } + // Precondition for the double-sign detector: a forged blob must never + // reach the pending cache or be persisted as 
equivocation evidence. + // Required even in strict envelope mode — the inner SignedHeader + // signature is a separate commitment from the envelope signature. + if err := header.ValidateBasic(); err != nil { + r.logger.Debug().Err(err).Msg("signed header failed validation") + return nil + } + if isValidEnvelope && !r.strictMode { r.logger.Info().Uint64("height", header.Height()).Msg("valid DA envelope detected, switching to STRICT MODE") r.strictMode = true diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index 3b587def1f..fe7718eed6 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -52,7 +52,7 @@ func newTestDARetriever(t *testing.T, mockClient *mocks.MockClient, cfg config.C mockClient.On("GetForcedInclusionNamespace").Return([]byte(nil)).Maybe() mockClient.On("HasForcedInclusionNamespace").Return(false).Maybe() - return NewDARetriever(mockClient, cm, gen, zerolog.Nop()) + return NewDARetriever(mockClient, cm, gen, zerolog.Nop(), nil, nil) } // makeSignedDataBytes builds SignedData containing the provided Data and returns its binary encoding diff --git a/block/internal/syncing/doublesign.go b/block/internal/syncing/doublesign.go new file mode 100644 index 0000000000..3cdcb21965 --- /dev/null +++ b/block/internal/syncing/doublesign.go @@ -0,0 +1,179 @@ +package syncing + +import ( + "bytes" + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/rs/zerolog" + + "github.com/evstack/ev-node/block/internal/cache" + "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/pkg/store" + "github.com/evstack/ev-node/types" +) + +// ErrDoubleSign is returned when two validly-signed SignedHeaders are observed at the same height. +var ErrDoubleSign = errors.New("double-sign detected") + +// doubleSignHandler is fired when an equivocation is confirmed. It persists +// evidence, bumps metrics, and halts the syncer. 
+type doubleSignHandler func(ctx context.Context, evidence *types.DoubleSignEvidence) + +// detectDoubleSign compares incoming against the first-seen SignedHeader at +// the same height (cache then store) and returns non-nil evidence when their +// hashes differ. Caller must verify proposer + signature first. +func detectDoubleSign( + ctx context.Context, + st store.Store, + cm cache.CacheManager, + incoming *types.SignedHeader, + incomingSource string, +) (*types.DoubleSignEvidence, error) { + if incoming == nil { + return nil, errors.New("incoming header is nil") + } + height := incoming.Height() + + // Cache wins over store: the cached entry is the literal first observation + // and carries the original FirstSource. + if cached, source, ok := cm.GetPendingSignedHeader(height); ok { + return buildEvidenceFromPair(cached, incoming, source, incomingSource), nil + } + + storedHeader, storeErr := st.GetHeader(ctx, height) + if storeErr != nil { + if store.IsNotFound(storeErr) { + return nil, nil + } + return nil, fmt.Errorf("lookup stored header at %d: %w", height, storeErr) + } + if storedHeader == nil { + return nil, nil + } + return buildEvidenceFromPair(storedHeader, incoming, types.EvidenceSourceStored, incomingSource), nil +} + +// buildEvidenceFromPair returns evidence for two SignedHeaders at the same +// height with different hashes and matching proposer. Returns nil otherwise. 
+func buildEvidenceFromPair(first, alternate *types.SignedHeader, firstSource, altSource string) *types.DoubleSignEvidence { + if first == nil || alternate == nil { + return nil + } + if first.Height() != alternate.Height() { + return nil + } + if bytes.Equal(first.Hash(), alternate.Hash()) { + return nil + } + if !bytes.Equal(first.ProposerAddress, alternate.ProposerAddress) { + return nil + } + return &types.DoubleSignEvidence{ + Height: first.Height(), + FirstHeader: first, + AlternateHeader: alternate, + DetectedAt: time.Now().UTC(), + FirstSource: firstSource, + AlternateSource: altSource, + } +} + +// persistEvidence writes evidence to its canonical metadata key. Idempotent. +func persistEvidence(ctx context.Context, st store.Store, ev *types.DoubleSignEvidence) error { + if err := ev.ValidateBasic(); err != nil { + return fmt.Errorf("invalid evidence: %w", err) + } + blob, err := ev.MarshalBinary() + if err != nil { + return fmt.Errorf("marshal evidence: %w", err) + } + altHash := ev.AlternateHeader.Hash() + key := store.GetDoubleSignEvidenceKey(ev.Height, altHash) + if err := st.SetMetadata(ctx, key, blob); err != nil { + return fmt.Errorf("persist evidence at %s: %w", key, err) + } + return nil +} + +// reportDoubleSign persists evidence, logs, bumps the metric (once per +// distinct alternate hash via seen), fires criticalErr, and returns the +// wrapped ErrDoubleSign for the caller to propagate as the halt cause. +func reportDoubleSign( + ctx context.Context, + st store.Store, + metrics *common.Metrics, + logger zerolog.Logger, + seen *doubleSignDedup, + criticalErr func(error), + ev *types.DoubleSignEvidence, +) error { + altHashStr := ev.AlternateHeader.Hash().String() + firstHashStr := ev.FirstHeader.Hash().String() + key := store.GetDoubleSignEvidenceKey(ev.Height, ev.AlternateHeader.Hash()) + + // Persist on every call: idempotent, and a retry covers a transient + // failure on the first attempt. 
+ persistErr := persistEvidence(ctx, st, ev) + + if seen != nil && !seen.markSeen(ev.Height, altHashStr) { + return nil + } + + if persistErr != nil { + logger.Error().Err(persistErr). + Uint64("height", ev.Height). + Str("first_hash", firstHashStr). + Str("alternate_hash", altHashStr). + Msg("failed to persist double-sign evidence") + } + + if metrics != nil && metrics.DoubleSignsDetected != nil { + metrics.DoubleSignsDetected.Add(1) + } + + logger.Error(). + Uint64("height", ev.Height). + Str("first_hash", firstHashStr). + Str("first_source", ev.FirstSource). + Str("alternate_hash", altHashStr). + Str("alternate_source", ev.AlternateSource). + Str("evidence_key", key). + Msg("DOUBLE-SIGN DETECTED — sequencer equivocation; halting syncer") + + halt := fmt.Errorf( + "double-sign detected at height %d: sequencer signed conflicting headers %s and %s. "+ + "Evidence persisted at metadata key %s. Manual intervention required: %w", + ev.Height, firstHashStr, altHashStr, key, ErrDoubleSign, + ) + if criticalErr != nil { + criticalErr(halt) + } + return halt +} + +// doubleSignDedup collapses (height, altHash) duplicates so the same +// equivocation arriving from both P2P and DA is only reported once. +type doubleSignDedup struct { + mu sync.Mutex + seen map[string]struct{} +} + +func newDoubleSignDedup() *doubleSignDedup { + return &doubleSignDedup{seen: make(map[string]struct{})} +} + +// markSeen records (height, altHash) and returns true on first sight. 
+func (d *doubleSignDedup) markSeen(height uint64, altHash string) bool { + key := fmt.Sprintf("%d/%s", height, altHash) + d.mu.Lock() + defer d.mu.Unlock() + if _, ok := d.seen[key]; ok { + return false + } + d.seen[key] = struct{}{} + return true +} diff --git a/block/internal/syncing/doublesign_branches_test.go b/block/internal/syncing/doublesign_branches_test.go new file mode 100644 index 0000000000..ec99bfb3ac --- /dev/null +++ b/block/internal/syncing/doublesign_branches_test.go @@ -0,0 +1,359 @@ +package syncing + +import ( + "context" + "errors" + "sync/atomic" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/block/internal/da" + "github.com/evstack/ev-node/pkg/store" + testmocks "github.com/evstack/ev-node/test/mocks" + "github.com/evstack/ev-node/types" + pb "github.com/evstack/ev-node/types/pb/evnode/v1" +) + +// DA client stub with shared namespace mocks. +func newMockDAClient(t *testing.T) da.Client { + t.Helper() + c := testmocks.NewMockClient(t) + c.On("GetHeaderNamespace").Return([]byte("ns")).Maybe() + c.On("GetDataNamespace").Return([]byte("ns")).Maybe() + return c +} + +// errStore wraps a store and injects errors on selected reads/writes to +// exercise error-handling branches an in-memory store can't hit. +type errStore struct { + store.Store + getHeaderErr error + setMetadataErr error +} + +func (e *errStore) GetHeader(ctx context.Context, height uint64) (*types.SignedHeader, error) { + if e.getHeaderErr != nil { + return nil, e.getHeaderErr + } + return e.Store.GetHeader(ctx, height) +} + +func (e *errStore) SetMetadata(ctx context.Context, key string, value []byte) error { + if e.setMetadataErr != nil { + return e.setMetadataErr + } + return e.Store.SetMetadata(ctx, key, value) +} + +// nilHeaderStore returns (nil, nil) from GetHeader; the detector must treat +// that as "no record" rather than crashing. 
+type nilHeaderStore struct{ store.Store } + +func (nilHeaderStore) GetHeader(context.Context, uint64) (*types.SignedHeader, error) { + return nil, nil +} + +func TestDetectDoubleSign_NilIncomingReturnsError(t *testing.T) { + env := newDSTestEnv(t) + ev, err := detectDoubleSign(context.Background(), env.store, env.cache, nil, types.EvidenceSourceP2P) + require.Error(t, err) + require.Nil(t, ev) +} + +// A non-NotFound store failure must be surfaced, not swallowed. +func TestDetectDoubleSign_StoreErrorWrapped(t *testing.T) { + env := newDSTestEnv(t) + wrapped := &errStore{Store: env.store, getHeaderErr: errors.New("backend down")} + + alt := env.signHeaderAtHeight(5, 0x01) + ev, err := detectDoubleSign(context.Background(), wrapped, env.cache, alt, types.EvidenceSourceP2P) + require.Error(t, err) + require.Nil(t, ev) + require.Contains(t, err.Error(), "lookup stored header") + require.ErrorContains(t, err, "backend down") +} + +func TestDetectDoubleSign_StoredHeaderNilDefensive(t *testing.T) { + env := newDSTestEnv(t) + wrapped := nilHeaderStore{Store: env.store} + + alt := env.signHeaderAtHeight(5, 0x01) + ev, err := detectDoubleSign(context.Background(), wrapped, env.cache, alt, types.EvidenceSourceP2P) + require.NoError(t, err) + require.Nil(t, ev) +} + +// Store-path detections must use the "stored" sentinel as FirstSource so +// downstream consumers can disambiguate it from in-flight observations. +func TestDetectDoubleSign_FirstSourceStoredSentinel(t *testing.T) { + env := newDSTestEnv(t) + first := env.signHeaderAtHeight(5, 0x01) + env.saveHeader(first) + + alt := env.signHeaderAtHeight(5, 0x02) + ev, err := detectDoubleSign(context.Background(), env.store, env.cache, alt, types.EvidenceSourceP2P) + require.NoError(t, err) + require.NotNil(t, ev) + require.Equal(t, types.EvidenceSourceStored, ev.FirstSource) +} + +// SetMetadata failures must include the canonical key so an operator can +// recover the persistence target from logs alone. 
+func TestPersistEvidence_StoreError(t *testing.T) { + env := newDSTestEnv(t) + wrapped := &errStore{Store: env.store, setMetadataErr: errors.New("disk full")} + + first := env.signHeaderAtHeight(5, 0x01) + alt := env.signHeaderAtHeight(5, 0x02) + ev := buildEvidenceFromPair(first, alt, types.EvidenceSourceP2P, types.EvidenceSourceDA) + require.NotNil(t, ev) + + err := persistEvidence(context.Background(), wrapped, ev) + require.Error(t, err) + require.ErrorContains(t, err, "disk full") + require.Contains(t, err.Error(), store.GetDoubleSignEvidenceKey(ev.Height, ev.AlternateHeader.Hash())) +} + +// Persistence failure must not break the halt contract: metric still +// increments, criticalErr still fires, returned error still wraps ErrDoubleSign. +func TestReportDoubleSign_PersistFailureLoggedNotBlocking(t *testing.T) { + env := newDSTestEnv(t) + wrapped := &errStore{Store: env.store, setMetadataErr: errors.New("disk full")} + + first := env.signHeaderAtHeight(5, 0x01) + alt := env.signHeaderAtHeight(5, 0x02) + ev := buildEvidenceFromPair(first, alt, types.EvidenceSourceP2P, types.EvidenceSourceDA) + require.NotNil(t, ev) + + var dsCount atomic.Int64 + metrics := common.NopMetrics() + metrics.DoubleSignsDetected = &counterCtr{n: &dsCount} + + var fired atomic.Pointer[error] + crit := func(err error) { fired.Store(&err) } + + halt := reportDoubleSign(context.Background(), wrapped, metrics, zerolog.Nop(), + newDoubleSignDedup(), crit, ev) + require.Error(t, halt) + require.ErrorIs(t, halt, ErrDoubleSign) + + require.Equal(t, int64(1), dsCount.Load()) + require.NotNil(t, fired.Load()) +} + +// Dedup is keyed on (height, altHash), so two distinct alts at the same +// height must each produce evidence. 
+func TestReportDoubleSign_TwoDistinctAltsAtSameHeight(t *testing.T) { + env := newDSTestEnv(t) + first := env.signHeaderAtHeight(5, 0x01) + alt1 := env.signHeaderAtHeight(5, 0x02) + alt2 := env.signHeaderAtHeight(5, 0x03) + require.NotEqual(t, alt1.Hash().String(), alt2.Hash().String()) + + ev1 := buildEvidenceFromPair(first, alt1, types.EvidenceSourceP2P, types.EvidenceSourceDA) + ev2 := buildEvidenceFromPair(first, alt2, types.EvidenceSourceP2P, types.EvidenceSourceDA) + require.NotNil(t, ev1) + require.NotNil(t, ev2) + + var dsCount atomic.Int64 + metrics := common.NopMetrics() + metrics.DoubleSignsDetected = &counterCtr{n: &dsCount} + + seen := newDoubleSignDedup() + noopCrit := func(error) {} + + require.Error(t, reportDoubleSign(context.Background(), env.store, metrics, + zerolog.Nop(), seen, noopCrit, ev1)) + require.Error(t, reportDoubleSign(context.Background(), env.store, metrics, + zerolog.Nop(), seen, noopCrit, ev2)) + + require.Equal(t, int64(2), dsCount.Load()) + + for _, ev := range []*types.DoubleSignEvidence{ev1, ev2} { + key := store.GetDoubleSignEvidenceKey(ev.Height, ev.AlternateHeader.Hash()) + blob, err := env.store.GetMetadata(context.Background(), key) + require.NoError(t, err) + require.NotEmpty(t, blob) + } +} + +func TestReportDoubleSign_NilSeenAndNilGuards(t *testing.T) { + env := newDSTestEnv(t) + first := env.signHeaderAtHeight(5, 0x01) + alt := env.signHeaderAtHeight(5, 0x02) + ev := buildEvidenceFromPair(first, alt, types.EvidenceSourceP2P, types.EvidenceSourceDA) + require.NotNil(t, ev) + + t.Run("nil seen still halts", func(t *testing.T) { + halt := reportDoubleSign(context.Background(), env.store, common.NopMetrics(), + zerolog.Nop(), nil, func(error) {}, ev) + require.Error(t, halt) + require.ErrorIs(t, halt, ErrDoubleSign) + }) + + t.Run("nil metrics still halts", func(t *testing.T) { + halt := reportDoubleSign(context.Background(), env.store, nil, + zerolog.Nop(), newDoubleSignDedup(), func(error) {}, ev) + require.Error(t, 
halt) + require.ErrorIs(t, halt, ErrDoubleSign) + }) + + t.Run("nil counter inside metrics still halts", func(t *testing.T) { + m := common.NopMetrics() + m.DoubleSignsDetected = nil + halt := reportDoubleSign(context.Background(), env.store, m, + zerolog.Nop(), newDoubleSignDedup(), func(error) {}, ev) + require.Error(t, halt) + require.ErrorIs(t, halt, ErrDoubleSign) + }) + + t.Run("nil criticalErr still halts", func(t *testing.T) { + halt := reportDoubleSign(context.Background(), env.store, common.NopMetrics(), + zerolog.Nop(), newDoubleSignDedup(), nil, ev) + require.Error(t, halt) + require.ErrorIs(t, halt, ErrDoubleSign) + }) +} + +func TestDoubleSignEvidence_FromProtoNil(t *testing.T) { + dst := new(types.DoubleSignEvidence) + require.Error(t, dst.FromProto(nil)) +} + +func TestDoubleSignEvidence_FromProtoInnerHeaderError(t *testing.T) { + // Both inner SignedHeader fields nil — the wrapper must surface the error. + p := &pb.DoubleSignEvidence{Height: 5} + dst := new(types.DoubleSignEvidence) + require.Error(t, dst.FromProto(p)) +} + +// FromProto must reject partial-nil sub-messages (one set, one nil) to keep +// the (FirstHeader, AlternateHeader) pair invariant after deserialization. 
+func TestDoubleSignEvidence_FromProtoPartialNilHeader(t *testing.T) { + env := newDSTestEnv(t) + hdr := env.signHeaderAtHeight(5, 0x01) + hdrPB, err := hdr.ToProto() + require.NoError(t, err) + + t.Run("alternate nil", func(t *testing.T) { + dst := new(types.DoubleSignEvidence) + require.Error(t, dst.FromProto(&pb.DoubleSignEvidence{Height: 5, FirstHeader: hdrPB})) + }) + t.Run("first nil", func(t *testing.T) { + dst := new(types.DoubleSignEvidence) + require.Error(t, dst.FromProto(&pb.DoubleSignEvidence{Height: 5, AlternateHeader: hdrPB})) + }) +} + +func TestDoubleSignEvidence_UnmarshalBinaryGarbage(t *testing.T) { + dst := new(types.DoubleSignEvidence) + require.Error(t, dst.UnmarshalBinary([]byte{0xff, 0xff, 0xff, 0xff})) +} + +// Once equivocation is detected, the rest of the batch must be dropped. +func TestDARetriever_AbortsBatchOnDetection(t *testing.T) { + env := newDSTestEnv(t) + + first := env.signHeaderAtHeight(5, 0x01) + alt := env.signHeaderAtHeight(5, 0x02) + next := env.signHeaderAtHeight(6, 0x01) + + firstBin, err := first.MarshalBinary() + require.NoError(t, err) + altBin, err := alt.MarshalBinary() + require.NoError(t, err) + nextBin, err := next.MarshalBinary() + require.NoError(t, err) + + mockClient := newMockDAClient(t) + + r := NewDARetriever(mockClient, env.cache, env.gen, zerolog.Nop(), env.store, env.onDouble) + + events := r.ProcessBlobs(context.Background(), + [][]byte{firstBin, altBin, nextBin}, 100) + require.Empty(t, events) + require.NotContains(t, r.pendingHeaders, uint64(6)) +} + +// On a detector error, the retriever still caches the header so a later +// alternate can be matched once the store recovers. 
+func TestDARetriever_DetectorErrorWarnAndContinue(t *testing.T) { + env := newDSTestEnv(t) + wrapped := &errStore{Store: env.store, getHeaderErr: errors.New("flapping disk")} + + hdr := env.signHeaderAtHeight(5, 0x01) + bin, err := hdr.MarshalBinary() + require.NoError(t, err) + + mockClient := newMockDAClient(t) + + r := NewDARetriever(mockClient, env.cache, env.gen, zerolog.Nop(), wrapped, env.onDouble) + + _ = r.ProcessBlobs(context.Background(), [][]byte{bin}, 100) + + require.Empty(t, env.captured()) + + got, src, ok := env.cache.GetPendingSignedHeader(hdr.Height()) + require.True(t, ok) + require.Equal(t, hdr.Hash().String(), got.Hash().String()) + require.Equal(t, types.EvidenceSourceDA, src) +} + +// Double-sign detection through the envelope (strict-mode) path. +func TestDARetriever_StrictModeEnvelopeDoubleSign(t *testing.T) { + env := newDSTestEnv(t) + + first := env.signHeaderAtHeight(5, 0x01) + alt := env.signHeaderAtHeight(5, 0x02) + + mkEnvelope := func(h *types.SignedHeader) []byte { + content, err := h.MarshalBinary() + require.NoError(t, err) + envSig, err := env.signer.Sign(t.Context(), content) + require.NoError(t, err) + envBin, err := h.MarshalDAEnvelope(envSig) + require.NoError(t, err) + return envBin + } + + firstBin := mkEnvelope(first) + altBin := mkEnvelope(alt) + + mockClient := newMockDAClient(t) + + r := NewDARetriever(mockClient, env.cache, env.gen, zerolog.Nop(), env.store, env.onDouble) + + events := r.ProcessBlobs(context.Background(), [][]byte{firstBin, altBin}, 100) + require.Empty(t, events) + require.True(t, r.strictMode) + + captured := env.captured() + require.Len(t, captured, 1) + require.Equal(t, uint64(5), captured[0].Height) + require.Equal(t, types.EvidenceSourceDA, captured[0].FirstSource) + require.Equal(t, types.EvidenceSourceDA, captured[0].AlternateSource) +} + +// First DA observation must populate the pending cache so a later cross-source +// alternate can be matched against it. 
+func TestDARetriever_SetsPendingSignedHeaderOnFirstObservation(t *testing.T) { + env := newDSTestEnv(t) + + first := env.signHeaderAtHeight(5, 0x01) + bin, err := first.MarshalBinary() + require.NoError(t, err) + + mockClient := newMockDAClient(t) + + r := NewDARetriever(mockClient, env.cache, env.gen, zerolog.Nop(), env.store, env.onDouble) + _ = r.ProcessBlobs(context.Background(), [][]byte{bin}, 100) + + got, src, ok := env.cache.GetPendingSignedHeader(5) + require.True(t, ok) + require.Equal(t, first.Hash().String(), got.Hash().String()) + require.Equal(t, types.EvidenceSourceDA, src) +} + diff --git a/block/internal/syncing/doublesign_test.go b/block/internal/syncing/doublesign_test.go new file mode 100644 index 0000000000..14916c7377 --- /dev/null +++ b/block/internal/syncing/doublesign_test.go @@ -0,0 +1,676 @@ +package syncing + +import ( + "context" + "sync/atomic" + "testing" + "time" + + gkmetrics "github.com/go-kit/kit/metrics" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + "github.com/evstack/ev-node/block/internal/cache" + "github.com/evstack/ev-node/block/internal/common" + "github.com/evstack/ev-node/pkg/config" + "github.com/evstack/ev-node/pkg/genesis" + signerpkg "github.com/evstack/ev-node/pkg/signer" + "github.com/evstack/ev-node/pkg/store" + testmocks "github.com/evstack/ev-node/test/mocks" + extmocks "github.com/evstack/ev-node/test/mocks/external" + "github.com/evstack/ev-node/types" + pb "github.com/evstack/ev-node/types/pb/evnode/v1" +) + +// dsTestEnv bundles the store, cache, genesis and signer used by the +// double-sign tests. 
+type dsTestEnv struct {
+	t        *testing.T
+	store    store.Store
+	cache    cache.CacheManager
+	gen      genesis.Genesis
+	addr     []byte
+	pub      crypto.PubKey
+	signer   signerpkg.Signer
+	chainID  string
+	capLock  atomic.Pointer[[]*types.DoubleSignEvidence]
+	onDouble doubleSignHandler
+}
+
+func newDSTestEnv(t *testing.T) *dsTestEnv {
+	t.Helper()
+	memDS := dssync.MutexWrap(ds.NewMapDatastore())
+	st := store.New(memDS)
+	cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop())
+	require.NoError(t, err)
+
+	addr, pub, signer := buildSyncTestSigner(t)
+	gen := genesis.Genesis{
+		ChainID:         "ds-test",
+		InitialHeight:   1,
+		StartTime:       time.Now().Add(-time.Second),
+		ProposerAddress: addr,
+	}
+
+	env := &dsTestEnv{
+		t:       t,
+		store:   st,
+		cache:   cm,
+		gen:     gen,
+		addr:    addr,
+		pub:     pub,
+		signer:  signer,
+		chainID: gen.ChainID,
+	}
+	empty := []*types.DoubleSignEvidence{}
+	env.capLock.Store(&empty)
+	env.onDouble = func(ctx context.Context, ev *types.DoubleSignEvidence) {
+		require.NoError(t, ev.ValidateBasic())
+		for {
+			cur := env.capLock.Load()
+			next := append([]*types.DoubleSignEvidence(nil), *cur...)
+			next = append(next, ev)
+			if env.capLock.CompareAndSwap(cur, &next) {
+				break
+			}
+		}
+		// Persist immediately so tests can verify round-trip decoding.
+		require.NoError(t, persistEvidence(ctx, st, ev))
+	}
+	return env
+}
+
+func (e *dsTestEnv) captured() []*types.DoubleSignEvidence {
+	return *e.capLock.Load()
+}
+
+// signHeaderAtHeight signs a header at height by the genesis proposer; variant differentiates hashes.
+func (e *dsTestEnv) signHeaderAtHeight(height uint64, variant byte) *types.SignedHeader {
+	e.t.Helper()
+	_, hdr := makeSignedHeaderBytes(
+		e.t, e.chainID, height, e.addr, e.pub, e.signer,
+		[]byte{variant, variant, variant},
+		nil,
+		nil,
+	)
+	return hdr
+}
+
+// signHeaderWithOtherProposer signs a header by a fresh (non-genesis) signer.
+func (e *dsTestEnv) signHeaderWithOtherProposer(height uint64, variant byte) *types.SignedHeader { + e.t.Helper() + otherAddr, otherPub, otherSigner := buildSyncTestSigner(e.t) + _, hdr := makeSignedHeaderBytes( + e.t, e.chainID, height, otherAddr, otherPub, otherSigner, + []byte{variant, variant, variant}, + nil, + nil, + ) + return hdr +} + +func (e *dsTestEnv) saveHeader(hdr *types.SignedHeader) { + e.t.Helper() + batch, err := e.store.NewBatch(context.Background()) + require.NoError(e.t, err) + require.NoError(e.t, batch.SaveBlockData(hdr, &types.Data{ + Metadata: &types.Metadata{ChainID: e.chainID, Height: hdr.Height(), Time: hdr.BaseHeader.Time}, + }, &hdr.Signature)) + require.NoError(e.t, batch.SetHeight(hdr.Height())) + require.NoError(e.t, batch.Commit()) +} + +func TestDetectDoubleSign_TwoValidHeadersSameHeight(t *testing.T) { + env := newDSTestEnv(t) + first := env.signHeaderAtHeight(5, 0x01) + env.saveHeader(first) + + alt := env.signHeaderAtHeight(5, 0x02) + require.NotEqual(t, first.Hash().String(), alt.Hash().String()) + + ev, err := detectDoubleSign(context.Background(), env.store, env.cache, alt, types.EvidenceSourceP2P) + require.NoError(t, err) + require.NotNil(t, ev) + require.Equal(t, uint64(5), ev.Height) + require.Equal(t, first.Hash().String(), ev.FirstHeader.Hash().String()) + require.Equal(t, alt.Hash().String(), ev.AlternateHeader.Hash().String()) + require.Equal(t, types.EvidenceSourceP2P, ev.AlternateSource) + + // Round-trip through marshal/unmarshal. 
+ blob, err := ev.MarshalBinary() + require.NoError(t, err) + decoded := new(types.DoubleSignEvidence) + require.NoError(t, decoded.UnmarshalBinary(blob)) + require.Equal(t, ev.Height, decoded.Height) + require.Equal(t, ev.FirstHeader.Hash().String(), decoded.FirstHeader.Hash().String()) + require.Equal(t, ev.AlternateHeader.Hash().String(), decoded.AlternateHeader.Hash().String()) + require.Equal(t, ev.FirstSource, decoded.FirstSource) + require.Equal(t, ev.AlternateSource, decoded.AlternateSource) +} + +func TestDetectDoubleSign_IdenticalHashNoEvidence(t *testing.T) { + env := newDSTestEnv(t) + first := env.signHeaderAtHeight(5, 0x01) + env.saveHeader(first) + + ev, err := detectDoubleSign(context.Background(), env.store, env.cache, first, types.EvidenceSourceP2P) + require.NoError(t, err) + require.Nil(t, ev) +} + +func TestDetectDoubleSign_NoPriorRecordReturnsNil(t *testing.T) { + env := newDSTestEnv(t) + alt := env.signHeaderAtHeight(5, 0x01) + ev, err := detectDoubleSign(context.Background(), env.store, env.cache, alt, types.EvidenceSourceP2P) + require.NoError(t, err) + require.Nil(t, ev) +} + +func TestBuildEvidenceFromPair_ProposerMismatch(t *testing.T) { + env := newDSTestEnv(t) + first := env.signHeaderAtHeight(5, 0x01) + alt := env.signHeaderWithOtherProposer(5, 0x02) + require.Nil(t, buildEvidenceFromPair(first, alt, types.EvidenceSourceDA, types.EvidenceSourceDA)) +} + +func TestBuildEvidenceFromPair_HappyPath(t *testing.T) { + env := newDSTestEnv(t) + first := env.signHeaderAtHeight(5, 0x01) + alt := env.signHeaderAtHeight(5, 0x02) + ev := buildEvidenceFromPair(first, alt, types.EvidenceSourceDA, types.EvidenceSourceDA) + require.NotNil(t, ev) + require.NoError(t, ev.ValidateBasic()) +} + +func TestBuildEvidenceFromPair_EdgeCases(t *testing.T) { + env := newDSTestEnv(t) + a := env.signHeaderAtHeight(5, 0x01) + b := env.signHeaderAtHeight(6, 0x02) + + require.Nil(t, buildEvidenceFromPair(nil, a, types.EvidenceSourceDA, types.EvidenceSourceDA)) + 
require.Nil(t, buildEvidenceFromPair(a, nil, types.EvidenceSourceDA, types.EvidenceSourceDA)) + require.Nil(t, buildEvidenceFromPair(a, b, types.EvidenceSourceDA, types.EvidenceSourceDA)) + require.Nil(t, buildEvidenceFromPair(a, a, types.EvidenceSourceDA, types.EvidenceSourceDA)) +} + +func TestPersistEvidence_RejectsInvalid(t *testing.T) { + env := newDSTestEnv(t) + first := env.signHeaderAtHeight(5, 0x01) + bad := &types.DoubleSignEvidence{ + Height: 5, + FirstHeader: first, + AlternateHeader: first, + } + require.Error(t, persistEvidence(context.Background(), env.store, bad)) +} + +func TestDoubleSignDedup(t *testing.T) { + d := newDoubleSignDedup() + require.True(t, d.markSeen(7, "abc")) + require.False(t, d.markSeen(7, "abc")) + require.True(t, d.markSeen(7, "def")) + require.True(t, d.markSeen(8, "abc")) +} + +func TestReportDoubleSign_PersistsAndHalts(t *testing.T) { + env := newDSTestEnv(t) + first := env.signHeaderAtHeight(5, 0x01) + alt := env.signHeaderAtHeight(5, 0x02) + ev := buildEvidenceFromPair(first, alt, types.EvidenceSourceP2P, types.EvidenceSourceDA) + require.NotNil(t, ev) + + metrics := common.NopMetrics() + seen := newDoubleSignDedup() + var halted atomic.Pointer[error] + crit := func(err error) { halted.Store(&err) } + + halt1 := reportDoubleSign(context.Background(), env.store, metrics, zerolog.Nop(), seen, crit, ev) + require.Error(t, halt1) + + // Second call must be a no-op via dedup. 
+ halted.Store(nil) + halt2 := reportDoubleSign(context.Background(), env.store, metrics, zerolog.Nop(), seen, crit, ev) + require.NoError(t, halt2) + require.Nil(t, halted.Load()) + + key := store.GetDoubleSignEvidenceKey(ev.Height, ev.AlternateHeader.Hash()) + blob, err := env.store.GetMetadata(context.Background(), key) + require.NoError(t, err) + decoded := new(types.DoubleSignEvidence) + require.NoError(t, decoded.UnmarshalBinary(blob)) + require.Equal(t, ev.Height, decoded.Height) + require.Equal(t, ev.AlternateHeader.Hash().String(), decoded.AlternateHeader.Hash().String()) +} + +func TestP2PHandler_DoubleSignTriggersCriticalError(t *testing.T) { + env := newDSTestEnv(t) + + // Persist canonical header, then arrange a conflicting one to come in via P2P. + first := env.signHeaderAtHeight(5, 0x01) + env.saveHeader(first) + alt := env.signHeaderAtHeight(5, 0x02) + require.NotEqual(t, first.Hash().String(), alt.Hash().String()) + + headerStoreMock := extmocks.NewMockStore[*types.P2PSignedHeader](t) + dataStoreMock := extmocks.NewMockStore[*types.P2PData](t) + headerStoreMock.EXPECT(). + GetByHeight(mock.Anything, uint64(5)). + Return(&types.P2PSignedHeader{SignedHeader: alt}, nil). 
+ Once() + + h := NewP2PHandler(headerStoreMock, dataStoreMock, env.cache, env.gen, zerolog.Nop(), env.store, env.onDouble) + + ch := make(chan common.DAHeightEvent, 1) + require.NoError(t, h.ProcessHeight(context.Background(), 5, ch)) + + captured := env.captured() + require.Len(t, captured, 1) + require.Equal(t, alt.Hash().String(), captured[0].AlternateHeader.Hash().String()) + require.Equal(t, types.EvidenceSourceP2P, captured[0].AlternateSource) + + key := store.GetDoubleSignEvidenceKey(5, alt.Hash()) + blob, err := env.store.GetMetadata(context.Background(), key) + require.NoError(t, err) + require.NotEmpty(t, blob) + + select { + case evt := <-ch: + t.Fatalf("expected no event on double-sign; got %+v", evt) + default: + } +} + +func TestP2PHandler_ProposerMismatchIsNotEvidence(t *testing.T) { + env := newDSTestEnv(t) + first := env.signHeaderAtHeight(5, 0x01) + env.saveHeader(first) + + // A header from a different signer must be rejected before the detector runs. + badHdr := env.signHeaderWithOtherProposer(5, 0x02) + + headerStoreMock := extmocks.NewMockStore[*types.P2PSignedHeader](t) + dataStoreMock := extmocks.NewMockStore[*types.P2PData](t) + headerStoreMock.EXPECT(). + GetByHeight(mock.Anything, uint64(5)). + Return(&types.P2PSignedHeader{SignedHeader: badHdr}, nil). 
+ Once() + + h := NewP2PHandler(headerStoreMock, dataStoreMock, env.cache, env.gen, zerolog.Nop(), env.store, env.onDouble) + + ch := make(chan common.DAHeightEvent, 1) + err := h.ProcessHeight(context.Background(), 5, ch) + require.Error(t, err) + require.Empty(t, env.captured()) +} + +func TestDARetriever_DoubleSignSamePendingBatch(t *testing.T) { + env := newDSTestEnv(t) + + first := env.signHeaderAtHeight(5, 0x01) + alt := env.signHeaderAtHeight(5, 0x02) + require.NotEqual(t, first.Hash().String(), alt.Hash().String()) + + firstBin, err := first.MarshalBinary() + require.NoError(t, err) + altBin, err := alt.MarshalBinary() + require.NoError(t, err) + + mockClient := testmocks.NewMockClient(t) + mockClient.On("GetHeaderNamespace").Return([]byte("ns")).Maybe() + mockClient.On("GetDataNamespace").Return([]byte("ns")).Maybe() + + r := NewDARetriever(mockClient, env.cache, env.gen, zerolog.Nop(), env.store, env.onDouble) + events := r.ProcessBlobs(context.Background(), [][]byte{firstBin, altBin}, 100) + require.Empty(t, events) + + captured := env.captured() + require.Len(t, captured, 1) + require.Equal(t, uint64(5), captured[0].Height) + require.Equal(t, types.EvidenceSourceDA, captured[0].FirstSource) + require.Equal(t, types.EvidenceSourceDA, captured[0].AlternateSource) +} + +func TestDARetriever_DoubleSignAcrossBatches(t *testing.T) { + env := newDSTestEnv(t) + + first := env.signHeaderAtHeight(5, 0x01) + env.saveHeader(first) + + alt := env.signHeaderAtHeight(5, 0x02) + altBin, err := alt.MarshalBinary() + require.NoError(t, err) + + mockClient := testmocks.NewMockClient(t) + mockClient.On("GetHeaderNamespace").Return([]byte("ns")).Maybe() + mockClient.On("GetDataNamespace").Return([]byte("ns")).Maybe() + + r := NewDARetriever(mockClient, env.cache, env.gen, zerolog.Nop(), env.store, env.onDouble) + events := r.ProcessBlobs(context.Background(), [][]byte{altBin}, 101) + require.Empty(t, events) + + captured := env.captured() + require.Len(t, captured, 1) + 
require.Equal(t, alt.Hash().String(), captured[0].AlternateHeader.Hash().String()) +} + +func TestDARetriever_BenignDuplicateAcrossBatchesDoesNotFire(t *testing.T) { + env := newDSTestEnv(t) + + first := env.signHeaderAtHeight(5, 0x01) + env.saveHeader(first) + + // Same header re-observed from DA (e.g. re-posted at a different DA height). + sameBin, err := first.MarshalBinary() + require.NoError(t, err) + + mockClient := testmocks.NewMockClient(t) + mockClient.On("GetHeaderNamespace").Return([]byte("ns")).Maybe() + mockClient.On("GetDataNamespace").Return([]byte("ns")).Maybe() + + r := NewDARetriever(mockClient, env.cache, env.gen, zerolog.Nop(), env.store, env.onDouble) + _ = r.ProcessBlobs(context.Background(), [][]byte{sameBin}, 101) + require.Empty(t, env.captured()) +} + +// A legacy blob with the correct proposer but a tampered signature must be +// rejected before reaching the detector or pending cache. +func TestDARetriever_LegacyForgedSignatureRejected(t *testing.T) { + env := newDSTestEnv(t) + + // Tamper the signature byte to invalidate verification while preserving + // every other field (proposer address included). + good := env.signHeaderAtHeight(5, 0x01) + pbHdr, err := good.ToProto() + require.NoError(t, err) + pbHdr.Signature = append([]byte(nil), good.Signature...) + pbHdr.Signature[0] ^= 0xff + bin, err := proto.Marshal(pbHdr) + require.NoError(t, err) + + mockClient := testmocks.NewMockClient(t) + mockClient.On("GetHeaderNamespace").Return([]byte("ns")).Maybe() + mockClient.On("GetDataNamespace").Return([]byte("ns")).Maybe() + + r := NewDARetriever(mockClient, env.cache, env.gen, zerolog.Nop(), env.store, env.onDouble) + require.Nil(t, r.tryDecodeHeader(bin, 100)) + + _, _, ok := env.cache.GetPendingSignedHeader(5) + require.False(t, ok) +} + +// Detection must trigger from a pending cache entry too, before persistence. 
+func TestDetectDoubleSign_PendingCacheHitProducesEvidence(t *testing.T) { + env := newDSTestEnv(t) + + first := env.signHeaderAtHeight(5, 0x01) + env.cache.SetPendingSignedHeader(first, types.EvidenceSourceDA) + // First header is in-flight, not yet on disk. + + alt := env.signHeaderAtHeight(5, 0x02) + ev, err := detectDoubleSign(context.Background(), env.store, env.cache, alt, types.EvidenceSourceP2P) + require.NoError(t, err) + require.NotNil(t, ev) + require.Equal(t, first.Hash().String(), ev.FirstHeader.Hash().String()) + require.Equal(t, alt.Hash().String(), ev.AlternateHeader.Hash().String()) + require.Equal(t, types.EvidenceSourceDA, ev.FirstSource) + require.Equal(t, types.EvidenceSourceP2P, ev.AlternateSource) +} + +func TestDetectDoubleSign_PendingCacheBenignDuplicate(t *testing.T) { + env := newDSTestEnv(t) + + first := env.signHeaderAtHeight(5, 0x01) + env.cache.SetPendingSignedHeader(first, types.EvidenceSourceDA) + + ev, err := detectDoubleSign(context.Background(), env.store, env.cache, first, types.EvidenceSourceP2P) + require.NoError(t, err) + require.Nil(t, ev) +} + +func TestDetectDoubleSign_PendingEvictedAfterRemoval(t *testing.T) { + env := newDSTestEnv(t) + + first := env.signHeaderAtHeight(5, 0x01) + env.cache.SetPendingSignedHeader(first, types.EvidenceSourceDA) + env.cache.RemovePendingSignedHeader(5) + + alt := env.signHeaderAtHeight(5, 0x02) + ev, err := detectDoubleSign(context.Background(), env.store, env.cache, alt, types.EvidenceSourceP2P) + require.NoError(t, err) + require.Nil(t, ev) +} + +func TestDoubleSignEvidence_ValidateBasic(t *testing.T) { + env := newDSTestEnv(t) + first := env.signHeaderAtHeight(5, 0x01) + alt := env.signHeaderAtHeight(5, 0x02) + + t.Run("nil receiver", func(t *testing.T) { + var e *types.DoubleSignEvidence + require.Error(t, e.ValidateBasic()) + }) + t.Run("missing headers", func(t *testing.T) { + require.Error(t, (&types.DoubleSignEvidence{Height: 5}).ValidateBasic()) + }) + t.Run("height mismatch", 
func(t *testing.T) { + e := &types.DoubleSignEvidence{Height: 99, FirstHeader: first, AlternateHeader: alt} + require.Error(t, e.ValidateBasic()) + }) + t.Run("identical hashes", func(t *testing.T) { + e := &types.DoubleSignEvidence{Height: 5, FirstHeader: first, AlternateHeader: first} + require.Error(t, e.ValidateBasic()) + }) + t.Run("proposer mismatch", func(t *testing.T) { + other := env.signHeaderWithOtherProposer(5, 0x02) + e := &types.DoubleSignEvidence{Height: 5, FirstHeader: first, AlternateHeader: other} + require.ErrorContains(t, e.ValidateBasic(), "different proposers") + }) + t.Run("happy path", func(t *testing.T) { + e := &types.DoubleSignEvidence{Height: 5, FirstHeader: first, AlternateHeader: alt} + require.NoError(t, e.ValidateBasic()) + }) +} + +func TestPBDoubleSignEvidence_RoundTrip(t *testing.T) { + env := newDSTestEnv(t) + ev := &types.DoubleSignEvidence{ + Height: 7, + FirstHeader: env.signHeaderAtHeight(7, 0x01), + AlternateHeader: env.signHeaderAtHeight(7, 0x02), + DetectedAt: time.Unix(1_700_000_000, 500).UTC(), + FirstSource: types.EvidenceSourceDA, + AlternateSource: types.EvidenceSourceP2P, + } + p, err := ev.ToProto() + require.NoError(t, err) + blob, err := proto.Marshal(p) + require.NoError(t, err) + + decoded := new(pb.DoubleSignEvidence) + require.NoError(t, proto.Unmarshal(blob, decoded)) + require.Equal(t, ev.Height, decoded.Height) + require.Equal(t, ev.DetectedAt.UnixNano(), decoded.DetectedAt) + require.Equal(t, ev.FirstSource, decoded.FirstSource) + require.Equal(t, ev.AlternateSource, decoded.AlternateSource) + require.NotNil(t, decoded.FirstHeader) + require.NotNil(t, decoded.AlternateHeader) + require.Equal(t, ev.FirstHeader.Height(), decoded.FirstHeader.Header.Height) +} + +func TestDARetriever_DoubleSignEvidenceHasMatchingProposers(t *testing.T) { + env := newDSTestEnv(t) + + first := env.signHeaderAtHeight(5, 0x01) + alt := env.signHeaderAtHeight(5, 0x02) + firstBin, err := first.MarshalBinary() + require.NoError(t, 
err) + altBin, err := alt.MarshalBinary() + require.NoError(t, err) + + mockClient := testmocks.NewMockClient(t) + mockClient.On("GetHeaderNamespace").Return([]byte("ns")).Maybe() + mockClient.On("GetDataNamespace").Return([]byte("ns")).Maybe() + + r := NewDARetriever(mockClient, env.cache, env.gen, zerolog.Nop(), env.store, env.onDouble) + _ = r.ProcessBlobs(context.Background(), [][]byte{firstBin, altBin}, 100) + + captured := env.captured() + require.Len(t, captured, 1) + require.Equal(t, env.gen.ProposerAddress, []byte(captured[0].FirstHeader.ProposerAddress)) + require.Equal(t, env.gen.ProposerAddress, []byte(captured[0].AlternateHeader.ProposerAddress)) +} + +func TestSyncer_EvictsPendingHeaderOnPersist(t *testing.T) { + memDS := dssync.MutexWrap(ds.NewMapDatastore()) + st := store.New(memDS) + + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "syncer-evict", InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, + } + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), gen.ChainID). + Return([]byte("app0"), nil).Once() + mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.Anything, mock.Anything). 
+ Return([]byte("app1"), nil).Once() + + mockHeaderStore := extmocks.NewMockStore[*types.P2PSignedHeader](t) + mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() + mockDataStore := extmocks.NewMockStore[*types.P2PData](t) + mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() + + s := NewSyncer( + st, mockExec, nil, cm, common.NopMetrics(), cfg, gen, + mockHeaderStore, mockDataStore, zerolog.Nop(), + common.DefaultBlockOptions(), make(chan error, 1), nil, + ) + require.NoError(t, s.initializeState()) + s.ctx = t.Context() + + state := s.getLastState() + data := makeData(gen.ChainID, 1, 0) + _, hdr := makeSignedHeaderBytes(t, gen.ChainID, 1, addr, pub, signer, state.AppHash, data, nil) + + cm.SetPendingSignedHeader(hdr, types.EvidenceSourceP2P) + _, _, ok := cm.GetPendingSignedHeader(1) + require.True(t, ok) + + evt := common.DAHeightEvent{Header: hdr, Data: data, DaHeight: 1} + s.processHeightEvent(s.ctx, &evt) + + _, _, ok = cm.GetPendingSignedHeader(1) + require.False(t, ok) +} + +// End-to-end: a double-sign through a real Syncer must halt on errorCh, +// flip hasCriticalError, and bump DoubleSignsDetected only once for duplicate evidence. +func TestSyncer_DoubleSignHaltsAndEmitsCriticalError(t *testing.T) { + memDS := dssync.MutexWrap(ds.NewMapDatastore()) + st := store.New(memDS) + + cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + require.NoError(t, err) + + addr, pub, signer := buildSyncTestSigner(t) + cfg := config.DefaultConfig() + gen := genesis.Genesis{ + ChainID: "syncer-ds", InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), ProposerAddress: addr, + } + + mockExec := testmocks.NewMockExecutor(t) + mockExec.EXPECT(). + InitChain(mock.Anything, mock.Anything, uint64(1), gen.ChainID). 
+ Return([]byte("app0"), nil).Once() + + mockHeaderStore := extmocks.NewMockStore[*types.P2PSignedHeader](t) + mockHeaderStore.EXPECT().Height().Return(uint64(0)).Maybe() + mockDataStore := extmocks.NewMockStore[*types.P2PData](t) + mockDataStore.EXPECT().Height().Return(uint64(0)).Maybe() + + // Wire a counting metric so we can assert exact increments. + metrics := common.NopMetrics() + var dsCount atomic.Int64 + metrics.DoubleSignsDetected = &counterCtr{n: &dsCount} + + errCh := make(chan error, 4) + s := NewSyncer( + st, mockExec, nil, cm, metrics, cfg, gen, + mockHeaderStore, mockDataStore, zerolog.Nop(), + common.DefaultBlockOptions(), errCh, nil, + ) + require.NoError(t, s.initializeState()) + s.doubleSignSeen = newDoubleSignDedup() // normally set up by Start() + + // Fire two identical alternate events to simulate P2P + DA converging. + first := makeHeaderForSyncer(t, gen, addr, pub, signer, 1, 0x01) + saveHeaderViaBatch(t, st, gen, first) + + alt := makeHeaderForSyncer(t, gen, addr, pub, signer, 1, 0x02) + require.NotEqual(t, first.Hash().String(), alt.Hash().String()) + + p2pEv := &types.DoubleSignEvidence{ + Height: 1, FirstHeader: first, AlternateHeader: alt, + DetectedAt: time.Now(), FirstSource: types.EvidenceSourceStored, AlternateSource: types.EvidenceSourceP2P, + } + daEv := &types.DoubleSignEvidence{ + Height: 1, FirstHeader: first, AlternateHeader: alt, + DetectedAt: time.Now(), FirstSource: types.EvidenceSourceStored, AlternateSource: types.EvidenceSourceDA, + } + + s.handleDoubleSign(context.Background(), p2pEv) + s.handleDoubleSign(context.Background(), daEv) + + require.Equal(t, int64(1), dsCount.Load(), "duplicate evidence must not double-count") + require.True(t, s.hasCriticalError.Load()) + + select { + case got := <-errCh: + require.ErrorIs(t, got, ErrDoubleSign) + case <-time.After(time.Second): + t.Fatal("timed out waiting for critical error on errCh") + } + + key := store.GetDoubleSignEvidenceKey(1, alt.Hash()) + blob, err := 
st.GetMetadata(context.Background(), key)
+	require.NoError(t, err)
+	require.NotEmpty(t, blob)
+}
+
+func makeHeaderForSyncer(t *testing.T, gen genesis.Genesis, addr []byte, pub crypto.PubKey, signer signerpkg.Signer, height uint64, variant byte) *types.SignedHeader {
+	t.Helper()
+	_, hdr := makeSignedHeaderBytes(t, gen.ChainID, height, addr, pub, signer,
+		[]byte{variant, variant, variant}, nil, nil)
+	return hdr
+}
+
+// saveHeaderViaBatch persists a signed header + empty data + signature and bumps the store height.
+func saveHeaderViaBatch(t *testing.T, st store.Store, gen genesis.Genesis, hdr *types.SignedHeader) {
+	t.Helper()
+	batch, err := st.NewBatch(context.Background())
+	require.NoError(t, err)
+	require.NoError(t, batch.SaveBlockData(hdr, &types.Data{
+		Metadata: &types.Metadata{ChainID: gen.ChainID, Height: hdr.Height(), Time: hdr.BaseHeader.Time},
+	}, &hdr.Signature))
+	require.NoError(t, batch.SetHeight(hdr.Height()))
+	require.NoError(t, batch.Commit())
+}
+
+// counterCtr is a go-kit Counter backed by an atomic int64 so tests can read exact increments.
+type counterCtr struct {
+	n *atomic.Int64
+}
+
+func (c *counterCtr) Add(delta float64) { c.n.Add(int64(delta)) }
+func (c *counterCtr) With(labelValues ...string) gkmetrics.Counter { return c }
diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go
index a3778757a1..1fc839f150 100644
--- a/block/internal/syncing/p2p_handler.go
+++ b/block/internal/syncing/p2p_handler.go
@@ -12,6 +12,7 @@ import (
 	"github.com/evstack/ev-node/block/internal/cache"
 	"github.com/evstack/ev-node/block/internal/common"
 	"github.com/evstack/ev-node/pkg/genesis"
+	"github.com/evstack/ev-node/pkg/store"
 	"github.com/evstack/ev-node/types"
 )
 
@@ -33,23 +34,31 @@ type P2PHandler struct {
 	genesis genesis.Genesis
 	logger  zerolog.Logger
 
+	store        store.Store
+	onDoubleSign doubleSignHandler // nil disables detection
+
 	processedHeight atomic.Uint64
 }
 
-// NewP2PHandler creates a new P2P handler.
+// NewP2PHandler creates a new P2P handler. Double-sign detection is disabled +// when st or onDoubleSign is nil. func NewP2PHandler( headerStore header.Store[*types.P2PSignedHeader], dataStore header.Store[*types.P2PData], cache cache.CacheManager, genesis genesis.Genesis, logger zerolog.Logger, + st store.Store, + onDoubleSign doubleSignHandler, ) *P2PHandler { return &P2PHandler{ - headerStore: headerStore, - dataStore: dataStore, - cache: cache, - genesis: genesis, - logger: logger.With().Str("component", "p2p_handler").Logger(), + headerStore: headerStore, + dataStore: dataStore, + cache: cache, + genesis: genesis, + logger: logger.With().Str("component", "p2p_handler").Logger(), + store: st, + onDoubleSign: onDoubleSign, } } @@ -69,9 +78,14 @@ func (h *P2PHandler) SetProcessedHeight(height uint64) { // ProcessHeight retrieves and validates both header and data for the given height from P2P stores. // It blocks until both are available, validates consistency (proposer address and data hash match), // then emits the event to heightInCh or stores it as pending. Updates processedHeight on success. +// +// When double-sign detection is enabled, the processedHeight short-circuit is +// deferred so alternates at already-processed heights still trigger detection. func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInCh chan<- common.DAHeightEvent) error { - if height <= h.processedHeight.Load() { - return nil + if h.store == nil || h.onDoubleSign == nil { + if height <= h.processedHeight.Load() { + return nil + } } p2pHeader, err := h.headerStore.GetByHeight(ctx, height) @@ -86,6 +100,24 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC return err } + // ValidateBasic is the precondition for treating an alternate as evidence. 
+ if h.store != nil && h.onDoubleSign != nil { + if err := p2pHeader.SignedHeader.ValidateBasic(); err != nil { + h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid signed header from P2P") + return err + } + if ev, derr := detectDoubleSign(ctx, h.store, h.cache, p2pHeader.SignedHeader, types.EvidenceSourceP2P); derr == nil && ev != nil { + h.onDoubleSign(ctx, ev) + return nil + } else if derr != nil { + h.logger.Warn().Err(derr).Uint64("height", height).Msg("double-sign detection error") + } + h.cache.SetPendingSignedHeader(p2pHeader.SignedHeader, types.EvidenceSourceP2P) + if height <= h.processedHeight.Load() { + return nil + } + } + p2pData, err := h.dataStore.GetByHeight(ctx, height) if err != nil { if ctx.Err() == nil { diff --git a/block/internal/syncing/p2p_handler_doublesign_test.go b/block/internal/syncing/p2p_handler_doublesign_test.go new file mode 100644 index 0000000000..dc5669687b --- /dev/null +++ b/block/internal/syncing/p2p_handler_doublesign_test.go @@ -0,0 +1,144 @@ +package syncing + +import ( + "context" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + "github.com/evstack/ev-node/block/internal/common" + extmocks "github.com/evstack/ev-node/test/mocks/external" + "github.com/evstack/ev-node/types" +) + +// The processedHeight short-circuit must run AFTER the detector so an +// alternate at an already-applied height still triggers detection. +func TestP2PHandler_DetectsAtAlreadyProcessedHeight(t *testing.T) { + env := newDSTestEnv(t) + + first := env.signHeaderAtHeight(5, 0x01) + env.saveHeader(first) + + alt := env.signHeaderAtHeight(5, 0x02) + require.NotEqual(t, first.Hash().String(), alt.Hash().String()) + + headerStoreMock := extmocks.NewMockStore[*types.P2PSignedHeader](t) + dataStoreMock := extmocks.NewMockStore[*types.P2PData](t) + headerStoreMock.EXPECT(). + GetByHeight(mock.Anything, uint64(5)). 
+ Return(&types.P2PSignedHeader{SignedHeader: alt}, nil). + Once() + + h := NewP2PHandler(headerStoreMock, dataStoreMock, env.cache, env.gen, + zerolog.Nop(), env.store, env.onDouble) + + h.SetProcessedHeight(5) + + ch := make(chan common.DAHeightEvent, 1) + require.NoError(t, h.ProcessHeight(context.Background(), 5, ch)) + + captured := env.captured() + require.Len(t, captured, 1) + require.Equal(t, alt.Hash().String(), captured[0].AlternateHeader.Hash().String()) + require.Equal(t, types.EvidenceSourceP2P, captured[0].AlternateSource) + + select { + case evt := <-ch: + t.Fatalf("expected no event when double-sign fires; got %+v", evt) + default: + } +} + +// When detection is disabled the legacy short-circuit must still fire. +func TestP2PHandler_LegacyShortCircuitWhenDetectionDisabled(t *testing.T) { + env := newDSTestEnv(t) + + headerStoreMock := extmocks.NewMockStore[*types.P2PSignedHeader](t) + dataStoreMock := extmocks.NewMockStore[*types.P2PData](t) + + h := NewP2PHandler(headerStoreMock, dataStoreMock, env.cache, env.gen, + zerolog.Nop(), nil, nil) + + h.SetProcessedHeight(5) + + // Mock has no expectation set — a call to GetByHeight would panic. + ch := make(chan common.DAHeightEvent, 1) + require.NoError(t, h.ProcessHeight(context.Background(), 5, ch)) +} + +// A P2P header with the correct proposer but a tampered signature must be +// rejected before the detector runs. +func TestP2PHandler_InvalidSigRejectedBeforeDetector(t *testing.T) { + env := newDSTestEnv(t) + + good := env.signHeaderAtHeight(5, 0x01) + pbHdr, err := good.ToProto() + require.NoError(t, err) + pbHdr.Signature = append([]byte(nil), good.Signature...) 
+ pbHdr.Signature[0] ^= 0xff + bin, err := proto.Marshal(pbHdr) + require.NoError(t, err) + + forged := new(types.SignedHeader) + { + var pbDecoded = pbHdr + require.NoError(t, proto.Unmarshal(bin, pbDecoded)) + require.NoError(t, forged.FromProto(pbDecoded)) + } + + headerStoreMock := extmocks.NewMockStore[*types.P2PSignedHeader](t) + dataStoreMock := extmocks.NewMockStore[*types.P2PData](t) + headerStoreMock.EXPECT(). + GetByHeight(mock.Anything, uint64(5)). + Return(&types.P2PSignedHeader{SignedHeader: forged}, nil). + Once() + + h := NewP2PHandler(headerStoreMock, dataStoreMock, env.cache, env.gen, + zerolog.Nop(), env.store, env.onDouble) + + ch := make(chan common.DAHeightEvent, 1) + err = h.ProcessHeight(context.Background(), 5, ch) + require.Error(t, err) + + require.Empty(t, env.captured()) + + _, _, ok := env.cache.GetPendingSignedHeader(5) + require.False(t, ok) +} + +// First P2P observation must populate the pending cache so a later DA blob +// at the same height can be matched against it. +func TestP2PHandler_SetsPendingSignedHeaderOnFirstObservation(t *testing.T) { + env := newDSTestEnv(t) + + // Provide both header and data so ProcessHeight reaches the emit step. + first := env.signHeaderAtHeight(5, 0x01) + first.DataHash = common.DataHashForEmptyTxs + + headerStoreMock := extmocks.NewMockStore[*types.P2PSignedHeader](t) + dataStoreMock := extmocks.NewMockStore[*types.P2PData](t) + headerStoreMock.EXPECT(). + GetByHeight(mock.Anything, uint64(5)). + Return(&types.P2PSignedHeader{SignedHeader: first}, nil). + Once() + dataStoreMock.EXPECT(). + GetByHeight(mock.Anything, uint64(5)). + Return(&types.P2PData{Data: &types.Data{ + Metadata: &types.Metadata{ChainID: env.chainID, Height: 5, Time: first.BaseHeader.Time}, + }}, nil). 
+ Once() + + h := NewP2PHandler(headerStoreMock, dataStoreMock, env.cache, env.gen, + zerolog.Nop(), env.store, env.onDouble) + + ch := make(chan common.DAHeightEvent, 1) + require.NoError(t, h.ProcessHeight(context.Background(), 5, ch)) + + got, src, ok := env.cache.GetPendingSignedHeader(5) + require.True(t, ok) + require.Equal(t, first.Hash().String(), got.Hash().String()) + require.Equal(t, types.EvidenceSourceP2P, src) +} diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index 8bffc31ede..8feda8c2d6 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -90,7 +90,7 @@ func setupP2P(t *testing.T) *P2PTestData { cacheManager, err := cache.NewManager(cfg, st, zerolog.Nop()) require.NoError(t, err, "failed to create cache manager") - handler := NewP2PHandler(headerStoreMock, dataStoreMock, cacheManager, gen, zerolog.Nop()) + handler := NewP2PHandler(headerStoreMock, dataStoreMock, cacheManager, gen, zerolog.Nop(), nil, nil) return &P2PTestData{ Handler: handler, HeaderStore: headerStoreMock, diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 40e3c9523f..f9d4570a2b 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -95,6 +95,9 @@ type Syncer struct { wg sync.WaitGroup hasCriticalError atomic.Bool + // Double-sign detection + doubleSignSeen *doubleSignDedup + // P2P wait coordination p2pWaitState atomic.Value // stores p2pWaitState @@ -180,15 +183,18 @@ func (s *Syncer) Start(ctx context.Context) (err error) { return fmt.Errorf("failed to initialize syncer state: %w", err) } - // Initialize handlers - s.daRetriever = NewDARetriever(s.daClient, s.cache, s.genesis, s.logger) + // Initialize handlers. DA and P2P share dsHandler so cross-path duplicates + // are deduped through doubleSignSeen and only reported once. 
+ s.doubleSignSeen = newDoubleSignDedup() + dsHandler := s.handleDoubleSign + s.daRetriever = NewDARetriever(s.daClient, s.cache, s.genesis, s.logger, s.store, dsHandler) if s.config.Instrumentation.IsTracingEnabled() { s.daRetriever = WithTracingDARetriever(s.daRetriever) } s.fiRetriever = da.NewForcedInclusionRetriever(s.daClient, s.logger, s.config.DA.BlockTime.Duration, s.config.Instrumentation.IsTracingEnabled(), s.genesis.DAStartHeight, s.genesis.DAEpochForcedInclusion) s.fiRetriever.Start(ctx) - s.p2pHandler = NewP2PHandler(s.headerStore, s.dataStore, s.cache, s.genesis, s.logger) + s.p2pHandler = NewP2PHandler(s.headerStore, s.dataStore, s.cache, s.genesis, s.logger, s.store, dsHandler) currentHeight, initErr := s.store.Height(ctx) if initErr != nil { @@ -798,6 +804,8 @@ func (s *Syncer) trySyncNextBlockWithState(ctx context.Context, event *common.DA if !bytes.Equal(header.DataHash, common.DataHashForEmptyTxs) { s.cache.SetDataSeen(data.DACommitment().String(), newState.LastBlockHeight) } + // Subsequent alternates resolve against the persisted header. + s.cache.RemovePendingSignedHeader(header.Height()) if s.p2pHandler != nil { s.p2pHandler.SetProcessedHeight(newState.LastBlockHeight) @@ -1065,6 +1073,12 @@ func (s *Syncer) sendCriticalError(err error) { } } +// handleDoubleSign persists evidence, bumps the metric, and halts the syncer +// via sendCriticalError. Wired into the DA retriever and P2P handler. 
+func (s *Syncer) handleDoubleSign(ctx context.Context, ev *types.DoubleSignEvidence) { + _ = reportDoubleSign(ctx, s.store, s.metrics, s.logger, s.doubleSignSeen, s.sendCriticalError, ev) +} + // processPendingEvents fetches and processes pending events from cache // optimistically fetches the next events from cache until no matching heights are found func (s *Syncer) processPendingEvents(ctx context.Context) { diff --git a/block/internal/syncing/syncer_forced_inclusion_test.go b/block/internal/syncing/syncer_forced_inclusion_test.go index 3c15fde125..fe8372996b 100644 --- a/block/internal/syncing/syncer_forced_inclusion_test.go +++ b/block/internal/syncing/syncer_forced_inclusion_test.go @@ -77,7 +77,7 @@ func newForcedInclusionSyncer(t *testing.T, daStart, epochSize uint64) (*Syncer, subCh := make(chan datypes.SubscriptionEvent) client.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return((<-chan datypes.SubscriptionEvent)(subCh), nil).Maybe() - daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop()) + daRetriever := NewDARetriever(client, cm, gen, zerolog.Nop(), nil, nil) fiRetriever := da.NewForcedInclusionRetriever(client, zerolog.Nop(), cfg.DA.BlockTime.Duration, false, gen.DAStartHeight, gen.DAEpochForcedInclusion) t.Cleanup(fiRetriever.Stop) diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 67c87e06ed..e8fcd0aac7 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -1070,7 +1070,7 @@ func TestProcessHeightEvent_TriggersAsyncDARetrieval(t *testing.T) { s.ctx = context.Background() // Create a real daRetriever to test priority queue - s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop()) + s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop(), nil, nil) s.daFollower = NewDAFollower(DAFollowerConfig{ Retriever: s.daRetriever, Logger: zerolog.Nop(), @@ -1139,7 +1139,7 @@ func TestProcessHeightEvent_RejectsUnreasonableDAHint(t *testing.T) { 
) require.NoError(t, s.initializeState()) s.ctx = context.Background() - s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop()) + s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop(), nil, nil) s.daFollower = NewDAFollower(DAFollowerConfig{ Retriever: s.daRetriever, Logger: zerolog.Nop(), @@ -1208,7 +1208,7 @@ func TestProcessHeightEvent_AcceptsValidDAHint(t *testing.T) { ) require.NoError(t, s.initializeState()) s.ctx = context.Background() - s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop()) + s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop(), nil, nil) s.daFollower = NewDAFollower(DAFollowerConfig{ Retriever: s.daRetriever, Logger: zerolog.Nop(), @@ -1278,7 +1278,7 @@ func TestProcessHeightEvent_SkipsDAHintWhenAlreadyDAIncluded(t *testing.T) { ) require.NoError(t, s.initializeState()) s.ctx = context.Background() - s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop()) + s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop(), nil, nil) s.daFollower = NewDAFollower(DAFollowerConfig{ Retriever: s.daRetriever, Logger: zerolog.Nop(), @@ -1377,7 +1377,7 @@ func TestProcessHeightEvent_SkipsDAHintWhenBelowRetrieverCursor(t *testing.T) { s.ctx = context.Background() // Create a real daRetriever to test priority queue - s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop()) + s.daRetriever = NewDARetriever(nil, cm, gen, zerolog.Nop(), nil, nil) s.daFollower = NewDAFollower(DAFollowerConfig{ Retriever: s.daRetriever, Logger: zerolog.Nop(), diff --git a/pkg/store/keys.go b/pkg/store/keys.go index 02053bb849..578d300111 100644 --- a/pkg/store/keys.go +++ b/pkg/store/keys.go @@ -30,6 +30,10 @@ const ( // pruned state height in the store. LastPrunedStateHeightKey = "lst-prnd-s" + // DoubleSignEvidenceKey is the metadata key prefix for persisted double-sign + // evidence. 
Full keys are like: ds/<height>/<altHash> + DoubleSignEvidenceKey = "ds" + headerPrefix = "h" dataPrefix = "d" signaturePrefix = "c" @@ -102,3 +106,9 @@ func GetHeightToDAHeightHeaderKey(height uint64) string { func GetHeightToDAHeightDataKey(height uint64) string { return HeightToDAHeightKey + "/" + strconv.FormatUint(height, 10) + "/d" } + +// GetDoubleSignEvidenceKey returns the metadata key for persisted double-sign +// evidence at the given height and alternate-header hash. +func GetDoubleSignEvidenceKey(height uint64, altHash types.Hash) string { + return DoubleSignEvidenceKey + "/" + strconv.FormatUint(height, 10) + "/" + altHash.String() +} diff --git a/proto/evnode/v1/evnode.proto b/proto/evnode/v1/evnode.proto index e60bd56e0d..39716126bd 100644 --- a/proto/evnode/v1/evnode.proto +++ b/proto/evnode/v1/evnode.proto @@ -121,3 +121,15 @@ message P2PData { repeated bytes txs = 2; optional uint64 da_height_hint = 3; } + +// DoubleSignEvidence records two validly-signed SignedHeaders at the same +// height produced by the sequencer. Persisted as proof of equivocation. +message DoubleSignEvidence { + uint64 height = 1; + SignedHeader first_header = 2; + SignedHeader alternate_header = 3; + int64 detected_at = 4; + // Ingestion source for each header: "p2p", "da", or "stored". + string first_source = 5; + string alternate_source = 6; +} diff --git a/types/double_sign_evidence.go b/types/double_sign_evidence.go new file mode 100644 index 0000000000..e883d0cc64 --- /dev/null +++ b/types/double_sign_evidence.go @@ -0,0 +1,120 @@ +package types + +import ( + "bytes" + "errors" + "fmt" + "time" + + "google.golang.org/protobuf/proto" + + pb "github.com/evstack/ev-node/types/pb/evnode/v1" +) + +// Ingestion source identifying which path observed a SignedHeader. +const ( + EvidenceSourceP2P = "p2p" + EvidenceSourceDA = "da" + EvidenceSourceStored = "stored" +) + +// DoubleSignEvidence records two validly-signed SignedHeaders at the same +// height produced by the sequencer. 
Persisted as proof of equivocation. +type DoubleSignEvidence struct { + Height uint64 + FirstHeader *SignedHeader + AlternateHeader *SignedHeader + DetectedAt time.Time + FirstSource string + AlternateSource string +} + +// ValidateBasic checks structural consistency of the evidence. +func (e *DoubleSignEvidence) ValidateBasic() error { + if e == nil { + return errors.New("evidence is nil") + } + if e.FirstHeader == nil || e.AlternateHeader == nil { + return errors.New("evidence requires both first and alternate headers") + } + if e.FirstHeader.Height() != e.Height || e.AlternateHeader.Height() != e.Height { + return fmt.Errorf("evidence height %d does not match both headers (%d, %d)", + e.Height, e.FirstHeader.Height(), e.AlternateHeader.Height()) + } + if bytes.Equal(e.FirstHeader.Hash(), e.AlternateHeader.Hash()) { + return errors.New("evidence headers have identical hash — no equivocation") + } + if !bytes.Equal(e.FirstHeader.ProposerAddress, e.AlternateHeader.ProposerAddress) { + return errors.New("evidence headers have different proposers — not an equivocation") + } + return nil +} + +// ToProto converts DoubleSignEvidence to protobuf representation. +func (e *DoubleSignEvidence) ToProto() (*pb.DoubleSignEvidence, error) { + if e == nil { + return nil, errors.New("evidence is nil") + } + if e.FirstHeader == nil || e.AlternateHeader == nil { + return nil, errors.New("evidence requires both first and alternate headers") + } + first, err := e.FirstHeader.ToProto() + if err != nil { + return nil, fmt.Errorf("marshal first header: %w", err) + } + alt, err := e.AlternateHeader.ToProto() + if err != nil { + return nil, fmt.Errorf("marshal alternate header: %w", err) + } + return &pb.DoubleSignEvidence{ + Height: e.Height, + FirstHeader: first, + AlternateHeader: alt, + DetectedAt: e.DetectedAt.UnixNano(), + FirstSource: e.FirstSource, + AlternateSource: e.AlternateSource, + }, nil +} + +// FromProto fills DoubleSignEvidence from protobuf representation. 
+func (e *DoubleSignEvidence) FromProto(p *pb.DoubleSignEvidence) error { + if p == nil { + return errors.New("proto evidence is nil") + } + if p.FirstHeader == nil || p.AlternateHeader == nil { + return errors.New("proto evidence missing first or alternate header") + } + first := new(SignedHeader) + if err := first.FromProto(p.FirstHeader); err != nil { + return fmt.Errorf("unmarshal first header: %w", err) + } + alt := new(SignedHeader) + if err := alt.FromProto(p.AlternateHeader); err != nil { + return fmt.Errorf("unmarshal alternate header: %w", err) + } + e.Height = p.Height + e.FirstHeader = first + e.AlternateHeader = alt + e.DetectedAt = time.Unix(0, p.DetectedAt).UTC() + e.FirstSource = p.FirstSource + e.AlternateSource = p.AlternateSource + return nil +} + +// MarshalBinary encodes DoubleSignEvidence to protobuf bytes. +func (e *DoubleSignEvidence) MarshalBinary() ([]byte, error) { + p, err := e.ToProto() + if err != nil { + return nil, err + } + return proto.Marshal(p) +} + +// UnmarshalBinary decodes DoubleSignEvidence from protobuf bytes. +func (e *DoubleSignEvidence) UnmarshalBinary(data []byte) error { + p := new(pb.DoubleSignEvidence) + if err := proto.Unmarshal(data, p); err != nil { + return fmt.Errorf("proto unmarshal double sign evidence: %w", err) + } + return e.FromProto(p) +} diff --git a/types/pb/evnode/v1/evnode.pb.go b/types/pb/evnode/v1/evnode.pb.go index b0a866e76e..8f34134fed 100644 --- a/types/pb/evnode/v1/evnode.pb.go +++ b/types/pb/evnode/v1/evnode.pb.go @@ -785,6 +785,93 @@ func (x *P2PData) GetDaHeightHint() uint64 { return 0 } +// DoubleSignEvidence records two validly-signed SignedHeaders at the same +// height produced by the sequencer. Persisted as proof of equivocation. 
+type DoubleSignEvidence struct { + state protoimpl.MessageState `protogen:"open.v1"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + FirstHeader *SignedHeader `protobuf:"bytes,2,opt,name=first_header,json=firstHeader,proto3" json:"first_header,omitempty"` + AlternateHeader *SignedHeader `protobuf:"bytes,3,opt,name=alternate_header,json=alternateHeader,proto3" json:"alternate_header,omitempty"` + DetectedAt int64 `protobuf:"varint,4,opt,name=detected_at,json=detectedAt,proto3" json:"detected_at,omitempty"` + // Ingestion source for each header: "p2p", "da", or "stored". + FirstSource string `protobuf:"bytes,5,opt,name=first_source,json=firstSource,proto3" json:"first_source,omitempty"` + AlternateSource string `protobuf:"bytes,6,opt,name=alternate_source,json=alternateSource,proto3" json:"alternate_source,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DoubleSignEvidence) Reset() { + *x = DoubleSignEvidence{} + mi := &file_evnode_v1_evnode_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DoubleSignEvidence) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleSignEvidence) ProtoMessage() {} + +func (x *DoubleSignEvidence) ProtoReflect() protoreflect.Message { + mi := &file_evnode_v1_evnode_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleSignEvidence.ProtoReflect.Descriptor instead. 
+func (*DoubleSignEvidence) Descriptor() ([]byte, []int) { + return file_evnode_v1_evnode_proto_rawDescGZIP(), []int{11} +} + +func (x *DoubleSignEvidence) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *DoubleSignEvidence) GetFirstHeader() *SignedHeader { + if x != nil { + return x.FirstHeader + } + return nil +} + +func (x *DoubleSignEvidence) GetAlternateHeader() *SignedHeader { + if x != nil { + return x.AlternateHeader + } + return nil +} + +func (x *DoubleSignEvidence) GetDetectedAt() int64 { + if x != nil { + return x.DetectedAt + } + return 0 +} + +func (x *DoubleSignEvidence) GetFirstSource() string { + if x != nil { + return x.FirstSource + } + return "" +} + +func (x *DoubleSignEvidence) GetAlternateSource() string { + if x != nil { + return x.AlternateSource + } + return "" +} + var File_evnode_v1_evnode_proto protoreflect.FileDescriptor const file_evnode_v1_evnode_proto_rawDesc = "" + @@ -846,7 +933,15 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\bmetadata\x18\x01 \x01(\v2\x13.evnode.v1.MetadataR\bmetadata\x12\x10\n" + "\x03txs\x18\x02 \x03(\fR\x03txs\x12)\n" + "\x0eda_height_hint\x18\x03 \x01(\x04H\x00R\fdaHeightHint\x88\x01\x01B\x11\n" + - "\x0f_da_height_hintB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" + "\x0f_da_height_hint\"\x9b\x02\n" + + "\x12DoubleSignEvidence\x12\x16\n" + + "\x06height\x18\x01 \x01(\x04R\x06height\x12:\n" + + "\ffirst_header\x18\x02 \x01(\v2\x17.evnode.v1.SignedHeaderR\vfirstHeader\x12B\n" + + "\x10alternate_header\x18\x03 \x01(\v2\x17.evnode.v1.SignedHeaderR\x0falternateHeader\x12\x1f\n" + + "\vdetected_at\x18\x04 \x01(\x03R\n" + + "detectedAt\x12!\n" + + "\ffirst_source\x18\x05 \x01(\tR\vfirstSource\x12)\n" + + "\x10alternate_source\x18\x06 \x01(\tR\x0falternateSourceB/Z-github.com/evstack/ev-node/types/pb/evnode/v1b\x06proto3" var ( file_evnode_v1_evnode_proto_rawDescOnce sync.Once @@ -860,7 +955,7 @@ func file_evnode_v1_evnode_proto_rawDescGZIP() []byte { 
return file_evnode_v1_evnode_proto_rawDescData } -var file_evnode_v1_evnode_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_evnode_v1_evnode_proto_msgTypes = make([]protoimpl.MessageInfo, 12) var file_evnode_v1_evnode_proto_goTypes = []any{ (*Version)(nil), // 0: evnode.v1.Version (*Header)(nil), // 1: evnode.v1.Header @@ -873,7 +968,8 @@ var file_evnode_v1_evnode_proto_goTypes = []any{ (*Vote)(nil), // 8: evnode.v1.Vote (*P2PSignedHeader)(nil), // 9: evnode.v1.P2PSignedHeader (*P2PData)(nil), // 10: evnode.v1.P2PData - (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*DoubleSignEvidence)(nil), // 11: evnode.v1.DoubleSignEvidence + (*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp } var file_evnode_v1_evnode_proto_depIdxs = []int32{ 0, // 0: evnode.v1.Header.version:type_name -> evnode.v1.Version @@ -884,15 +980,17 @@ var file_evnode_v1_evnode_proto_depIdxs = []int32{ 5, // 5: evnode.v1.Data.metadata:type_name -> evnode.v1.Metadata 6, // 6: evnode.v1.SignedData.data:type_name -> evnode.v1.Data 4, // 7: evnode.v1.SignedData.signer:type_name -> evnode.v1.Signer - 11, // 8: evnode.v1.Vote.timestamp:type_name -> google.protobuf.Timestamp + 12, // 8: evnode.v1.Vote.timestamp:type_name -> google.protobuf.Timestamp 1, // 9: evnode.v1.P2PSignedHeader.header:type_name -> evnode.v1.Header 4, // 10: evnode.v1.P2PSignedHeader.signer:type_name -> evnode.v1.Signer 5, // 11: evnode.v1.P2PData.metadata:type_name -> evnode.v1.Metadata - 12, // [12:12] is the sub-list for method output_type - 12, // [12:12] is the sub-list for method input_type - 12, // [12:12] is the sub-list for extension type_name - 12, // [12:12] is the sub-list for extension extendee - 0, // [0:12] is the sub-list for field type_name + 2, // 12: evnode.v1.DoubleSignEvidence.first_header:type_name -> evnode.v1.SignedHeader + 2, // 13: evnode.v1.DoubleSignEvidence.alternate_header:type_name -> evnode.v1.SignedHeader + 14, // [14:14] is the sub-list for method 
output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name } func init() { file_evnode_v1_evnode_proto_init() } @@ -908,7 +1006,7 @@ func file_evnode_v1_evnode_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_evnode_v1_evnode_proto_rawDesc), len(file_evnode_v1_evnode_proto_rawDesc)), NumEnums: 0, - NumMessages: 11, + NumMessages: 12, NumExtensions: 0, NumServices: 0, },