Commit ae22cbb
Message: Update
1 parent 62dc54f

File tree

8 files changed: +252 −175 lines

apps/evm/server/force_inclusion_test.go

Lines changed: 9 additions & 1 deletion
@@ -18,7 +18,8 @@ import (
 
 // mockDA implements block/internal/da.Client for testing
 type mockDA struct {
-	submitFunc func(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) da.ResultSubmit
+	submitFunc    func(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) da.ResultSubmit
+	subscribeFunc func(ctx context.Context, namespace []byte) (<-chan da.ResultRetrieve, error)
 }
 
 func (m *mockDA) Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) da.ResultSubmit {
@@ -29,6 +30,13 @@ func (m *mockDA) Submit(ctx context.Context, data [][]byte, gasPrice float64, na
 	return da.ResultSubmit{BaseResult: da.BaseResult{Code: da.StatusSuccess, Height: 1}}
 }
 
+func (m *mockDA) Subscribe(ctx context.Context, namespace []byte) (<-chan da.ResultRetrieve, error) {
+	if m.subscribeFunc != nil {
+		return m.subscribeFunc(ctx, namespace)
+	}
+	return nil, nil
+}
+
 func (m *mockDA) Retrieve(ctx context.Context, height uint64, namespace []byte) da.ResultRetrieve {
 	return da.ResultRetrieve{}
 }
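A test that needs canned subscription results can stub the new subscribeFunc hook. A minimal sketch, assuming only the types visible in this diff (mockDA, da.ResultRetrieve, da.BaseResult, da.StatusSuccess); the blob payload is illustrative:

	// Sketch: feed one canned subscription result through the mock's Subscribe hook.
	mock := &mockDA{
		subscribeFunc: func(ctx context.Context, namespace []byte) (<-chan da.ResultRetrieve, error) {
			ch := make(chan da.ResultRetrieve, 1)
			ch <- da.ResultRetrieve{
				BaseResult: da.BaseResult{Code: da.StatusSuccess, Height: 1},
				Data:       [][]byte{[]byte("blob")}, // illustrative payload
			}
			close(ch) // end the stream after one result
			return ch, nil
		},
	}
	_ = mock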

block/internal/da/client.go

Lines changed: 4 additions & 4 deletions
@@ -487,10 +487,10 @@ func (c *client) Subscribe(ctx context.Context, namespace []byte) (<-chan datype
 			// Using 0 or Now() is a trade-off. Let's use Now() for liveness.
 			outCh <- datypes.ResultRetrieve{
 				BaseResult: datypes.BaseResult{
-					Code:      datypes.StatusSuccess,
-					IDs:       ids,
-					Height:    resp.Height,
-					Timestamp: time.Now(),
+					Code:   datypes.StatusSuccess,
+					IDs:    ids,
+					Height: resp.Height,
+					//Timestamp: // TODO: set proper value
 				},
 				Data: data,
 			}
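With the Timestamp assignment commented out, subscribers now receive the zero value until the TODO is resolved. A consumer that needs a timestamp could guard for that; a minimal sketch, where res is assumed to be a datypes.ResultRetrieve taken from the subscription channel:

	// Sketch: tolerate an unset Timestamp on subscription results.
	ts := res.Timestamp
	if ts.IsZero() {
		ts = time.Now() // local receive time as a stand-in until the DA client sets a proper value
	}
	_ = ts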

block/internal/syncing/da_retriever.go

Lines changed: 53 additions & 50 deletions
@@ -5,6 +5,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"sort"
 	"sync"
 
 	"github.com/rs/zerolog"
@@ -22,7 +23,7 @@ import (
 // DARetriever defines the interface for retrieving events from the DA layer
 type DARetriever interface {
 	RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error)
-	Subscribe(ctx context.Context) (<-chan common.DAHeightEvent, error)
+	Subscribe(ctx context.Context, ch chan common.DAHeightEvent) error
 }
 
 // daRetriever handles DA retrieval operations for syncing
@@ -78,65 +79,68 @@ func (r *daRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co
 	return r.processBlobs(ctx, blobsResp.Data, daHeight), nil
 }
 
-// Subscribe subscribes to specific DA namespace and returns a channel of height events
-func (r *daRetriever) Subscribe(ctx context.Context) (<-chan common.DAHeightEvent, error) {
-	// Subscribe to header namespace
-	// Note: We currently only subscribe to header namespace.
-	// If header and data namespaces are different, we might need to subscribe to both or fetch data on demand.
-	// For now, assuming header subscription is sufficient to trigger catchup or process full blocks if namespaces are same.
-	// Actually, if we follow the "Subscribe" API from recent changes, we likely want to subscribe to *headers*.
-	// However, processBlobs expects blobs.
-	// Let's subscribe to the header namespace.
-
-	// Use combined channel for output
-	outCh := make(chan common.DAHeightEvent, 100)
-
-	subCh, err := r.client.Subscribe(ctx, r.client.GetHeaderNamespace())
+// Subscribe subscribes to specific DA namespace
+func (r *daRetriever) Subscribe(ctx context.Context, outCh chan common.DAHeightEvent) error {
+	subChHeader, err := r.client.Subscribe(ctx, r.client.GetHeaderNamespace())
 	if err != nil {
-		return nil, fmt.Errorf("failed to subscribe to headers: %w", err)
+		return fmt.Errorf("subscribe to headers: %w", err)
+	}
+
+	var subChData <-chan datypes.ResultRetrieve
+	if !bytes.Equal(r.client.GetHeaderNamespace(), r.client.GetDataNamespace()) {
+		var err error
+		subChData, err = r.client.Subscribe(ctx, r.client.GetDataNamespace())
+		if err != nil {
+			return fmt.Errorf("subscribe to data: %w", err)
+		}
 	}
 
 	go func() {
 		defer close(outCh)
 		for {
+			var blobs [][]byte
+			var height uint64
+			var errCode datypes.StatusCode
+
 			select {
 			case <-ctx.Done():
 				return
-			case res, ok := <-subCh:
+			case res, ok := <-subChHeader:
 				if !ok {
 					return
 				}
-				if res.Code != datypes.StatusSuccess {
-					r.logger.Error().Uint64("code", uint64(res.Code)).Msg("subscription error")
+				blobs = res.Data
+				height = res.Height
+				errCode = res.Code
+			case res, ok := <-subChData:
+				if subChData == nil {
 					continue
 				}
+				if !ok {
+					return
+				}
+				blobs = res.Data
+				height = res.Height
+				errCode = res.Code
+			}
 
-				// We received some blobs (headers) via subscription.
-				// We process them similar to RetrieveFromDA, but we might need to fetch data if namespaces differ.
-				// If namespaces differ, data might not be in the subscription result.
-				// For the "Follow" case, we want to emit an event.
-				// If we have just headers, we might need to fetch data corresponding to these headers.
-
-				// However, `processBlobs` handles pending headers/data.
-				// If we only get headers, they will be stored in pendingHeaders.
-				// If we need data, we might need to fetch it.
-
-				// IMPORTANT: processBlobs takes "blobs" [][]byte. `res.Data` is []byte (aliases to Blob).
-				// So we can pass res.Data directly.
-
-				events := r.processBlobs(ctx, res.Data, res.Height)
-				for _, ev := range events {
-					select {
-					case <-ctx.Done():
-						return
-					case outCh <- ev:
-					}
+			if errCode != datypes.StatusSuccess {
+				r.logger.Error().Uint64("code", uint64(errCode)).Msg("subscription error")
+				continue
+			}
+
+			events := r.processBlobs(ctx, blobs, height)
+			for _, ev := range events {
+				select {
+				case <-ctx.Done():
+					return
+				case outCh <- ev:
 				}
 			}
 		}
 	}()
 
-	return outCh, nil
+	return nil
 }
 
 // fetchBlobs retrieves blobs from both header and data namespaces
@@ -279,18 +283,17 @@ func (r *daRetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight
 		events = append(events, event)
 	}
 
+	// Sort events by height to match execution order
+	sort.Slice(events, func(i, j int) bool {
+		if events[i].DaHeight != events[j].DaHeight {
+			return events[i].DaHeight < events[j].DaHeight
+		}
+		return events[i].Header.Height() < events[j].Header.Height()
+	})
+
 	if len(events) > 0 {
 		startHeight := events[0].Header.Height()
-		endHeight := events[0].Header.Height()
-		for _, event := range events {
-			h := event.Header.Height()
-			if h < startHeight {
-				startHeight = h
-			}
-			if h > endHeight {
-				endHeight = h
-			}
-		}
+		endHeight := events[len(events)-1].Header.Height()
 		r.logger.Info().Uint64("da_height", daHeight).Uint64("start_height", startHeight).Uint64("end_height", endHeight).Msg("processed blocks from DA")
 	}
 
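With the new signature the caller owns the event channel and the retriever closes it when the subscription ends. A minimal caller-side sketch; only DARetriever, common.DAHeightEvent, and Subscribe(ctx, ch) come from this diff, the wrapper function and its name are illustrative:

	// Sketch: consuming DA height events through a caller-supplied channel.
	func followOnce(ctx context.Context, r DARetriever) error {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel() // stops the retriever's goroutine, which then closes the channel

		events := make(chan common.DAHeightEvent, 1)
		if err := r.Subscribe(ctx, events); err != nil {
			return err
		}
		for ev := range events { // ranges until the retriever closes the channel
			_ = ev // hand the event to block processing here
		}
		return nil
	}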

block/internal/syncing/da_retriever_mock.go

Lines changed: 19 additions & 24 deletions
Generated file; diff not rendered by default.

block/internal/syncing/da_retriever_test.go

Lines changed: 29 additions & 0 deletions
@@ -374,3 +374,32 @@ func Test_isEmptyDataExpected(t *testing.T) {
 	h.DataHash = common.DataHashForEmptyTxs
 	assert.True(t, isEmptyDataExpected(h))
 }
+
+func TestDARetriever_ProcessBlobs_Sorting(t *testing.T) {
+	addr, pub, signer := buildSyncTestSigner(t)
+	gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr}
+	r := newTestDARetriever(t, nil, config.DefaultConfig(), gen)
+
+	// Event A: Block Height 10
+	// Event B: Block Height 5
+	// Although DaHeight is currently identical for all events in a single processBlobs call,
+	// this test ensures that the secondary sort key (Block Height) behaves correctly.
+
+	data1Bin, data1 := makeSignedDataBytes(t, gen.ChainID, 10, addr, pub, signer, 1)
+	data2Bin, data2 := makeSignedDataBytes(t, gen.ChainID, 5, addr, pub, signer, 1)
+
+	hdr1Bin, _ := makeSignedHeaderBytes(t, gen.ChainID, 10, addr, pub, signer, nil, &data1.Data, nil)
+	hdr2Bin, _ := makeSignedHeaderBytes(t, gen.ChainID, 5, addr, pub, signer, nil, &data2.Data, nil)
+
+	// Process blobs.
+	daHeight := uint64(100)
+	// We pass them in mixed order to ensure sorting happens.
+	events := r.processBlobs(context.Background(), [][]byte{hdr1Bin, data1Bin, hdr2Bin, data2Bin}, daHeight)
+
+	require.Len(t, events, 2)
+	assert.Equal(t, uint64(5), events[0].Header.Height(), "Events should be sorted by block height asc")
+	assert.Equal(t, uint64(10), events[1].Header.Height())
+
+	assert.Equal(t, daHeight, events[0].DaHeight)
+	assert.Equal(t, daHeight, events[1].DaHeight)
+}

block/internal/syncing/syncer.go

Lines changed: 16 additions & 15 deletions
@@ -186,9 +186,7 @@ func (s *Syncer) Start(ctx context.Context) error {
 	}
 
 	// Initialize handlers
-	if s.daRetriever == nil {
-		s.daRetriever = NewDARetriever(s.daClient, s.cache, s.genesis, s.logger)
-	}
+	s.daRetriever = NewDARetriever(s.daClient, s.cache, s.genesis, s.logger)
 	s.fiRetriever = da.NewForcedInclusionRetriever(s.daClient, s.logger, s.genesis.DAStartHeight, s.genesis.DAEpochForcedInclusion)
 	s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger)
 	if currentHeight, err := s.store.Height(s.ctx); err != nil {
@@ -318,17 +316,15 @@ func (s *Syncer) daWorkerLoop() {
 
 	s.logger.Info().Msg("starting DA worker")
 	defer s.logger.Info().Msg("DA worker stopped")
+	backoff := s.config.DA.BlockTime.Duration
+	if backoff <= 0 {
+		backoff = 2 * time.Second
+	}
 
 	for {
 		// 1. Catch up mode: fetch sequentially until we are up to date
 		if err := s.fetchDAUntilCaughtUp(); err != nil {
 			s.logger.Error().Err(err).Msg("DA catchup failed, retrying after backoff")
-
-			backoff := s.config.DA.BlockTime.Duration
-			if backoff <= 0 {
-				backoff = 2 * time.Second
-			}
-
 			select {
 			case <-s.ctx.Done():
 				return
@@ -343,25 +339,30 @@
 			if errors.Is(err, context.Canceled) {
 				return
 			}
-			s.logger.Warn().Err(err).Msg("DA follow disrupted, switching to catchup")
+			s.logger.Warn().Err(err).Msg("DA follow disrupted")
 			// We don't need explicit backoff here as we'll switch to catchup immediately,
 			// checking if there are new blocks to fetch.
 		}
+		s.logger.Info().Msg("DA follow mode resumed")
 	}
 }
 
 // followDA subscribes to DA events and processes them until a gap is detected or error occurs
 func (s *Syncer) followDA() error {
 	s.logger.Info().Msg("entering DA follow mode")
-	subCh, err := s.daRetriever.Subscribe(s.ctx)
-	if err != nil {
-		return fmt.Errorf("failed to subscribe to DA: %w", err)
+
+	ctx, cancel := context.WithCancel(s.ctx)
+	defer cancel()
+	subCh := make(chan common.DAHeightEvent, 1)
+
+	if err := s.daRetriever.Subscribe(ctx, subCh); err != nil {
+		return fmt.Errorf("subscribe to DA: %w", err)
 	}
 
 	for {
 		select {
-		case <-s.ctx.Done():
-			return s.ctx.Err()
+		case <-ctx.Done():
+			return ctx.Err()
 		case event, ok := <-subCh:
 			if !ok {
 				return errors.New("DA subscription channel closed")
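The worker now alternates between catch-up and follow mode. A simplified sketch of that alternation only (backoff and logging elided); s is assumed to be the *Syncer from this file:

	// Sketch: the catch-up / follow alternation in daWorkerLoop, per this diff.
	func (s *Syncer) daWorkerSketch() {
		for {
			if err := s.fetchDAUntilCaughtUp(); err != nil {
				continue // the real loop waits one DA block time (default 2s) before retrying
			}
			if err := s.followDA(); err != nil {
				if errors.Is(err, context.Canceled) {
					return
				}
				// follow disrupted (gap or closed subscription): loop back into catch-up
			}
		}
	}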
