
Commit 6ae2167

neutrino: expose OverlapMode in public API and update integration tests
Public API Changes:
- Add OverlapMode field to HeadersImportConfig
- Wire OverlapMode through to ImportOptions in Start()
- Allow users to configure overlap handling behavior (AppendOnly vs ValidateAndAppend)

Integration Test Updates:
- Update TestNeutrinoSyncWithHeadersImport for new overlap behavior
- Add proper error handling for Start() calls with require.NoError
- Test both AppendOnly and ValidateAndAppend modes in different scenarios
- Update test documentation to reflect overlap region processing
- Remove references to "import skipping" for populated stores
- Add proper defer statements for service cleanup
- Fix error handling in TestNeutrinoSyncWithoutHeadersImport

These changes complete the feature by exposing overlap processing functionality to end users while ensuring integration tests validate end-to-end behavior.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent 58a353f commit 6ae2167

2 files changed: 32 additions, 19 deletions

neutrino.go (6 additions, 0 deletions)
@@ -657,6 +657,11 @@ type HeadersImportConfig struct {
 	// Actual memory usage can vary depending on factors such as database
 	// state, Go garbage collector semantics and activity.
 	WriteBatchSizePerRegion int
+
+	// OverlapMode defines how to handle headers that overlap between the
+	// import source and existing data in the target stores. Defaults to
+	// AppendOnly.
+	OverlapMode chainimport.OverlapMode
 }
 
 // peerSubscription holds a peer subscription which we'll notify about any
@@ -1676,6 +1681,7 @@ func (s *ChainService) Start(ctx context.Context) error {
 		TargetBlockHeaderStore:  s.BlockHeaders,
 		TargetFilterHeaderStore: s.RegFilterHeaders,
 		WriteBatchSizePerRegion: s.headersImport.WriteBatchSizePerRegion,
+		OverlapMode:             s.headersImport.OverlapMode,
 	}
 	if _, err := options.Import(ctx); err != nil {
 		return err
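For context, this is roughly how an end user could opt into the new behavior when constructing a chain service. It is a minimal sketch rather than code from this commit: the HeadersImport field name on neutrino.Config, the walletdb-backed Database type, the chainimport import path, and the source file names are assumptions inferred from the diff and the tests below.

package example

import (
	"github.com/btcsuite/btcwallet/walletdb"
	"github.com/lightninglabs/neutrino"
	"github.com/lightninglabs/neutrino/chainimport" // assumed import path
)

// buildImportConfig sketches how the new OverlapMode knob might be set
// alongside the existing headers-import options. Names not shown in this
// commit's diff are illustrative assumptions.
func buildImportConfig(dataDir string, db walletdb.DB) neutrino.Config {
	return neutrino.Config{
		DataDir:  dataDir,
		Database: db,

		// "HeadersImport" is an assumed field name; the diff only
		// shows the HeadersImportConfig type itself.
		HeadersImport: neutrino.HeadersImportConfig{
			BlockHeadersSource:      "block-headers.bin",  // placeholder
			FilterHeadersSource:     "filter-headers.bin", // placeholder
			WriteBatchSizePerRegion: 1000,

			// Per the new doc comment the zero value defaults to
			// AppendOnly; ValidateAndAppend opts into additional
			// checks over the overlap region.
			OverlapMode: chainimport.ValidateAndAppend,
		},
	}
}

Start() then forwards the configured value into the import options, as the second hunk above shows, so no further wiring is needed on the caller's side.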

sync_test.go (26 additions, 19 deletions)
@@ -1065,12 +1065,11 @@ func testRandomBlocks(harness *neutrinoHarness, t *testing.T) {
 // The second part generates additional blocks and creates another Neutrino
 // instance with the same import configuration but with network connectivity.
 // This tests that Neutrino correctly handles the case where header stores are
-// already populated, skipping the import process and continuing synchronization
-// from its current state.
+// already populated, successfully processing the overlap headers region, and
+// continuing synchronization from its current state.
 //
 // This test validates that the headers import mechanism works correctly both as
-// a way to bootstrap new nodes and when restarting existing nodes,
-// significantly improving sync performance in the former scenario.
+// a way to bootstrap nodes and significantly improving sync performance.
 func TestNeutrinoSyncWithHeadersImport(t *testing.T) {
 	// Setup context during testing.
 	rootCtx := context.Background()
@@ -1116,7 +1115,9 @@ func TestNeutrinoSyncWithHeadersImport(t *testing.T) {
 	// Set up export service to connect to h1 and download headers.
 	exportSvc, err := neutrino.NewChainService(config)
 	require.NoError(t, err)
-	exportSvc.Start(rootCtx)
+	err = exportSvc.Start(rootCtx)
+	require.NoError(t, err)
+	defer exportSvc.Stop()
 
 	testHarness := &neutrinoHarness{
 		h1:  h1,
@@ -1202,6 +1203,7 @@ func TestNeutrinoSyncWithHeadersImport(t *testing.T) {
 			BlockHeadersSource:      blockHeadersImportPath,
 			FilterHeadersSource:     filterHeadersImportPath,
 			WriteBatchSizePerRegion: 1000,
+			OverlapMode:             chainimport.AppendOnly,
 		},
 		AddPeers: nil,
 	}
@@ -1210,7 +1212,8 @@ func TestNeutrinoSyncWithHeadersImport(t *testing.T) {
 	require.NoError(t, err)
 
 	// Start the import service.
-	importSvc.Start(rootCtx)
+	err = importSvc.Start(rootCtx)
+	require.NoError(t, err)
 	defer importSvc.Stop()
 
 	// Ensure that neutrino initial synced using the imported headers.
@@ -1224,17 +1227,17 @@ func TestNeutrinoSyncWithHeadersImport(t *testing.T) {
 
 	// Generate an additional 300 blocks on h1. This ensures that when we
 	// sync again, the client will need to fetch these new blocks from the
-	// network. Since the header stores are no longer empty after the first
-	// import, the import process should be skipped and Neutrino should
-	// continue syncing from its current state.
+	// network. Neutrino should continue syncing properly from its current
+	// state.
 	_, err = h1.Client.Generate(300)
 	require.NoError(t, err)
 	t.Log("Syncing again after generating more 300 blocks on h1")
 
 	// Create a new service configuration that includes both the headers
 	// import and a connection to h1. Since the stores already contain data
-	// from the previous import, the import operation should be skipped and
-	// synchronization should continue from the current state.
+	// from the previous import, the import operation should process overlap
+	// headers region properly and synchronization should continue from the
+	// current state.
 	importConfig = neutrino.Config{
 		DataDir:  tempDir,
 		Database: db,
@@ -1243,25 +1246,28 @@ func TestNeutrinoSyncWithHeadersImport(t *testing.T) {
 			BlockHeadersSource:      blockHeadersImportPath,
 			FilterHeadersSource:     filterHeadersImportPath,
 			WriteBatchSizePerRegion: 1000,
+			OverlapMode:             chainimport.ValidateAndAppend,
 		},
 		AddPeers: []string{h1.P2PAddress()},
 	}
 
-	importSvcToBeSkipped, err := neutrino.NewChainService(importConfig)
+	importSvcToBeVerified, err := neutrino.NewChainService(importConfig)
 	require.NoError(t, err)
 
 	// Start the service with p2p network connectivity.
-	importSvcToBeSkipped.Start(rootCtx)
-	defer importSvcToBeSkipped.Stop()
+	err = importSvcToBeVerified.Start(rootCtx)
+	require.NoError(t, err)
+	defer importSvcToBeVerified.Stop()
 
-	// This test doesn't explicitly verify that the import is skipped, but
-	// demonstrates that the service can successfully sync to the chain tip
-	// after the database has already been populated with headers.
+	// This test doesn't explicitly verify that the import handled overlap
+	// headers region unit properly, but demonstrates that the service can
+	// successfully sync to the chain tip after the database has already
+	// been populated with headers without any errors raised.
 	testHarness = &neutrinoHarness{
 		h1:  h1,
 		h2:  nil,
 		h3:  nil,
-		svc: importSvcToBeSkipped,
+		svc: importSvcToBeVerified,
 	}
 	testInitialSync(testHarness, t)
 }
@@ -1371,7 +1377,8 @@ func TestNeutrinoSyncWithoutHeadersImport(t *testing.T) {
 	svc, err := neutrino.NewChainService(config)
 	require.NoError(t, err)
 
-	svc.Start(rootCtx)
+	err = svc.Start(rootCtx)
+	require.NoError(t, err)
 	defer svc.Stop()
 
 	// Create a test harness with the three nodes and the neutrino instance.
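All of the Start() call sites touched above now follow the same pattern: capture the error, assert it with require.NoError, and register cleanup right away. Below is a minimal sketch of that pattern factored into a helper; the helper name is illustrative, t.Cleanup stands in for the tests' defer statements, and the import paths are the upstream ones assumed for this repository.

package neutrino_test

import (
	"context"
	"testing"

	"github.com/lightninglabs/neutrino"
	"github.com/stretchr/testify/require"
)

// startService mirrors the pattern the updated tests use: fail the test
// immediately if Start() returns an error, then schedule Stop() so the
// service is torn down even on failure paths.
func startService(t *testing.T, ctx context.Context, svc *neutrino.ChainService) {
	t.Helper()

	err := svc.Start(ctx)
	require.NoError(t, err)

	t.Cleanup(func() {
		// Mirrors the tests' deferred cleanup; any error Stop()
		// might return is ignored, as in the tests.
		svc.Stop()
	})
}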
