Skip to content

Commit 6ef1348

Browse files
committed
test: verify filestore respects Provide.Strategy
- add positive/negative test pair for filestore provide gating
- positive: filestore + "all" strategy provides root and leaf CIDs
- negative: filestore + "roots" strategy with --pin=false does not
- increase providerTimeout to 30s for CI reliability
- replace fixed 500ms DHT sleep with waitForProviderReady: polls 'ipfs provide stat' for SweepingProvider connectivity, then runs a canary provide+findprovs round-trip
- add expectNoneProvided for parallel negative assertions to avoid sequential timeout accumulation
1 parent 5e995a3 commit 6ef1348

1 file changed

Lines changed: 186 additions & 26 deletions

File tree

test/cli/provider_test.go

Lines changed: 186 additions & 26 deletions
Original file line number | Diff line number | Diff line change
@@ -20,21 +20,143 @@ import (
2020
)
2121

2222
const (
23-
timeStep = 20 * time.Millisecond
24-
timeout = time.Second
23+
providerPollInterval = 200 * time.Millisecond
24+
providerTimeout = 30 * time.Second
2525
)
2626

2727
type cfgApplier func(*harness.Node)
2828

2929
func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
3030
t.Helper()
3131

32+
expectNoProviders := func(t *testing.T, cid string, nodes ...*harness.Node) {
33+
// Poll for providerTimeout to be confident that provide records
34+
// have not propagated. Uses RunIPFS (not IPFS) to avoid panics
35+
// if the node is cleaned up while require.Never is still polling.
36+
require.Never(t, func() bool {
37+
for _, node := range nodes {
38+
res := node.RunIPFS("routing", "findprovs", "-n=1", cid)
39+
if res.Err == nil && res.Stdout.Trimmed() != "" {
40+
return true
41+
}
42+
}
43+
return false
44+
}, providerTimeout, providerPollInterval, "expected no providers for %s", cid)
45+
}
46+
47+
// expectNoneProvided is like expectNoProviders but checks multiple CIDs
48+
// in parallel subtests so they share a single providerTimeout window
49+
// instead of accumulating sequentially.
50+
expectNoneProvided := func(t *testing.T, cids []string, nodes ...*harness.Node) {
51+
t.Helper()
52+
t.Run("expect-no-providers", func(t *testing.T) {
53+
for _, c := range cids {
54+
t.Run(c[:16], func(t *testing.T) {
55+
t.Parallel()
56+
require.Never(t, func() bool {
57+
for _, node := range nodes {
58+
res := node.RunIPFS("routing", "findprovs", "-n=1", c)
59+
if res.Err == nil && res.Stdout.Trimmed() != "" {
60+
return true
61+
}
62+
}
63+
return false
64+
}, providerTimeout, providerPollInterval, "expected no providers for %s", c)
65+
})
66+
}
67+
})
68+
}
69+
70+
expectProviders := func(t *testing.T, cid, expectedProvider string, nodes ...*harness.Node) {
71+
for _, node := range nodes {
72+
// Uses RunIPFS (not IPFS) to avoid panics if the node is
73+
// cleaned up while require.Eventually is still polling.
74+
require.Eventually(t, func() bool {
75+
res := node.RunIPFS("routing", "findprovs", "-n=1", cid)
76+
return res.Err == nil && res.Stdout.Trimmed() == expectedProvider
77+
}, providerTimeout, providerPollInterval, "expected a provider for %s", cid)
78+
}
79+
}
80+
81+
// provideStatResult holds the subset of 'ipfs provide stat --enc=json'
82+
// fields needed to determine provider readiness.
83+
type provideStatResult struct {
84+
Sweep *struct {
85+
Connectivity struct {
86+
Status string `json:"status"`
87+
} `json:"connectivity"`
88+
} `json:"Sweep"`
89+
Legacy *json.RawMessage `json:"Legacy"`
90+
}
91+
92+
// isProviderOnline parses 'ipfs provide stat --enc=json' output and
93+
// returns true when the provider is ready to accept provide requests.
94+
//
95+
// - LegacyProvider: ready as soon as stats are available (Legacy != nil).
96+
// - SweepingProvider: ready when connectivity.status == "online",
97+
// meaning approxPrefixLen completed and StartProviding won't
98+
// silently drop requests.
99+
isProviderOnline := func(output string) bool {
100+
var s provideStatResult
101+
if err := json.Unmarshal([]byte(output), &s); err != nil {
102+
return false
103+
}
104+
if s.Legacy != nil {
105+
return true
106+
}
107+
return s.Sweep != nil && s.Sweep.Connectivity.Status == "online"
108+
}
109+
110+
// waitForProviderReady blocks until every node's provider system is
111+
// initialized and the DHT provide+find pipeline works end-to-end.
112+
//
113+
// It polls 'ipfs provide stat' (WAN then LAN, since test networks
114+
// use private addresses where only the LAN DHT has peers) and then
115+
// runs a canary provide+findprovs round-trip. The canary uses manual
116+
// 'ipfs routing provide' which bypasses Strategy and Interval checks,
117+
// so it works for every configuration where Provide.Enabled is true.
118+
waitForProviderReady := func(t *testing.T, nodes harness.Nodes) {
119+
t.Helper()
120+
providerActive := false
121+
for _, node := range nodes {
122+
require.Eventually(t, func() bool {
123+
res := node.RunIPFS("provide", "stat", "--enc=json")
124+
if res.Err != nil {
125+
return true // providing disabled (NoopProvider)
126+
}
127+
if isProviderOnline(res.Stdout.Trimmed()) {
128+
providerActive = true
129+
return true
130+
}
131+
// WAN DHT stays offline in test networks (private addrs
132+
// only). Fall back to LAN DHT stats.
133+
res = node.RunIPFS("provide", "stat", "--lan", "--enc=json")
134+
if res.Err != nil {
135+
return false
136+
}
137+
if isProviderOnline(res.Stdout.Trimmed()) {
138+
providerActive = true
139+
return true
140+
}
141+
return false
142+
}, providerTimeout, providerPollInterval,
143+
"timed out waiting for provider to come online")
144+
}
145+
146+
if providerActive && len(nodes) >= 2 {
147+
canary := nodes[0].IPFSAddStr(time.Now().String(), "--pin=false")
148+
nodes[0].IPFS("routing", "provide", canary)
149+
expectProviders(t, canary, nodes[0].PeerID().String(), nodes[1:]...)
150+
}
151+
}
152+
32153
initNodes := func(t *testing.T, n int, fn func(n *harness.Node)) harness.Nodes {
33154
nodes := harness.NewT(t).NewNodes(n).Init()
34155
nodes.ForEachPar(apply)
35156
nodes.ForEachPar(fn)
36157
nodes = nodes.StartDaemons().Connect()
37-
time.Sleep(500 * time.Millisecond) // wait for DHT clients to be bootstrapped
158+
time.Sleep(500 * time.Millisecond) // baseline delay for DHT routing tables to settle
159+
waitForProviderReady(t, nodes)
38160
return nodes
39161
}
40162

@@ -45,26 +167,6 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
45167
return nodes
46168
}
47169

48-
expectNoProviders := func(t *testing.T, cid string, nodes ...*harness.Node) {
49-
for _, node := range nodes {
50-
res := node.IPFS("routing", "findprovs", "-n=1", cid)
51-
require.Empty(t, res.Stdout.String())
52-
}
53-
}
54-
55-
expectProviders := func(t *testing.T, cid, expectedProvider string, nodes ...*harness.Node) {
56-
outerLoop:
57-
for _, node := range nodes {
58-
for i := time.Duration(0); i*timeStep < timeout; i++ {
59-
res := node.IPFS("routing", "findprovs", "-n=1", cid)
60-
if res.Stdout.Trimmed() == expectedProvider {
61-
continue outerLoop
62-
}
63-
}
64-
require.FailNowf(t, "found no providers", "expected a provider for %s", cid)
65-
}
66-
}
67-
68170
t.Run("Provide.Enabled=true announces new CIDs created by ipfs add", func(t *testing.T) {
69171
t.Parallel()
70172

@@ -355,6 +457,66 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
355457
expectProviders(t, cid, nodes[0].PeerID().String(), nodes[1:]...)
356458
})
357459

460+
// The filestore (--nocopy) uses a separate code path (FilestoreBlockstoreCtor)
461+
// that passes a provider to filestore.NewFilestore. These two tests work as
462+
// a pair: the positive test proves filestore content is findable when the
463+
// strategy says it should be (control), and the negative test verifies the
464+
// provider is NOT passed when the strategy does not include "all".
465+
//
466+
// Both tests use a 2MiB file (larger than chunk size) to produce multiple
467+
// leaf blocks plus a root, so we can verify behavior for all block types.
468+
469+
initFilestoreNodesWithProvideStrategy := func(t *testing.T, strategy string) (provider *harness.Node, querier *harness.Node) {
470+
nodes := initNodes(t, 2, func(n *harness.Node) {
471+
n.SetIPFSConfig("Experimental.FilestoreEnabled", true)
472+
n.SetIPFSConfig("Provide.Strategy", strategy)
473+
})
474+
t.Cleanup(func() { nodes.StopDaemons() })
475+
return nodes[0], nodes[1]
476+
}
477+
478+
addNocopyFile := func(t *testing.T, node *harness.Node, addFlags ...string) (rootCID string, leafCIDs []string) {
479+
filePath := filepath.Join(node.Dir, "testfile.bin")
480+
require.NoError(t, os.WriteFile(filePath, random.Bytes(2*1024*1024), 0o644))
481+
482+
args := append([]string{"add", "-q", "--nocopy"}, addFlags...)
483+
args = append(args, filePath)
484+
rootCID = strings.TrimSpace(node.IPFS(args...).Stdout.String())
485+
486+
leafCIDs = strings.Fields(node.IPFS("refs", rootCID).Stdout.Trimmed())
487+
require.NotEmpty(t, leafCIDs)
488+
return rootCID, leafCIDs
489+
}
490+
491+
t.Run("Filestore with 'all' strategy provides --nocopy blocks", func(t *testing.T) {
492+
t.Parallel()
493+
provider, querier := initFilestoreNodesWithProvideStrategy(t, "all")
494+
495+
rootCID, leafCIDs := addNocopyFile(t, provider)
496+
497+
// Check root and at least one leaf CID. The root alone is not
498+
// sufficient because --fast-provide-root (on by default) provides
499+
// it via a separate mechanism. Leaf blocks are only provided by
500+
// the filestore provider, so finding one proves it works.
501+
pid := provider.PeerID().String()
502+
expectProviders(t, rootCID, pid, querier)
503+
expectProviders(t, leafCIDs[0], pid, querier)
504+
})
505+
506+
t.Run("Filestore with non-all strategy does not provide --nocopy blocks", func(t *testing.T) {
507+
t.Parallel()
508+
provider, querier := initFilestoreNodesWithProvideStrategy(t, "roots")
509+
510+
// Use --pin=false to avoid recursive pins that would trigger
511+
// pin-based providing, and --fast-provide-root=false to prevent
512+
// the root from being immediately provided via the fast path.
513+
// This isolates the test to the filestore add-time provide path.
514+
rootCID, leafCIDs := addNocopyFile(t, provider, "--pin=false", "--fast-provide-root=false")
515+
516+
// Before the fix, the filestore always provided on add regardless of strategy.
517+
expectNoneProvided(t, []string{rootCID, leafCIDs[0]}, querier)
518+
})
519+
358520
if reprovide {
359521

360522
t.Run("Reprovides with 'all' strategy when strategy is '' (empty)", func(t *testing.T) {
@@ -415,9 +577,7 @@ func runProviderSuite(t *testing.T, reprovide bool, apply cfgApplier) {
415577

416578
// Nothing should have been provided. The pin was offline, and
417579
// the others should not be provided per the strategy.
418-
expectNoProviders(t, cidFoo, nodes[1:]...)
419-
expectNoProviders(t, cidBar, nodes[1:]...)
420-
expectNoProviders(t, cidBarDir, nodes[1:]...)
580+
expectNoneProvided(t, []string{cidFoo, cidBar, cidBarDir}, nodes[1:]...)
421581

422582
nodes[0].IPFS("routing", "reprovide")
423583

0 commit comments

Comments (0)