-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathpeerHashCache_test.go
134 lines (106 loc) · 2.43 KB
/
peerHashCache_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
package p2p
import (
"crypto/sha1"
"math/rand"
"testing"
"time"
)
// _newTestPeerHashCache builds a PeerHashCache with the given number of
// buckets but without starting the background rotation goroutine, so tests
// can rotate buckets manually via dropOldestBucket.
// NOTE(review): interval is currently unused here — presumably kept so the
// signature mirrors NewPeerHashCache; confirm before removing.
func _newTestPeerHashCache(buckets int, interval time.Duration) *PeerHashCache {
	cache := &PeerHashCache{
		buckets: make([]*phcBucket, buckets),
		stopper: make(chan interface{}, 1),
	}
	for i := range cache.buckets {
		cache.buckets[i] = newphcBucket()
	}
	return cache
}
func rhash(l int) [sha1.Size]byte {
r := make([]byte, l)
rand.Read(r)
return sha1.Sum(r)
}
// TestPhcBucket checks basic Add/Has behavior of a single phcBucket: an
// empty bucket reports nothing present, and after adding the first 64 of
// 128 random hashes exactly those 64 are reported present.
func TestPhcBucket(t *testing.T) {
	bucket := newphcBucket()
	testset := make([][sha1.Size]byte, 128)
	for i := range testset {
		testset[i] = rhash(rand.Intn(512) + 64)
	}
	for _, h := range testset {
		if bucket.Has(h) {
			// Use t.Errorf (not t.Error) so the format verb is applied,
			// and %x ("%h" is not a valid fmt verb) to render the hash.
			t.Errorf("empty bucket reports it has %x", h)
		}
	}
	// Insert only the first half of the test set.
	for i := 0; i < 64; i++ {
		bucket.Add(testset[i])
	}
	for i, h := range testset {
		has := bucket.Has(h)
		if i >= 64 {
			if has {
				t.Errorf("full bucket reports it has %x when it shouldn't", h)
			}
		} else {
			if !has {
				t.Errorf("bucket should have %x but doesn't", h)
			}
		}
	}
}
// TestPhcBucket_MultiThreaded_Try hammers a PeerHashCache from several
// goroutines at once: each worker adds 1024 random hashes and then checks
// that every one of them is still reported present.
func TestPhcBucket_MultiThreaded_Try(t *testing.T) {
	const workers = 8
	cache := _newTestPeerHashCache(16, time.Hour)
	done := make(chan bool, workers)
	worker := func() {
		hashes := make([][sha1.Size]byte, 1024)
		for i := range hashes {
			hashes[i] = rhash(rand.Intn(512) + 64)
			cache.Add(hashes[i])
		}
		for _, h := range hashes {
			if !cache.Has(h) {
				t.Errorf("data missing")
			}
		}
		done <- true
	}
	for w := 0; w < workers; w++ {
		go worker()
	}
	// Wait for every worker to finish before the test returns.
	for w := 0; w < workers; w++ {
		<-done
	}
}
// TestPhcBucket_MultiThreaded_Cleanup checks bucket rotation: an entry
// survives exactly as many dropOldestBucket calls as there are buckets,
// the timed rotation of NewPeerHashCache expires entries on schedule,
// and Stop halts expiry.
func TestPhcBucket_MultiThreaded_Cleanup(t *testing.T) {
	payload := sha1.Sum([]byte{0xff, 0x00, 0x00})

	// One bucket: a single rotation removes everything.
	cache := _newTestPeerHashCache(1, time.Hour)
	cache.Add(payload)
	cache.dropOldestBucket()
	if cache.Has(payload) {
		t.Errorf("single bucket didn't get cleaned properly")
	}

	// Two buckets: the entry survives one rotation but not two.
	cache = _newTestPeerHashCache(2, time.Hour)
	cache.Add(payload)
	cache.dropOldestBucket()
	if !cache.Has(payload) {
		t.Errorf("item not found but should still be in bucket #2")
	}
	cache.dropOldestBucket()
	if cache.Has(payload) {
		t.Errorf("double bucket didn't get cleaned properly")
	}

	// Real timed cache: 3 buckets rotating every 50 ms, so an entry lives
	// at most 150 ms.
	cache = NewPeerHashCache(3, time.Millisecond*50)
	cache.Add(payload)
	time.Sleep(time.Millisecond * 75)
	if !cache.Has(payload) {
		t.Errorf("timed item not found")
	}
	time.Sleep(time.Millisecond * 100) // 175 ms > 150 ms
	if cache.Has(payload) {
		t.Errorf("timed bucket didn't get cleaned properly")
	}

	// After Stop, rotation must no longer expire freshly added entries.
	cache.Stop()
	cache.Add(payload)
	time.Sleep(time.Millisecond * 200)
	if !cache.Has(payload) {
		t.Errorf("timed bucket didn't get stopped properly")
	}
}