@@ -2,32 +2,27 @@ package storage
 
 import (
 	"bytes"
+	"encoding/binary"
 	"fmt"
 	"io"
 	"runtime"
 	"sync"
 	"testing"
 	"time"
-
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
 )
 
-func init() {
-	glog.SetV(logger.Info)
-	glog.SetToStderr(true)
-}
-
 /*
 Tests TreeChunker by splitting and joining a random byte slice
 */
 
 type test interface {
 	Fatalf(string, ...interface{})
+	Logf(string, ...interface{})
 }
 
 type chunkerTester struct {
-	chunks []*Chunk
+	inputs map[uint64][]byte
+	chunks map[string]*Chunk
 	t      test
 }
 
@@ -40,7 +35,12 @@ func (self *chunkerTester) checkChunks(t *testing.T, want int) {
 
 func (self *chunkerTester) Split(chunker Splitter, data io.Reader, size int64, chunkC chan *Chunk, swg *sync.WaitGroup) (key Key) {
 	// reset
-	self.chunks = nil
+	self.chunks = make(map[string]*Chunk)
+
+	if self.inputs == nil {
+		self.inputs = make(map[uint64][]byte)
+	}
+
 	quitC := make(chan bool)
 	timeout := time.After(600 * time.Second)
 	if chunkC != nil {
@@ -57,7 +57,8 @@ func (self *chunkerTester) Split(chunker Splitter, data io.Reader, size int64, c
 						return
 					}
 					// glog.V(logger.Info).Infof("chunk %v received", len(self.chunks))
-					self.chunks = append(self.chunks, chunk)
+					// self.chunks = append(self.chunks, chunk)
+					self.chunks[chunk.Key.String()] = chunk
 					if chunk.wg != nil {
 						chunk.wg.Done()
 					}
@@ -73,22 +74,26 @@ func (self *chunkerTester) Split(chunker Splitter, data io.Reader, size int64, c
 		if swg != nil {
 			// glog.V(logger.Info).Infof("Waiting for storage to finish")
 			swg.Wait()
-			// glog.V(logger.Info).Infof("St orage finished")
+			// glog.V(logger.Info).Infof("Storage finished")
 		}
 		close(chunkC)
 	}
 	if chunkC != nil {
+		// glog.V(logger.Info).Infof("waiting for splitter finished")
 		<-quitC
+		// glog.V(logger.Info).Infof("Splitter finished")
 	}
 	return
 }
 
-func (self *chunkerTester) Join(chunker *TreeChunker, key Key, c int, chunkC chan *Chunk, quitC chan bool) LazySectionReader {
+func (self *chunkerTester) Join(chunker Chunker, key Key, c int, chunkC chan *Chunk, quitC chan bool) LazySectionReader {
 	// reset but not the chunks
 
+	// glog.V(logger.Info).Infof("Splitter finished")
 	reader := chunker.Join(key, chunkC)
 
 	timeout := time.After(600 * time.Second)
+	// glog.V(logger.Info).Infof("Splitter finished")
 	i := 0
 	go func() {
 		for {
@@ -101,66 +106,77 @@ func (self *chunkerTester) Join(chunker *TreeChunker, key Key, c int, chunkC cha
 					close(quitC)
 					return
 				}
-				i++
+				// glog.V(logger.Info).Infof("chunk %v: %v", i, chunk.Key.String())
 				// this just mocks the behaviour of a chunk store retrieval
-				var found bool
-				for _, ch := range self.chunks {
-					if bytes.Equal(chunk.Key, ch.Key) {
-						found = true
-						chunk.SData = ch.SData
-						break
-					}
-				}
-				if !found {
-					self.t.Fatalf("not found")
+				stored, success := self.chunks[chunk.Key.String()]
+				// glog.V(logger.Info).Infof("chunk %v, success: %v", chunk.Key.String(), success)
+				if !success {
+					self.t.Fatalf("not found")
+					return
 				}
+				// glog.V(logger.Info).Infof("chunk %v: %v", i, chunk.Key.String())
+				chunk.SData = stored.SData
+				chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
 				close(chunk.C)
+				i++
 			}
 		}
 	}()
 	return reader
 }
 
-func testRandomData(n int, chunks int, t *testing.T) {
-	chunker := NewTreeChunker(&ChunkerParams{
-		Branches: 128,
-		Hash:     "SHA3",
-	})
-	tester := &chunkerTester{t: t}
-	data, input := testDataReaderAndSlice(n)
+func testRandomData(splitter Splitter, n int, tester *chunkerTester) {
+	if tester.inputs == nil {
+		tester.inputs = make(map[uint64][]byte)
+	}
+	input, found := tester.inputs[uint64(n)]
+	var data io.Reader
+	if !found {
+		data, input = testDataReaderAndSlice(n)
+		tester.inputs[uint64(n)] = input
+	} else {
+		data = limitReader(bytes.NewReader(input), n)
+	}
 
 	chunkC := make(chan *Chunk, 1000)
 	swg := &sync.WaitGroup{}
 
-	splitter := chunker
 	key := tester.Split(splitter, data, int64(n), chunkC, swg)
+	tester.t.Logf(" Key = %v\n", key)
 
-	// t.Logf(" Key = %v\n", key)
-
-	// tester.checkChunks(t, chunks)
 	chunkC = make(chan *Chunk, 1000)
 	quitC := make(chan bool)
 
+	chunker := NewTreeChunker(NewChunkerParams())
 	reader := tester.Join(chunker, key, 0, chunkC, quitC)
 	output := make([]byte, n)
+	// glog.V(logger.Info).Infof(" Key = %v\n", key)
 	r, err := reader.Read(output)
+	// glog.V(logger.Info).Infof(" read = %v %v\n", r, err)
 	if r != n || err != io.EOF {
-		t.Fatalf("read error read: %v n = %v err = %v\n", r, n, err)
+		tester.t.Fatalf("read error read: %v n = %v err = %v\n", r, n, err)
 	}
 	if input != nil {
 		if !bytes.Equal(output, input) {
-			t.Fatalf("input and output mismatch\n IN: %v\n OUT: %v\n", input, output)
+			tester.t.Fatalf("input and output mismatch\n IN: %v\n OUT: %v\n", input, output)
 		}
 	}
 	close(chunkC)
 	<-quitC
 }
 
 func TestRandomData(t *testing.T) {
-	testRandomData(60, 1, t)
-	testRandomData(83, 3, t)
-	testRandomData(179, 5, t)
-	testRandomData(253, 7, t)
+	// sizes := []int{123456}
+	sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 123456}
+	tester := &chunkerTester{t: t}
+	chunker := NewTreeChunker(NewChunkerParams())
+	for _, s := range sizes {
+		testRandomData(chunker, s, tester)
+	}
+	pyramid := NewPyramidChunker(NewChunkerParams())
+	for _, s := range sizes {
+		testRandomData(pyramid, s, tester)
+	}
 }
 
 func readAll(reader LazySectionReader, result []byte) {
@@ -186,36 +202,34 @@ func benchReadAll(reader LazySectionReader) {
 }
 
 func benchmarkJoin(n int, t *testing.B) {
+	t.ReportAllocs()
 	for i := 0; i < t.N; i++ {
-		chunker := NewTreeChunker(&ChunkerParams{
-			Branches: 128,
-			Hash:     "SHA3",
-		})
+		chunker := NewTreeChunker(NewChunkerParams())
 		tester := &chunkerTester{t: t}
 		data := testDataReader(n)
 
 		chunkC := make(chan *Chunk, 1000)
 		swg := &sync.WaitGroup{}
 
 		key := tester.Split(chunker, data, int64(n), chunkC, swg)
-		t.StartTimer()
+		// t.StartTimer()
 		chunkC = make(chan *Chunk, 1000)
 		quitC := make(chan bool)
 		reader := tester.Join(chunker, key, i, chunkC, quitC)
-		t.StopTimer()
 		benchReadAll(reader)
 		close(chunkC)
 		<-quitC
+		// t.StopTimer()
 	}
+	stats := new(runtime.MemStats)
+	runtime.ReadMemStats(stats)
+	fmt.Println(stats.Sys)
 }
 
 func benchmarkSplitTree(n int, t *testing.B) {
 	t.ReportAllocs()
 	for i := 0; i < t.N; i++ {
-		chunker := NewTreeChunker(&ChunkerParams{
-			Branches: 128,
-			Hash:     "SHA3",
-		})
+		chunker := NewTreeChunker(NewChunkerParams())
 		tester := &chunkerTester{t: t}
 		data := testDataReader(n)
 		// glog.V(logger.Info).Infof("splitting data of length %v", n)
@@ -229,10 +243,7 @@ func benchmarkSplitTree(n int, t *testing.B) {
 func benchmarkSplitPyramid(n int, t *testing.B) {
 	t.ReportAllocs()
 	for i := 0; i < t.N; i++ {
-		splitter := NewPyramidChunker(&ChunkerParams{
-			Branches: 128,
-			Hash:     "SHA3",
-		})
+		splitter := NewPyramidChunker(NewChunkerParams())
 		tester := &chunkerTester{t: t}
 		data := testDataReader(n)
 		// glog.V(logger.Info).Infof("splitting data of length %v", n)
@@ -243,11 +254,13 @@ func benchmarkSplitPyramid(n int, t *testing.B) {
 	fmt.Println(stats.Sys)
 }
 
-func BenchmarkJoin_100_2(t *testing.B) { benchmarkJoin(100, t) }
-func BenchmarkJoin_1000_2(t *testing.B) { benchmarkJoin(1000, t) }
-func BenchmarkJoin_10000_2(t *testing.B) { benchmarkJoin(10000, t) }
-func BenchmarkJoin_100000_2(t *testing.B) { benchmarkJoin(100000, t) }
-func BenchmarkJoin_1000000_2(t *testing.B) { benchmarkJoin(1000000, t) }
+func BenchmarkJoin_2(t *testing.B) { benchmarkJoin(100, t) }
+func BenchmarkJoin_3(t *testing.B) { benchmarkJoin(1000, t) }
+func BenchmarkJoin_4(t *testing.B) { benchmarkJoin(10000, t) }
+func BenchmarkJoin_5(t *testing.B) { benchmarkJoin(100000, t) }
+func BenchmarkJoin_6(t *testing.B) { benchmarkJoin(1000000, t) }
+func BenchmarkJoin_7(t *testing.B) { benchmarkJoin(10000000, t) }
+func BenchmarkJoin_8(t *testing.B) { benchmarkJoin(100000000, t) }
 
 func BenchmarkSplitTree_2(t *testing.B) { benchmarkSplitTree(100, t) }
 func BenchmarkSplitTree_2h(t *testing.B) { benchmarkSplitTree(500, t) }