@@ -32,12 +32,12 @@ import (
 	"unsafe"

 	mmap "github.com/edsrzf/mmap-go"
-	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/log"
 	metrics "github.com/rcrowley/go-metrics"
 )

 var (
+	ErrInvalidDumpMagic  = errors.New("invalid dump magic")
 	ErrNonceOutOfRange   = errors.New("nonce out of range")
 	ErrInvalidDifficulty = errors.New("non-positive difficulty")
 	ErrInvalidMixDigest  = errors.New("invalid mix digest")
@@ -55,7 +55,7 @@
 	algorithmRevision = 23

 	// dumpMagic is a dataset dump header to sanity check a data dump.
-	dumpMagic = hexutil.MustDecode("0xfee1deadbaddcafe")
+	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
 )

 // isLittleEndian returns whether the local system is running in little or big
@@ -76,7 +76,14 @@ func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
 		file.Close()
 		return nil, nil, nil, err
 	}
-	return file, mem, buffer, err
+	for i, magic := range dumpMagic {
+		if buffer[i] != magic {
+			mem.Unmap()
+			file.Close()
+			return nil, nil, nil, ErrInvalidDumpMagic
+		}
+	}
+	return file, mem, buffer[len(dumpMagic):], err
 }

 // memoryMapFile tries to memory map an already opened file descriptor.
@@ -113,7 +120,7 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	if err = dump.Truncate(int64(size)); err != nil {
+	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
 		return nil, nil, nil, err
 	}
 	// Memory map the file for writing and fill it with the generator
@@ -122,15 +129,18 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
 		dump.Close()
 		return nil, nil, nil, err
 	}
-	generator(buffer)
+	copy(buffer, dumpMagic)
+
+	data := buffer[len(dumpMagic):]
+	generator(data)

 	if err := mem.Flush(); err != nil {
 		mem.Unmap()
 		dump.Close()
 		return nil, nil, nil, err
 	}
 	os.Rename(temp, path)
-	return dump, mem, buffer, nil
+	return dump, mem, data, nil
 }

 // cache wraps an ethash cache with some metadata to allow easier concurrent use.
@@ -165,11 +175,11 @@ func (c *cache) generate(dir string, limit int, test bool) {
 			return
 		}
 		// Disk storage is needed, this will get fancy
-		endian := "le"
+		var endian string
 		if !isLittleEndian() {
-			endian = "be"
+			endian = ".be"
 		}
-		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
+		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
 		logger := log.New("epoch", c.epoch)

 		// Try to load the file from disk and memory map it
@@ -192,7 +202,7 @@ func (c *cache) generate(dir string, limit int, test bool) {
 		// Iterate over all previous instances and delete old ones
 		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
 			seed := seedHash(uint64(ep)*epochLength + 1)
-			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
+			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
 			os.Remove(path)
 		}
 	})
@@ -249,11 +259,11 @@ func (d *dataset) generate(dir string, limit int, test bool) {
 			generateDataset(d.dataset, d.epoch, cache)
 		}
 		// Disk storage is needed, this will get fancy
-		endian := "le"
+		var endian string
 		if !isLittleEndian() {
-			endian = "be"
+			endian = ".be"
 		}
-		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
+		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
 		logger := log.New("epoch", d.epoch)

 		// Try to load the file from disk and memory map it
@@ -279,7 +289,7 @@ func (d *dataset) generate(dir string, limit int, test bool) {
 		// Iterate over all previous instances and delete old ones
 		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
 			seed := seedHash(uint64(ep)*epochLength + 1)
-			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
+			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
 			os.Remove(path)
 		}
 	})
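For readers skimming the hunks above: the change prefixes every memory-mapped cache/dataset dump with the two `dumpMagic` words and makes `memoryMap` reject files whose prefix does not match, handing callers only the payload that follows the magic. Below is a small, self-contained sketch of that prefix check; it is not part of the commit, and `checkMagic` and `main` are illustrative names.

```go
// Illustrative sketch only; ErrInvalidDumpMagic and dumpMagic mirror the diff,
// while checkMagic and main are hypothetical helpers for demonstration.
package main

import (
	"errors"
	"fmt"
)

var (
	ErrInvalidDumpMagic = errors.New("invalid dump magic")

	// Two-word sanity prefix, as introduced by the diff.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// checkMagic verifies that buffer starts with dumpMagic and returns the
// payload that follows it, mirroring the check added to memoryMap.
func checkMagic(buffer []uint32) ([]uint32, error) {
	if len(buffer) < len(dumpMagic) {
		return nil, ErrInvalidDumpMagic
	}
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			return nil, ErrInvalidDumpMagic
		}
	}
	return buffer[len(dumpMagic):], nil
}

func main() {
	// A freshly generated dump: magic words first, then the payload,
	// matching the layout memoryMapAndGenerate writes after this change.
	buffer := append(append([]uint32{}, dumpMagic...), 1, 2, 3)

	data, err := checkMagic(buffer)
	fmt.Println(data, err) // [1 2 3] <nil>

	_, err = checkMagic([]uint32{0, 0, 1, 2, 3})
	fmt.Println(err) // invalid dump magic
}
```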