Skip to content

Commit f3579f6

Browse files
karalabe authored and fjl committed
pow: make data dumps backwards compatible, fix DAG end
1 parent 5c8fa6a commit f3579f6

File tree

2 files changed

+32
-21
lines changed

2 files changed

+32
-21
lines changed

pow/ethash.go

Lines changed: 24 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -32,12 +32,12 @@ import (
3232
"unsafe"
3333

3434
mmap "github.com/edsrzf/mmap-go"
35-
"github.com/ethereum/go-ethereum/common/hexutil"
3635
"github.com/ethereum/go-ethereum/log"
3736
metrics "github.com/rcrowley/go-metrics"
3837
)
3938

4039
var (
40+
ErrInvalidDumpMagic = errors.New("invalid dump magic")
4141
ErrNonceOutOfRange = errors.New("nonce out of range")
4242
ErrInvalidDifficulty = errors.New("non-positive difficulty")
4343
ErrInvalidMixDigest = errors.New("invalid mix digest")
@@ -55,7 +55,7 @@ var (
5555
algorithmRevision = 23
5656

5757
// dumpMagic is a dataset dump header to sanity check a data dump.
58-
dumpMagic = hexutil.MustDecode("0xfee1deadbaddcafe")
58+
dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
5959
)
6060

6161
// isLittleEndian returns whether the local system is running in little or big
@@ -76,7 +76,14 @@ func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
7676
file.Close()
7777
return nil, nil, nil, err
7878
}
79-
return file, mem, buffer, err
79+
for i, magic := range dumpMagic {
80+
if buffer[i] != magic {
81+
mem.Unmap()
82+
file.Close()
83+
return nil, nil, nil, ErrInvalidDumpMagic
84+
}
85+
}
86+
return file, mem, buffer[len(dumpMagic):], err
8087
}
8188

8289
// memoryMapFile tries to memory map an already opened file descriptor.
@@ -113,7 +120,7 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
113120
if err != nil {
114121
return nil, nil, nil, err
115122
}
116-
if err = dump.Truncate(int64(size)); err != nil {
123+
if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
117124
return nil, nil, nil, err
118125
}
119126
// Memory map the file for writing and fill it with the generator
@@ -122,15 +129,18 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
122129
dump.Close()
123130
return nil, nil, nil, err
124131
}
125-
generator(buffer)
132+
copy(buffer, dumpMagic)
133+
134+
data := buffer[len(dumpMagic):]
135+
generator(data)
126136

127137
if err := mem.Flush(); err != nil {
128138
mem.Unmap()
129139
dump.Close()
130140
return nil, nil, nil, err
131141
}
132142
os.Rename(temp, path)
133-
return dump, mem, buffer, nil
143+
return dump, mem, data, nil
134144
}
135145

136146
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
@@ -165,11 +175,11 @@ func (c *cache) generate(dir string, limit int, test bool) {
165175
return
166176
}
167177
// Disk storage is needed, this will get fancy
168-
endian := "le"
178+
var endian string
169179
if !isLittleEndian() {
170-
endian = "be"
180+
endian = ".be"
171181
}
172-
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
182+
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
173183
logger := log.New("epoch", c.epoch)
174184

175185
// Try to load the file from disk and memory map it
@@ -192,7 +202,7 @@ func (c *cache) generate(dir string, limit int, test bool) {
192202
// Iterate over all previous instances and delete old ones
193203
for ep := int(c.epoch) - limit; ep >= 0; ep-- {
194204
seed := seedHash(uint64(ep)*epochLength + 1)
195-
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
205+
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
196206
os.Remove(path)
197207
}
198208
})
@@ -249,11 +259,11 @@ func (d *dataset) generate(dir string, limit int, test bool) {
249259
generateDataset(d.dataset, d.epoch, cache)
250260
}
251261
// Disk storage is needed, this will get fancy
252-
endian := "le"
262+
var endian string
253263
if !isLittleEndian() {
254-
endian = "be"
264+
endian = ".be"
255265
}
256-
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
266+
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
257267
logger := log.New("epoch", d.epoch)
258268

259269
// Try to load the file from disk and memory map it
@@ -279,7 +289,7 @@ func (d *dataset) generate(dir string, limit int, test bool) {
279289
// Iterate over all previous instances and delete old ones
280290
for ep := int(d.epoch) - limit; ep >= 0; ep-- {
281291
seed := seedHash(uint64(ep)*epochLength + 1)
282-
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
292+
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
283293
os.Remove(path)
284294
}
285295
})

pow/ethash_algo.go

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -225,15 +225,16 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
225225
// Print some debug logs to allow analysis on low end devices
226226
logger := log.New("epoch", epoch)
227227

228-
defer func(start time.Time) {
228+
start := time.Now()
229+
defer func() {
229230
elapsed := time.Since(start)
230231

231232
logFn := logger.Debug
232233
if elapsed > 3*time.Second {
233234
logFn = logger.Info
234235
}
235236
logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
236-
}(time.Now())
237+
}()
237238

238239
// Figure out whether the bytes need to be swapped for the machine
239240
swapped := !isLittleEndian()
@@ -260,23 +261,23 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
260261
keccak512 := makeHasher(sha3.NewKeccak512())
261262

262263
// Calculate the data segment this thread should generate
263-
batch := uint32(size / hashBytes / uint64(threads))
264-
start := uint32(id) * batch
265-
limit := start + batch
264+
batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
265+
first := uint32(id) * batch
266+
limit := first + batch
266267
if limit > uint32(size/hashBytes) {
267268
limit = uint32(size / hashBytes)
268269
}
269270
// Calculate the dataset segment
270271
percent := uint32(size / hashBytes / 100)
271-
for index := start; index < limit; index++ {
272+
for index := first; index < limit; index++ {
272273
item := generateDatasetItem(cache, index, keccak512)
273274
if swapped {
274275
swap(item)
275276
}
276277
copy(dataset[index*hashBytes:], item)
277278

278279
if status := atomic.AddUint32(&progress, 1); status%percent == 0 {
279-
logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes))
280+
logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
280281
}
281282
}
282283
}(i)

0 commit comments

Comments (0)