
Commit 83375b0

core: remove old conversion to shuffle leveldb blocks into ancients
1 parent: 34f3c95

1 file changed: +7 −56 lines

core/blockchain.go

Lines changed: 7 additions & 56 deletions
@@ -1207,63 +1207,14 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
 			return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
 		}
-		var (
-			start  = time.Now()
-			logged = time.Now()
-			count  int
-		)
-		// Migrate all ancient blocks. This can happen if someone upgrades from Geth
-		// 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the
-		// long term.
-		for {
-			// We can ignore the error here since light client won't hit this code path.
-			frozen, _ := bc.db.Ancients()
-			if frozen >= block.NumberU64() {
-				break
-			}
-			h := rawdb.ReadCanonicalHash(bc.db, frozen)
-			b := rawdb.ReadBlock(bc.db, h, frozen)
-			size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen))
-			count += 1
-
-			// Always keep genesis block in active database.
-			if b.NumberU64() != 0 {
-				deleted = append(deleted, &numberHash{b.NumberU64(), b.Hash()})
+		if block.NumberU64() == 1 {
+			// Make sure to write the genesis into the freezer
+			if frozen, _ := bc.db.Ancients(); frozen == 0 {
+				h := rawdb.ReadCanonicalHash(bc.db, 0)
+				b := rawdb.ReadBlock(bc.db, h, 0)
+				size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, 0, bc.chainConfig), rawdb.ReadTd(bc.db, h, 0))
+				log.Info("Wrote genesis to ancients")
 			}
-			if time.Since(logged) > 8*time.Second {
-				log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
-				logged = time.Now()
-			}
-			// Don't collect too much in-memory, write it out every 100K blocks
-			if len(deleted) > 100000 {
-				// Sync the ancient store explicitly to ensure all data has been flushed to disk.
-				if err := bc.db.Sync(); err != nil {
-					return 0, err
-				}
-				// Wipe out canonical block data.
-				for _, nh := range deleted {
-					rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
-					rawdb.DeleteCanonicalHash(batch, nh.number)
-				}
-				if err := batch.Write(); err != nil {
-					return 0, err
-				}
-				batch.Reset()
-				// Wipe out side chain too.
-				for _, nh := range deleted {
-					for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
-						rawdb.DeleteBlock(batch, hash, nh.number)
-					}
-				}
-				if err := batch.Write(); err != nil {
-					return 0, err
-				}
-				batch.Reset()
-				deleted = deleted[0:]
-			}
-		}
-		if count > 0 {
-			log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
 		}
 		// Flush data into ancient database.
 		size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
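
The retained logic is small: when the first block of an imported receipt chain is block #1 and the freezer is still empty, the genesis block is copied from the active key-value database into the ancient store before the rest of the chain is flushed. The removed path instead iterated over every already-synced block and migrated it into the freezer during import. Below is a minimal, self-contained Go sketch of the retained idea; the `keyValueStore` and `freezer` types and the `ensureGenesisFrozen` helper are simplified stand-ins invented for illustration, not the actual go-ethereum interfaces.

```go
package main

import "fmt"

// block is a simplified stand-in for a chain block (hypothetical type,
// not go-ethereum's types.Block).
type block struct {
	number uint64
	hash   string
}

// keyValueStore mimics the active (leveldb-backed) database that holds
// recent, non-frozen chain data.
type keyValueStore struct {
	canonical map[uint64]block // canonical blocks keyed by number
}

// freezer mimics the append-only ancient store.
type freezer struct {
	blocks []block
}

// Ancients reports how many blocks have already been frozen.
func (f *freezer) Ancients() uint64 { return uint64(len(f.blocks)) }

// writeAncientBlock appends a block to the ancient store.
func (f *freezer) writeAncientBlock(b block) { f.blocks = append(f.blocks, b) }

// ensureGenesisFrozen mirrors the shape of the new path in InsertReceiptChain:
// when the first imported block is #1 and the freezer is still empty, copy the
// genesis block from the key-value store into the freezer so the ancient store
// always starts at block 0.
func ensureGenesisFrozen(db *keyValueStore, f *freezer, imported block) {
	if imported.number != 1 || f.Ancients() != 0 {
		return
	}
	genesis := db.canonical[0]
	f.writeAncientBlock(genesis)
	fmt.Println("Wrote genesis to ancients")
}

func main() {
	db := &keyValueStore{canonical: map[uint64]block{0: {number: 0, hash: "0xgenesis"}}}
	f := &freezer{}
	ensureGenesisFrozen(db, f, block{number: 1, hash: "0xblock1"})
	fmt.Println("frozen blocks:", f.Ancients())
}
```

In the sketch, as in the commit, the genesis copy happens at most once per sync (only when the freezer is empty), and everything above block 0 is written to the ancient store directly by the normal import path rather than being migrated out of leveldb after the fact.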
