Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 17 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,21 @@
# Changelog
## v1.5.14
### FEATURE
[\#3130](https://github.com/bnb-chain/bsc/pull/3130) config: update BSC Mainnet hardfork time: Maxwell

### BUGFIX
[\#3117](https://github.com/bnb-chain/bsc/pull/3117) core, ethdb: introduce database sync function (#31703)
[\#3122](https://github.com/bnb-chain/bsc/pull/3122) freezer: implement tail method in prunedfreezer
[\#3121](https://github.com/bnb-chain/bsc/pull/3121) miner: discard outdated bids before simulation

### IMPROVEMENT
[\#3105](https://github.com/bnb-chain/bsc/pull/3105) parlia.go: adjust timeForMining to 4/5 second
[\#3112](https://github.com/bnb-chain/bsc/pull/3112) feat: add storagechange object pool for better performance
[\#3110](https://github.com/bnb-chain/bsc/pull/3110) refactor: use the built-in max/min to simplify the code
[\#3120](https://github.com/bnb-chain/bsc/pull/3120) tx_pool: remove one unnecessary allocation
[\#3123](https://github.com/bnb-chain/bsc/pull/3123) refactor: use maps.copy for cleaner map handling
[\#3126](https://github.com/bnb-chain/bsc/pull/3126) jsutils: update getKeyParameters

## v1.5.13
### FEATURE
[\#3019](https://github.com/bnb-chain/bsc/pull/3019) BEP-524: Short Block Interval Phase Two: 0.75 seconds
Expand Down
2 changes: 1 addition & 1 deletion cmd/geth/dbcmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -1282,7 +1282,7 @@ func hbss2pbss(ctx *cli.Context) error {
defer stack.Close()

db := utils.MakeChainDatabase(ctx, stack, false, false)
db.BlockStore().Sync()
db.BlockStore().SyncAncient()
stateDiskDb := db.StateStore()
defer db.Close()

Expand Down
41 changes: 35 additions & 6 deletions cmd/jsutils/getchainstatus.js
Original file line number Diff line number Diff line change
Expand Up @@ -29,16 +29,17 @@ function printUsage() {
console.log(" GetLargeTxs: get large txs of a block range");
console.log("\nOptions:");
console.log(" --rpc specify the url of RPC endpoint");
console.log(" mainnet: https://bsc-mainnet.nodereal.io/v1/454e504917db4f82b756bd0cf6317dce");
console.log(" testnet: https://bsc-testnet-dataseed.bnbchain.org");
console.log(" --startNum the start block number");
console.log(" --endNum the end block number");
console.log(" --miner the miner address");
console.log(" --num the number of blocks to be checked");
console.log(" --topNum the topNum of blocks to be checked");
console.log(" --blockNum the block number to be checked");
console.log("\nExample:");
// mainnet https://bsc-mainnet.nodereal.io/v1/454e504917db4f82b756bd0cf6317dce
console.log(" node getchainstatus.js GetMaxTxCountInBlockRange --rpc https://bsc-testnet-dataseed.bnbchain.org --startNum 40000001 --endNum 40000005");
console.log(" node getchainstatus.js GetBinaryVersion --rpc https://bsc-testnet-dataseed.bnbchain.org --num 21 --turnLength 4");
console.log(" node getchainstatus.js GetBinaryVersion --rpc https://bsc-testnet-dataseed.bnbchain.org --num 21 --turnLength 8");
console.log(" node getchainstatus.js GetTopAddr --rpc https://bsc-testnet-dataseed.bnbchain.org --startNum 40000001 --endNum 40000010 --topNum 10");
console.log(" node getchainstatus.js GetSlashCount --rpc https://bsc-testnet-dataseed.bnbchain.org --blockNum 40000001"); // default: latest block
console.log(" node getchainstatus.js GetPerformanceData --rpc https://bsc-testnet-dataseed.bnbchain.org --startNum 40000001 --endNum 40000010");
Expand All @@ -59,6 +60,7 @@ const addrValidatorSet = "0x0000000000000000000000000000000000001000";
const addrSlash = "0x0000000000000000000000000000000000001001";
const addrStakeHub = "0x0000000000000000000000000000000000002002";
const addrGovernor = "0x0000000000000000000000000000000000002004";
const TimelockContract = "0x0000000000000000000000000000000000002006";

const validatorSetAbi = [
"function validatorExtraSet(uint256 offset) external view returns (uint256, bool, bytes)",
Expand Down Expand Up @@ -92,17 +94,25 @@ const stakeHubAbi = [
"function felonySlashAmount() public view returns (uint256)", // default 200BNB, valid: > max(100, downtimeSlashAmount)
"function downtimeJailTime() public view returns (uint256)", // default 2days,
"function felonyJailTime() public view returns (uint256)", // default 30days,
];
"function getValidators(uint256, uint256) external view returns(address[], address[], uint256)",
"function getNodeIDs(address[] validatorsToQuery) external view returns(address[], bytes32[][])",
];


const governorAbi = [
"function votingPeriod() public view returns (uint256)",
"function lateQuorumVoteExtension() public view returns (uint64)", // it represents minPeriodAfterQuorum
];

const timelockAbi = [
"function getMinDelay() public view returns (uint256)",
];

const validatorSet = new ethers.Contract(addrValidatorSet, validatorSetAbi, provider);
const slashIndicator = new ethers.Contract(addrSlash, slashAbi, provider);
const stakeHub = new ethers.Contract(addrStakeHub, stakeHubAbi, provider);
const governor = new ethers.Contract(addrGovernor, governorAbi, provider);
const timelock = new ethers.Contract(TimelockContract, timelockAbi, provider);

const validatorMap = new Map([
// BSC mainnet
Expand Down Expand Up @@ -279,7 +289,7 @@ async function getMaxTxCountInBlockRange() {
// node getchainstatus.js GetBinaryVersion \
// --rpc https://bsc-testnet-dataseed.bnbchain.org \
// --num(optional): default 21, the number of blocks that will be checked
// --turnLength(optional): default 4, the consecutive block length
// --turnLength(optional): default 8, the consecutive block length
async function getBinaryVersion() {
const blockNum = await provider.getBlockNumber();
let turnLength = program.turnLength;
Expand Down Expand Up @@ -408,7 +418,7 @@ async function getPerformanceData() {
let gasUsedTotal = 0;
let inturnBlocks = 0;
let justifiedBlocks = 0;
let turnLength = 4;
let turnLength = 8;
let lastTimestamp = null;
let parliaEnabled = true;

Expand Down Expand Up @@ -621,21 +631,40 @@ async function getKeyParameters() {
let validatorTable = [];
for (let i = 0; i < totalLength; i++) {
validatorTable.push({
addr: consensusAddrs[i],
consensusAddr: consensusAddrs[i],
votingPower: Number(votingPowers[i] / BigInt(10 ** 18)),
voteAddr: voteAddrs[i],
moniker: await getValidatorMoniker(consensusAddrs[i], blockNum),
});
}
validatorTable.sort((a, b) => b.votingPower - a.votingPower);
console.table(validatorTable);
// get EVN node ids
let validators = await stakeHub.getValidators(0, 1000, { blockTag: blockNum });
let operatorAddrs = validators[0];
let nodeIdss = await stakeHub.getNodeIDs(Array.from(operatorAddrs), { blockTag: blockNum });
let consensusAddrs2 = nodeIdss[0];
let nodeIdArr = nodeIdss[1];
for (let i = 0; i < consensusAddrs2.length; i++) {
let addr = consensusAddrs2[i];
let nodeId = nodeIdArr[i];
if (nodeId.length > 0) {
console.log("consensusAddr:", addr, "nodeId:", nodeId);
}
}


// part 4: governance
let votingPeriod = await governor.votingPeriod({ blockTag: blockNum });
let minPeriodAfterQuorum = await governor.lateQuorumVoteExtension({ blockTag: blockNum });
console.log("\n##==== GovernorContract: 0x0000000000000000000000000000000000002004")
console.log("\tvotingPeriod", Number(votingPeriod));
console.log("\tminPeriodAfterQuorum", Number(minPeriodAfterQuorum));

// part 5: timelock
let minDelay = await timelock.getMinDelay({ blockTag: blockNum });
console.log("\n##==== TimelockContract: 0x0000000000000000000000000000000000002006")
console.log("\tminDelay", Number(minDelay));
}

// 9.cmd: "getEip7623", usage:
Expand Down
7 changes: 2 additions & 5 deletions common/fdlimit/fdlimit_darwin.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,17 +24,14 @@ const hardlimit = 10240
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
// Returns the size it was set to (may differ from the desired 'max')
func Raise(max uint64) (uint64, error) {
func Raise(maxVal uint64) (uint64, error) {
// Get the current limit
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
// Try to update the limit to the max allowance
limit.Cur = limit.Max
if limit.Cur > max {
limit.Cur = max
}
limit.Cur = min(limit.Max, maxVal)
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
Expand Down
2 changes: 1 addition & 1 deletion consensus/parlia/parlia.go
Original file line number Diff line number Diff line change
Expand Up @@ -1668,7 +1668,7 @@ func (p *Parlia) Delay(chain consensus.ChainReader, header *types.Header, leftOv
// The blocking time should be no more than half of period when snap.TurnLength == 1
timeForMining := time.Duration(snap.BlockInterval) * time.Millisecond / 2
if !snap.lastBlockInOneTurn(header.Number.Uint64()) {
timeForMining = time.Duration(snap.BlockInterval) * time.Millisecond * 2 / 3
timeForMining = time.Duration(snap.BlockInterval) * time.Millisecond * 4 / 5
}
if delay > timeForMining {
delay = timeForMining
Expand Down
8 changes: 4 additions & 4 deletions core/bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
if !disk {
db = rawdb.NewMemoryDatabase()
} else {
pdb, err := pebble.New(b.TempDir(), 128, 128, "", false, true)
pdb, err := pebble.New(b.TempDir(), 128, 128, "", false)
if err != nil {
b.Fatalf("cannot create temporary database: %v", err)
}
Expand Down Expand Up @@ -304,7 +304,7 @@ func makeChainForBench(db ethdb.Database, genesis *Genesis, full bool, count uin
func benchWriteChain(b *testing.B, full bool, count uint64) {
genesis := &Genesis{Config: params.AllEthashProtocolChanges}
for i := 0; i < b.N; i++ {
pdb, err := pebble.New(b.TempDir(), 1024, 128, "", false, true)
pdb, err := pebble.New(b.TempDir(), 1024, 128, "", false)
if err != nil {
b.Fatalf("error opening database: %v", err)
}
Expand All @@ -317,7 +317,7 @@ func benchWriteChain(b *testing.B, full bool, count uint64) {
func benchReadChain(b *testing.B, full bool, count uint64) {
dir := b.TempDir()

pdb, err := pebble.New(dir, 1024, 128, "", false, true)
pdb, err := pebble.New(dir, 1024, 128, "", false)
if err != nil {
b.Fatalf("error opening database: %v", err)
}
Expand All @@ -333,7 +333,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
b.ResetTimer()

for i := 0; i < b.N; i++ {
pdb, err = pebble.New(dir, 1024, 128, "", false, true)
pdb, err = pebble.New(dir, 1024, 128, "", false)
if err != nil {
b.Fatalf("error opening database: %v", err)
}
Expand Down
21 changes: 10 additions & 11 deletions core/blockchain.go
Original file line number Diff line number Diff line change
Expand Up @@ -1119,17 +1119,16 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
// Ignore the error here since light client won't hit this path
frozen, _ := bc.db.BlockStore().Ancients()
if num+1 <= frozen {
// Truncate all relative data(header, total difficulty, body, receipt
// and canonical hash) from ancient store.
if _, err := bc.db.BlockStore().TruncateHead(num); err != nil {
log.Crit("Failed to truncate ancient data", "number", num, "err", err)
}
// Remove the hash <-> number mapping from the active store.
rawdb.DeleteHeaderNumber(db, hash)
// The chain segment, such as the block header, canonical hash,
// body, and receipt, will be removed from the ancient store
// in one go.
//
// The hash-to-number mapping in the key-value store will be
// removed by the hc.SetHead function.
} else {
// Remove relative body and receipts from the active store.
// The header, total difficulty and canonical hash will be
// removed in the hc.SetHead function.
// Remove the associated body and receipts from the key-value store.
// The header, hash-to-number mapping, and canonical hash will be
// removed by the hc.SetHead function.
rawdb.DeleteBody(db, hash, num)
rawdb.DeleteBlobSidecars(db, hash, num)
rawdb.DeleteReceipts(db, hash, num)
Expand Down Expand Up @@ -1599,7 +1598,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
size += writeSize

// Sync the ancient store explicitly to ensure all data has been flushed to disk.
if err := bc.db.BlockStore().Sync(); err != nil {
if err := bc.db.BlockStore().SyncAncient(); err != nil {
return 0, err
}
// Update the current snap block because all block data is now present in DB.
Expand Down
8 changes: 4 additions & 4 deletions core/blockchain_repair_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1767,7 +1767,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
datadir := t.TempDir()
ancient := filepath.Join(datadir, "ancient")

pdb, err := pebble.New(datadir, 0, 0, "", false, true)
pdb, err := pebble.New(datadir, 0, 0, "", false)
if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err)
}
Expand Down Expand Up @@ -1861,7 +1861,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
chain.stopWithoutSaving()

// Start a new blockchain back up and see where the repair leads us
pdb, err = pebble.New(datadir, 0, 0, "", false, true)
pdb, err = pebble.New(datadir, 0, 0, "", false)
if err != nil {
t.Fatalf("Failed to reopen persistent key-value database: %v", err)
}
Expand Down Expand Up @@ -1926,7 +1926,7 @@ func testIssue23496(t *testing.T, scheme string) {
datadir := t.TempDir()
ancient := filepath.Join(datadir, "ancient")

pdb, err := pebble.New(datadir, 0, 0, "", false, true)
pdb, err := pebble.New(datadir, 0, 0, "", false)
if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err)
}
Expand Down Expand Up @@ -1984,7 +1984,7 @@ func testIssue23496(t *testing.T, scheme string) {
chain.stopWithoutSaving()

// Start a new blockchain back up and see where the repair leads us
pdb, err = pebble.New(datadir, 0, 0, "", false, true)
pdb, err = pebble.New(datadir, 0, 0, "", false)
if err != nil {
t.Fatalf("Failed to reopen persistent key-value database: %v", err)
}
Expand Down
2 changes: 1 addition & 1 deletion core/blockchain_sethead_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1971,7 +1971,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
datadir := t.TempDir()
ancient := filepath.Join(datadir, "ancient")

pdb, err := pebble.New(datadir, 0, 0, "", false, true)
pdb, err := pebble.New(datadir, 0, 0, "", false)
if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err)
}
Expand Down
4 changes: 2 additions & 2 deletions core/blockchain_snapshot_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
datadir := t.TempDir()
ancient := filepath.Join(datadir, "ancient")

pdb, err := pebble.New(datadir, 0, 0, "", false, true)
pdb, err := pebble.New(datadir, 0, 0, "", false)
if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err)
}
Expand Down Expand Up @@ -257,7 +257,7 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
chain.triedb.Close()

// Start a new blockchain back up and see where the repair leads us
pdb, err := pebble.New(snaptest.datadir, 0, 0, "", false, true)
pdb, err := pebble.New(snaptest.datadir, 0, 0, "", false)
if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err)
}
Expand Down
2 changes: 1 addition & 1 deletion core/blockchain_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2670,7 +2670,7 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
datadir := t.TempDir()
ancient := path.Join(datadir, "ancient")

pdb, err := pebble.New(datadir, 0, 0, "", false, true)
pdb, err := pebble.New(datadir, 0, 0, "", false)
if err != nil {
t.Fatalf("Failed to create persistent key-value database: %v", err)
}
Expand Down
35 changes: 34 additions & 1 deletion core/headerchain.go
Original file line number Diff line number Diff line change
Expand Up @@ -695,18 +695,51 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
hashes = append(hashes, hdr.Hash())
}
for _, hash := range hashes {
// Remove the associated block body and receipts if required.
//
// If the block is in the chain freezer, then this delete operation
// is actually ineffective.
if delFn != nil {
delFn(blockBatch, hash, num)
}
// Remove the hash->number mapping along with the header itself
rawdb.DeleteHeader(blockBatch, hash, num)
rawdb.DeleteTd(blockBatch, hash, num)
}
// Remove the number->hash mapping
rawdb.DeleteCanonicalHash(blockBatch, num)
}
}
// Flush all accumulated deletions.
if err := blockBatch.Write(); err != nil {
log.Crit("Failed to rewind block", "error", err)
log.Crit("Failed to commit batch in setHead", "err", err)
}
// Explicitly flush the pending writes in the key-value store to disk, ensuring
// data durability of the previous deletions.
if err := hc.chainDb.SyncKeyValue(); err != nil {
log.Crit("Failed to sync the key-value store in setHead", "err", err)
}
// Truncate the excessive chain segments in the ancient store.
// These are actually deferred deletions from the loop above.
//
// This step must be performed after synchronizing the key-value store;
// otherwise, in the event of a panic, it's theoretically possible to
// lose recent key-value store writes while the ancient store deletions
// remain, leading to data inconsistency, e.g., the gap between the key
// value store and ancient can be created due to unclean shutdown.
if delFn != nil {
// Ignore the error here since light client won't hit this path
frozen, _ := hc.chainDb.Ancients()
header := hc.CurrentHeader()

// Truncate the excessive chain segment above the current chain head
// in the ancient store.
if header.Number.Uint64()+1 < frozen {
_, err := hc.chainDb.BlockStore().TruncateHead(header.Number.Uint64() + 1)
if err != nil {
log.Crit("Failed to truncate head block", "err", err)
}
}
}
// Clear out any stale content from the caches
hc.headerCache.Purge()
Expand Down
2 changes: 1 addition & 1 deletion core/rawdb/chain_freezer.go
Original file line number Diff line number Diff line change
Expand Up @@ -309,7 +309,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
continue
}
// Batch of blocks have been frozen, flush them before wiping from key-value store
if err := f.Sync(); err != nil {
if err := f.SyncAncient(); err != nil {
log.Crit("Failed to flush frozen tables", "err", err)
}
// Wipe out all data from the active database
Expand Down
Loading
Loading