Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 25 additions & 9 deletions cmd/geth/chaincmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -479,7 +479,7 @@ func initNetwork(ctx *cli.Context) error {
staticConnect = true
}

configs, enodes, err := createConfigs(config, initDir, "node", ips, ports, sentryEnodes, connectOneExtraEnodes, staticConnect)
configs, enodes, accounts, err := createConfigs(config, initDir, "node", ips, ports, sentryEnodes, connectOneExtraEnodes, staticConnect)
if err != nil {
utils.Fatalf("Failed to create node configs: %v", err)
}
Expand All @@ -489,6 +489,11 @@ func initNetwork(ctx *cli.Context) error {
nodeIDs[i] = enodes[i].ID()
}
// add more feature configs
if enableSentryNode {
for i := 0; i < len(sentryConfigs); i++ {
sentryConfigs[i].Node.P2P.ProxyedValidatorAddresses = accounts[i]
}
}
if ctx.Bool(utils.InitEVNValidatorWhitelist.Name) {
for i := 0; i < size; i++ {
configs[i].Node.P2P.EVNNodeIdsWhitelist = nodeIDs
Expand All @@ -501,7 +506,10 @@ func initNetwork(ctx *cli.Context) error {
}
if enableSentryNode && ctx.Bool(utils.InitEVNSentryWhitelist.Name) {
for i := 0; i < len(sentryConfigs); i++ {
sentryConfigs[i].Node.P2P.EVNNodeIdsWhitelist = sentryNodeIDs
// whitelist all sentry nodes + proxyed validator NodeID
wlNodeIDs := []enode.ID{nodeIDs[i]}
wlNodeIDs = append(wlNodeIDs, sentryNodeIDs...)
sentryConfigs[i].Node.P2P.EVNNodeIdsWhitelist = wlNodeIDs
}
}
if enableSentryNode && ctx.Bool(utils.InitEVNSentryRegister.Name) {
Expand Down Expand Up @@ -555,8 +563,11 @@ func createSentryNodeConfigs(ctx *cli.Context, baseConfig gethConfig, initDir st
if err != nil {
utils.Fatalf("Failed to parse ports: %v", err)
}

return createConfigs(baseConfig, initDir, "sentry", ips, ports, nil, false, true)
configs, enodes, _, err := createConfigs(baseConfig, initDir, "sentry", ips, ports, nil, false, true)
if err != nil {
utils.Fatalf("Failed to create config: %v", err)
}
return configs, enodes, nil
}

func createAndSaveFullNodeConfigs(ctx *cli.Context, inGenesisFile *os.File, baseConfig gethConfig, initDir string, extraEnodes []*enode.Node) ([]gethConfig, []*enode.Node, error) {
Expand All @@ -575,7 +586,7 @@ func createAndSaveFullNodeConfigs(ctx *cli.Context, inGenesisFile *os.File, base
utils.Fatalf("Failed to parse ports: %v", err)
}

configs, enodes, err := createConfigs(baseConfig, initDir, "fullnode", ips, ports, extraEnodes, false, false)
configs, enodes, _, err := createConfigs(baseConfig, initDir, "fullnode", ips, ports, extraEnodes, false, false)
if err != nil {
utils.Fatalf("Failed to create config: %v", err)
}
Expand All @@ -590,19 +601,24 @@ func createAndSaveFullNodeConfigs(ctx *cli.Context, inGenesisFile *os.File, base
return configs, enodes, nil
}

func createConfigs(base gethConfig, initDir string, prefix string, ips []string, ports []int, extraEnodes []*enode.Node, connectOneExtraEnodes bool, staticConnect bool) ([]gethConfig, []*enode.Node, error) {
func createConfigs(base gethConfig, initDir string, prefix string, ips []string, ports []int, extraEnodes []*enode.Node, connectOneExtraEnodes bool, staticConnect bool) ([]gethConfig, []*enode.Node, [][]common.Address, error) {
if len(ips) != len(ports) {
return nil, nil, errors.New("mismatch of size and length of ports")
return nil, nil, nil, errors.New("mismatch of size and length of ports")
}
size := len(ips)
enodes := make([]*enode.Node, size)
accounts := make([][]common.Address, size)
for i := 0; i < size; i++ {
nodeConfig := base.Node
nodeConfig.DataDir = path.Join(initDir, fmt.Sprintf("%s%d", prefix, i))
stack, err := node.New(&nodeConfig)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
if err := setAccountManagerBackends(stack.Config(), stack.AccountManager(), stack.KeyStoreDir()); err != nil {
utils.Fatalf("Failed to set account manager backends: %v", err)
}
accounts[i] = stack.AccountManager().Accounts()
pk := stack.Config().NodeKey()
enodes[i] = enode.NewV4(&pk.PublicKey, net.ParseIP(ips[i]), ports[i], ports[i])
}
Expand All @@ -618,7 +634,7 @@ func createConfigs(base gethConfig, initDir string, prefix string, ips []string,
}
configs[i] = createNodeConfig(base, ips[i], ports[i], allEnodes, index, staticConnect)
}
return configs, enodes, nil
return configs, enodes, accounts, nil
}

func writeConfig(inGenesisFile *os.File, config gethConfig, dir string) error {
Expand Down
30 changes: 6 additions & 24 deletions common/bitutil/bitutil.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,7 @@ func XORBytes(dst, a, b []byte) int {
// fastXORBytes xors in bulk. It only works on architectures that support
// unaligned read/writes.
func fastXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
Expand All @@ -49,10 +46,7 @@ func fastXORBytes(dst, a, b []byte) int {
// safeXORBytes xors one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
for i := 0; i < n; i++ {
dst[i] = a[i] ^ b[i]
}
Expand All @@ -71,10 +65,7 @@ func ANDBytes(dst, a, b []byte) int {
// fastANDBytes ands in bulk. It only works on architectures that support
// unaligned read/writes.
func fastANDBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
Expand All @@ -93,10 +84,7 @@ func fastANDBytes(dst, a, b []byte) int {
// safeANDBytes ands one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeANDBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
for i := 0; i < n; i++ {
dst[i] = a[i] & b[i]
}
Expand All @@ -115,10 +103,7 @@ func ORBytes(dst, a, b []byte) int {
// fastORBytes ors in bulk. It only works on architectures that support
// unaligned read/writes.
func fastORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
Expand All @@ -137,10 +122,7 @@ func fastORBytes(dst, a, b []byte) int {
// safeORBytes ors one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
for i := 0; i < n; i++ {
dst[i] = a[i] | b[i]
}
Expand Down
5 changes: 1 addition & 4 deletions common/hexutil/hexutil.go
Original file line number Diff line number Diff line change
Expand Up @@ -147,10 +147,7 @@ func DecodeBig(input string) (*big.Int, error) {
words := make([]big.Word, len(raw)/bigWordNibbles+1)
end := len(raw)
for i := range words {
start := end - bigWordNibbles
if start < 0 {
start = 0
}
start := max(end-bigWordNibbles, 0)
for ri := start; ri < end; ri++ {
nib := decodeNibble(raw[ri])
if nib == badNibble {
Expand Down
5 changes: 1 addition & 4 deletions common/hexutil/json.go
Original file line number Diff line number Diff line change
Expand Up @@ -179,10 +179,7 @@ func (b *Big) UnmarshalText(input []byte) error {
words := make([]big.Word, len(raw)/bigWordNibbles+1)
end := len(raw)
for i := range words {
start := end - bigWordNibbles
if start < 0 {
start = 0
}
start := max(end-bigWordNibbles, 0)
for ri := start; ri < end; ri++ {
nib := decodeNibble(raw[ri])
if nib == badNibble {
Expand Down
59 changes: 35 additions & 24 deletions eth/backend.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,10 @@ const (
ChainData = "chaindata"
)

const (
	// MaxBlockHandleDelayMs is the largest delay, in milliseconds, between two
	// block-handling timestamps that is still accepted as a valid metric
	// sample; larger gaps are dropped as outliers (see validTimeMetric).
	MaxBlockHandleDelayMs = 3000 // max delay for block handles, max 3000 ms
)

var (
sendBlockTimer = metrics.NewRegisteredTimer("chain/delay/block/send", nil)
recvBlockTimer = metrics.NewRegisteredTimer("chain/delay/block/recv", nil)
Expand Down Expand Up @@ -373,22 +377,22 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// Permit the downloader to use the trie cache allowance during fast sync
cacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit + cacheConfig.SnapshotLimit
if eth.handler, err = newHandler(&handlerConfig{
NodeID: eth.p2pServer.Self().ID(),
Database: chainDb,
Chain: eth.blockchain,
TxPool: eth.txPool,
Network: networkID,
Sync: config.SyncMode,
BloomCache: uint64(cacheLimit),
EventMux: eth.eventMux,
RequiredBlocks: config.RequiredBlocks,
DirectBroadcast: config.DirectBroadcast,
EnableEVNFeatures: stack.Config().EnableEVNFeatures,
EVNNodeIdsWhitelist: stack.Config().P2P.EVNNodeIdsWhitelist,
ProxyedValidatorNodeIDs: stack.Config().P2P.ProxyedValidatorNodeIDs,
DisablePeerTxBroadcast: config.DisablePeerTxBroadcast,
PeerSet: peers,
EnableQuickBlockFetching: stack.Config().EnableQuickBlockFetching,
NodeID: eth.p2pServer.Self().ID(),
Database: chainDb,
Chain: eth.blockchain,
TxPool: eth.txPool,
Network: networkID,
Sync: config.SyncMode,
BloomCache: uint64(cacheLimit),
EventMux: eth.eventMux,
RequiredBlocks: config.RequiredBlocks,
DirectBroadcast: config.DirectBroadcast,
EnableEVNFeatures: stack.Config().EnableEVNFeatures,
EVNNodeIdsWhitelist: stack.Config().P2P.EVNNodeIdsWhitelist,
ProxyedValidatorAddresses: stack.Config().P2P.ProxyedValidatorAddresses,
DisablePeerTxBroadcast: config.DisablePeerTxBroadcast,
PeerSet: peers,
EnableQuickBlockFetching: stack.Config().EnableQuickBlockFetching,
}); err != nil {
return nil, err
}
Expand Down Expand Up @@ -901,32 +905,39 @@ func (s *Ethereum) reportRecentBlocksLoop() {
records["BlockTime"] = common.FormatMilliTime(blockMsTime)
metrics.GetOrRegisterLabel("report-blocks", nil).Mark(records)

if sendBlockTime > blockMsTime {
if validTimeMetric(blockMsTime, sendBlockTime) {
sendBlockTimer.Update(time.Duration(sendBlockTime - blockMsTime))
}
if recvNewBlockTime > blockMsTime {
if validTimeMetric(blockMsTime, recvNewBlockTime) {
recvBlockTimer.Update(time.Duration(recvNewBlockTime - blockMsTime))
}
if startImportBlockTime > blockMsTime {
if validTimeMetric(blockMsTime, startImportBlockTime) {
startInsertBlockTimer.Update(time.Duration(startImportBlockTime - blockMsTime))
}
if sendVoteTime > blockMsTime {
if validTimeMetric(blockMsTime, sendVoteTime) {
sendVoteTimer.Update(time.Duration(sendVoteTime - blockMsTime))
}
if firstVoteTime > blockMsTime {
if validTimeMetric(blockMsTime, firstVoteTime) {
firstVoteTimer.Update(time.Duration(firstVoteTime - blockMsTime))
}
if recvMajorityTime > blockMsTime {
if validTimeMetric(blockMsTime, recvMajorityTime) {
majorityVoteTimer.Update(time.Duration(recvMajorityTime - blockMsTime))
}
if importedBlockTime > blockMsTime {
if validTimeMetric(blockMsTime, importedBlockTime) {
importedBlockTimer.Update(time.Duration(importedBlockTime - blockMsTime))
}
if startMiningTime < blockMsTime {
if validTimeMetric(startMiningTime, blockMsTime) {
startMiningTimer.Update(time.Duration(blockMsTime - startMiningTime))
}
case <-s.stopCh:
return
}
}
}

// validTimeMetric reports whether the interval from startMs to endMs is a
// usable delay sample: the end must come strictly after the start, and the
// gap must not exceed MaxBlockHandleDelayMs (larger gaps are outliers).
func validTimeMetric(startMs, endMs int64) bool {
	if endMs <= startMs {
		// Non-positive interval: the end event did not happen after the start.
		return false
	}
	delayMs := endMs - startMs
	return delayMs <= MaxBlockHandleDelayMs
}
Loading