Skip to content

Commit fdd50da

Browse files
committed
changing flag name
1 parent a5e03c3 commit fdd50da

File tree

4 files changed

+37
-37
lines changed

4 files changed

+37
-37
lines changed

multinode/node.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -105,8 +105,8 @@ type node[
105105
ws *url.URL
106106
http *url.URL
107107

108-
rpc RPC
109-
isRPCProxy bool
108+
rpc RPC
109+
isLoadBalancedRPC bool
110110

111111
stateMu sync.RWMutex // protects state* fields
112112
state nodeState
@@ -137,7 +137,7 @@ func NewNode[
137137
nodeOrder int32,
138138
rpc RPC,
139139
chainFamily string,
140-
isRPCProxy bool,
140+
isLoadBalancedRPC bool,
141141
) Node[CHAIN_ID, RPC] {
142142
n := new(node[CHAIN_ID, HEAD, RPC])
143143
n.name = name
@@ -164,7 +164,7 @@ func NewNode[
164164
)
165165
n.lfcLog = logger.Named(lggr, "Lifecycle")
166166
n.rpc = rpc
167-
n.isRPCProxy = isRPCProxy
167+
n.isLoadBalancedRPC = isLoadBalancedRPC
168168
n.chainFamily = chainFamily
169169
return n
170170
}

multinode/node_lifecycle.go

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() {
128128
if pollFailureThreshold > 0 && pollFailures >= pollFailureThreshold {
129129
lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "nodeState", n.getCachedState())
130130
if n.poolInfoProvider != nil {
131-
if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 && !n.isRPCProxy {
131+
if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 && !n.isLoadBalancedRPC {
132132
lggr.Criticalf("RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState)
133133
continue
134134
}
@@ -138,7 +138,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() {
138138
}
139139
if outOfSync, liveNodes := n.isOutOfSyncWithPool(); outOfSync {
140140
// note: there must be another live node for us to be out of sync
141-
if liveNodes < 2 && !n.isRPCProxy {
141+
if liveNodes < 2 && !n.isLoadBalancedRPC {
142142
lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState)
143143
continue
144144
}
@@ -166,7 +166,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() {
166166
if n.poolInfoProvider != nil {
167167
// if it's the only node and it's not a proxy, keep waiting for sync (check LatestChainInfo)
168168
// if it's a proxy, then declare out of sync and try reconnecting because the proxy might return a healthier RPC
169-
if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 && !n.isRPCProxy {
169+
if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 && !n.isLoadBalancedRPC {
170170
lggr.Criticalf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState)
171171
// We don't necessarily want to wait the full timeout to check again, we should
172172
// check regularly and log noisily in this state
@@ -194,7 +194,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() {
194194
if n.poolInfoProvider != nil {
195195
// if it's the only node and it's not a proxy, keep waiting for sync (check LatestChainInfo)
196196
// if it's a proxy, then declare out of sync and try reconnecting because the proxy might return a healthier RPC
197-
if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 && !n.isRPCProxy {
197+
if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 && !n.isLoadBalancedRPC {
198198
lggr.Criticalf("RPC's finalized state is out of sync; %s %s", msgCannotDisable, msgDegradedState)
199199
// We don't necessarily want to wait the full timeout to check again, we should
200200
// check regularly and log noisily in this state
@@ -460,7 +460,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(syncIssues syncStatus) {
460460
case <-time.After(zombieNodeCheckInterval(noNewHeadsTimeoutThreshold)):
461461
if n.poolInfoProvider != nil {
462462
if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 1 {
463-
if n.isRPCProxy {
463+
if n.isLoadBalancedRPC {
464464
n.declareUnreachable()
465465
return
466466
}

multinode/node_lifecycle_test.go

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -212,9 +212,9 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) {
212212
pollFailureThreshold: pollFailureThreshold,
213213
pollInterval: tests.TestInterval,
214214
},
215-
rpc: rpc,
216-
lggr: lggr,
217-
isRPCProxy: true,
215+
rpc: rpc,
216+
lggr: lggr,
217+
isLoadBalancedRPC: true,
218218
})
219219
defer func() { assert.NoError(t, node.close()) }()
220220
poolInfo := newMockPoolChainInfoProvider(t)
@@ -302,9 +302,9 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) {
302302
syncThreshold: syncThreshold,
303303
selectionMode: NodeSelectionModeRoundRobin,
304304
},
305-
rpc: rpc,
306-
lggr: lggr,
307-
isRPCProxy: true,
305+
rpc: rpc,
306+
lggr: lggr,
307+
isLoadBalancedRPC: true,
308308
})
309309
defer func() { assert.NoError(t, node.close()) }()
310310
rpc.On("ClientVersion", mock.Anything).Return("", nil)
@@ -407,8 +407,8 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) {
407407
chainConfig: clientMocks.ChainConfig{
408408
NoNewHeadsThresholdVal: tests.TestInterval,
409409
},
410-
rpc: rpc,
411-
isRPCProxy: true,
410+
rpc: rpc,
411+
isLoadBalancedRPC: true,
412412
})
413413
defer func() { assert.NoError(t, node.close()) }()
414414
poolInfo := newMockPoolChainInfoProvider(t)
@@ -660,9 +660,9 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) {
660660
NoNewFinalizedHeadsThresholdVal: noNewFinalizedHeadsThreshold,
661661
IsFinalityTagEnabled: true,
662662
},
663-
rpc: rpc,
664-
lggr: lggr,
665-
isRPCProxy: true,
663+
rpc: rpc,
664+
lggr: lggr,
665+
isLoadBalancedRPC: true,
666666
})
667667
defer func() { assert.NoError(t, node.close()) }()
668668
poolInfo := newMockPoolChainInfoProvider(t)
@@ -1072,10 +1072,10 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) {
10721072
chainConfig: clientMocks.ChainConfig{
10731073
NoNewHeadsThresholdVal: tests.TestInterval,
10741074
},
1075-
rpc: rpc,
1076-
chainID: nodeChainID,
1077-
lggr: lggr,
1078-
isRPCProxy: true,
1075+
rpc: rpc,
1076+
chainID: nodeChainID,
1077+
lggr: lggr,
1078+
isLoadBalancedRPC: true,
10791079
})
10801080
defer func() { assert.NoError(t, node.close()) }()
10811081
poolInfo := newMockPoolChainInfoProvider(t)

multinode/node_test.go

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -71,18 +71,18 @@ type testNode struct {
7171
}
7272

7373
type testNodeOpts struct {
74-
config testNodeConfig
75-
chainConfig mocks.ChainConfig
76-
lggr logger.Logger
77-
wsuri *url.URL
78-
httpuri *url.URL
79-
name string
80-
id int
81-
chainID ID
82-
nodeOrder int32
83-
rpc *mockRPCClient[ID, Head]
84-
chainFamily string
85-
isRPCProxy bool
74+
config testNodeConfig
75+
chainConfig mocks.ChainConfig
76+
lggr logger.Logger
77+
wsuri *url.URL
78+
httpuri *url.URL
79+
name string
80+
id int
81+
chainID ID
82+
nodeOrder int32
83+
rpc *mockRPCClient[ID, Head]
84+
chainFamily string
85+
isLoadBalancedRPC bool
8686
}
8787

8888
func newTestNode(t *testing.T, opts testNodeOpts) testNode {
@@ -110,7 +110,7 @@ func newTestNode(t *testing.T, opts testNodeOpts) testNode {
110110
require.NoError(t, err)
111111

112112
nodeI := NewNode[ID, Head, RPCClient[ID, Head]](opts.config, opts.chainConfig, opts.lggr, nodeMetrics,
113-
opts.wsuri, opts.httpuri, opts.name, opts.id, opts.chainID, opts.nodeOrder, opts.rpc, opts.chainFamily, opts.isRPCProxy)
113+
opts.wsuri, opts.httpuri, opts.name, opts.id, opts.chainID, opts.nodeOrder, opts.rpc, opts.chainFamily, opts.isLoadBalancedRPC)
114114

115115
return testNode{
116116
nodeI.(*node[ID, Head, RPCClient[ID, Head]]),

0 commit comments

Comments
 (0)