From eea9d50a399b285163cf61d97ab88c276ef87d42 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 27 Nov 2024 12:05:55 -0500 Subject: [PATCH 01/11] Initial commit --- .github/CODEOWNERS | 6 + .github/pull_request_template.md | 39 + .gitignore | 118 + .mockery.yaml | 23 + LICENSE | 23 + Makefile | 9 + README.md | 13 +- go.mod | 45 + go.sum | 107 + multinode/README.md | 22 + multinode/ctx.go | 17 + multinode/ctx_test.go | 16 + multinode/mock_hashable_test.go | 18 + multinode/mock_head_test.go | 173 ++ multinode/mock_node_selector_test.go | 127 ++ multinode/mock_node_test.go | 563 +++++ .../mock_pool_chain_info_provider_test.go | 132 ++ multinode/mock_rpc_client_test.go | 508 +++++ multinode/mock_send_only_client_test.go | 171 ++ multinode/mock_send_only_node_test.go | 353 +++ multinode/mocks/config.go | 31 + multinode/models.go | 121 + multinode/models_test.go | 50 + multinode/multi_node.go | 364 +++ multinode/multi_node_test.go | 517 +++++ multinode/node.go | 336 +++ multinode/node_fsm.go | 377 ++++ multinode/node_fsm_test.go | 131 ++ multinode/node_lifecycle.go | 700 ++++++ multinode/node_lifecycle_test.go | 1983 +++++++++++++++++ multinode/node_selector.go | 43 + multinode/node_selector_highest_head.go | 40 + multinode/node_selector_highest_head_test.go | 176 ++ multinode/node_selector_priority_level.go | 123 + .../node_selector_priority_level_test.go | 91 + multinode/node_selector_round_robin.go | 48 + multinode/node_selector_round_robin_test.go | 61 + multinode/node_selector_test.go | 18 + multinode/node_selector_total_difficulty.go | 53 + .../node_selector_total_difficulty_test.go | 178 ++ multinode/node_test.go | 107 + multinode/poller.go | 95 + multinode/poller_test.go | 194 ++ multinode/send_only_node.go | 183 ++ multinode/send_only_node_lifecycle.go | 67 + multinode/send_only_node_test.go | 139 ++ multinode/transaction_sender.go | 284 +++ multinode/transaction_sender_test.go | 398 ++++ multinode/types.go | 83 + multinode/types_test.go | 34 + types/chain.go | 32 + types/hashable.go | 12 + types/head.go | 45 + types/mocks/head.go | 601 +++++ types/mocks/subscription.go | 111 + types/receipt.go | 14 + types/subscription.go | 16 + types/test_utils.go | 16 + utils/utils.go | 35 + 59 files changed, 10389 insertions(+), 1 deletion(-) create mode 100644 .github/CODEOWNERS create mode 100644 .github/pull_request_template.md create mode 100644 .gitignore create mode 100644 .mockery.yaml create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 go.mod create mode 100644 go.sum create mode 100644 multinode/README.md create mode 100644 multinode/ctx.go create mode 100644 multinode/ctx_test.go create mode 100644 multinode/mock_hashable_test.go create mode 100644 multinode/mock_head_test.go create mode 100644 multinode/mock_node_selector_test.go create mode 100644 multinode/mock_node_test.go create mode 100644 multinode/mock_pool_chain_info_provider_test.go create mode 100644 multinode/mock_rpc_client_test.go create mode 100644 multinode/mock_send_only_client_test.go create mode 100644 multinode/mock_send_only_node_test.go create mode 100644 multinode/mocks/config.go create mode 100644 multinode/models.go create mode 100644 multinode/models_test.go create mode 100644 multinode/multi_node.go create mode 100644 multinode/multi_node_test.go create mode 100644 multinode/node.go create mode 100644 multinode/node_fsm.go create mode 100644 multinode/node_fsm_test.go create mode 100644 multinode/node_lifecycle.go create mode 100644 multinode/node_lifecycle_test.go create mode 100644 
multinode/node_selector.go create mode 100644 multinode/node_selector_highest_head.go create mode 100644 multinode/node_selector_highest_head_test.go create mode 100644 multinode/node_selector_priority_level.go create mode 100644 multinode/node_selector_priority_level_test.go create mode 100644 multinode/node_selector_round_robin.go create mode 100644 multinode/node_selector_round_robin_test.go create mode 100644 multinode/node_selector_test.go create mode 100644 multinode/node_selector_total_difficulty.go create mode 100644 multinode/node_selector_total_difficulty_test.go create mode 100644 multinode/node_test.go create mode 100644 multinode/poller.go create mode 100644 multinode/poller_test.go create mode 100644 multinode/send_only_node.go create mode 100644 multinode/send_only_node_lifecycle.go create mode 100644 multinode/send_only_node_test.go create mode 100644 multinode/transaction_sender.go create mode 100644 multinode/transaction_sender_test.go create mode 100644 multinode/types.go create mode 100644 multinode/types_test.go create mode 100644 types/chain.go create mode 100644 types/hashable.go create mode 100644 types/head.go create mode 100644 types/mocks/head.go create mode 100644 types/mocks/subscription.go create mode 100644 types/receipt.go create mode 100644 types/subscription.go create mode 100644 types/test_utils.go create mode 100644 utils/utils.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..ee652b6 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,6 @@ +# CODEOWNERS Best Practices +# 1. Per Github docs: "Order is important; the last matching pattern takes the most precedence." +# Please define less specific codeowner paths before more specific codeowner paths in order for the more specific rule to have priority + +# global ownership +* @smartcontractkit/bix-framework diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..1244140 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,39 @@ +### Description + + + +### Requires Dependencies + + +### Resolves Dependencies + \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e5ce014 --- /dev/null +++ b/.gitignore @@ -0,0 +1,118 @@ +# dependencies +node_modules/ +tmp/ +.pnp +.pnp.js +tools/bin/abigen + +/chainlink +core/chainlink + +# SQLite +tools/clroot/db.sqlite3-shm +tools/clroot/db.sqlite3-wal + +# Tooling caches +*.tsbuildinfo +.eslintcache + +# Log files +*.log + +# misc +.DS_Store +.envrc +.env* +.dbenv +!crib/.env.example +!.github/actions/setup-postgres/.env +.direnv +.idea +.vscode/ +*.iml +debug.env +*.txt +operator_ui/install +.devenv + +# codeship +*.aes +dockercfg +env +credentials.env +gcr_creds.env + +# DB backups + +cl_backup_*.tar.gz + +# Test artifacts +core/cmd/TestClient_ImportExportP2PKeyBundle_test_key.json +output.txt +race.* +golangci-lint-output.txt +/golangci-lint/ +.covdata +core/services/job/testdata/wasm/testmodule.wasm +core/services/job/testdata/wasm/testmodule.br + +# DB state +./db/ +.s.PGSQL.5432.lock + +# can be left behind by tests +core/cmd/vrfkey1 + +# Integration Tests +integration-tests/**/logs/ +tests-*.xml +*.test +tmp-manifest-*.yaml +ztarrepo.tar.gz +**/test-ledger/* +__debug_bin* +.test_summary/ +db_dumps/ +.run.id +integration-tests/**/traces/ +integration-tests/**/integration-tests +benchmark_report.csv +benchmark_summary.json +secrets.toml +tmp_laneconfig/ + +# goreleaser builds +cosign.* +dist/ +MacOSX* + +# Test 
& linter reports +*report.xml +*report.json +*.out +dot_graphs/ + +contracts/yarn.lock + +# Ignore DevSpace cache and log folder +.devspace/ +go.work* + +# This sometimes shows up for some reason +tools/flakeytests/coverage.txt + +# Fuzz tests can create these files +**/testdata/fuzz/* + +# Runtime test configuration that might contain secrets +override*.toml + +# Python venv +.venv/ + +ocr_soak_report.csv + +vendor/* + +*.wasm +contracts/lcov.info \ No newline at end of file diff --git a/.mockery.yaml b/.mockery.yaml new file mode 100644 index 0000000..c71f222 --- /dev/null +++ b/.mockery.yaml @@ -0,0 +1,23 @@ +dir: "{{ .InterfaceDir }}/mocks" +mockname: "{{ .InterfaceName }}" +outpkg: mocks +filename: "{{ .InterfaceName | snakecase }}.go" +packages: + github.com/smartcontractkit/chainlink-framework/multinode: + config: + dir: "{{ .InterfaceDir }}" + filename: "mock_{{ .InterfaceName | snakecase }}_test.go" + inpackage: true + mockname: "mock{{ .InterfaceName | camelcase }}" + interfaces: + Node: + NodeSelector: + sendOnlyClient: + SendOnlyNode: + RPCClient: + Head: + PoolChainInfoProvider: + github.com/smartcontractkit/chainlink-framework/types: + interfaces: + Head: + Subscription: diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..e6bed04 --- /dev/null +++ b/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2018 SmartContract ChainLink Limited SEZC + +Portions of this software are licensed as follows: + +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..217accd --- /dev/null +++ b/Makefile @@ -0,0 +1,9 @@ + +.PHONY: mockery +mockery: $(mockery) ## Install mockery. + go install github.com/vektra/mockery/v2@v2.46.3 + +.PHONY: rm-mocked +rm-mocked: + grep -rl "^// Code generated by mockery" | grep .go$ | xargs -r rm + diff --git a/README.md b/README.md index fa922ce..221a692 100644 --- a/README.md +++ b/README.md @@ -1 +1,12 @@ -# chainlink-framework \ No newline at end of file +# chainlink-framework + +This repo contains common components created and maintained by the Blockchain Integrations Framework team. +These components are used across EVM and non-EVM chain integrations. + +## Components + +### MultiNode +Enables the use of multiple RPCs in chain integrations. Performs critical health checks, +load balancing, node metrics, and is used to send transactions to all RPCs and aggregate results. +MultiNode is used by all other components which require reading from or writing to the chain. 
+ diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..e30ee48 --- /dev/null +++ b/go.mod @@ -0,0 +1,45 @@ +module github.com/smartcontractkit/chainlink-framework + +go 1.23.3 + +require ( + github.com/cometbft/cometbft v0.38.15 + github.com/jpillora/backoff v1.0.0 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.20.5 + github.com/prometheus/client_model v0.6.1 + github.com/smartcontractkit/chainlink-common v0.3.1-0.20241127162636-07aa781ee1f4 + github.com/stretchr/testify v1.10.0 + go.uber.org/zap v1.27.0 + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/sasha-s/go-deadlock v0.3.5 // indirect + github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12 // indirect + github.com/stretchr/objx v0.5.2 // indirect + go.opentelemetry.io/otel v1.30.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/otel/trace v1.30.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/sys v0.26.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..c9289ca --- /dev/null +++ b/go.sum @@ -0,0 +1,107 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cometbft/cometbft v0.38.15 h1:5veFd8k1uXM27PBg9sMO3hAfRJ3vbh4OmmLf6cVrqXg= +github.com/cometbft/cometbft v0.38.15/go.mod h1:+wh6ap6xctVG+JOHwbl8pPKZ0GeqdPYqISu7F4b43cQ= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= +github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.11.0 
h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= +github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241127162636-07aa781ee1f4 h1:atCZ1jol7a+tdtgU/wNqXgliBun5H7BjGBicGL8Tj6o= +github.com/smartcontractkit/chainlink-common v0.3.1-0.20241127162636-07aa781ee1f4/go.mod h1:bQktEJf7sJ0U3SmIcXvbGUox7SmXcnSEZ4kUbT8R5Nk= +github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12 h1:NzZGjaqez21I3DU7objl3xExTH4fxYvzTqar8DC6360= +github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12/go.mod h1:fb1ZDVXACvu4frX3APHZaEBp0xi1DIm34DcA0CwTsZM= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/sys v0.26.0 
h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/multinode/README.md b/multinode/README.md new file mode 100644 index 0000000..12aef2d --- /dev/null +++ b/multinode/README.md @@ -0,0 +1,22 @@ +# MultiNode + +Enables the use of multiple RPCs in chain integrations. Performs critical health checks, +RPC selection, node metrics, and is used to send transactions to all RPCs and aggregate results. +MultiNode is used by all other components which require reading from or writing to the chain. + +## Components + +### RPCClient +Interface for wrapping an RPC of any chain type. Required for integrating a new chain with MultiNode. + +### Node +Wrapper of an RPCClient with state and lifecycles to handle health of an individual RPC. + +### MultiNode +Manages all nodes performing node selection and load balancing, health checks and metrics, and running actions across all nodes. + +### Poller +Used to poll for new heads and finalized heads within subscriptions. + +### Transaction Sender +Used to send transactions to all healthy RPCs and aggregate the results. 
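
The component list above maps directly onto the generated mocks added in this change. As a rough orientation for integrators, the sketch below shows the general shape of a chain-specific RPC wrapper exposing the methods those mocks exercise (Dial, ChainID, Ping, IsSyncing, Close). It is a minimal illustration only: the type names (`myRPC`, `myChainID`, `myHead`) are hypothetical stand-ins, subscription methods are omitted, and the authoritative interface definitions live in `multinode/types.go` of this change.

```go
// Package example is an illustrative sketch of a chain-specific adapter
// in the shape MultiNode expects. Names here are hypothetical stand-ins.
package example

import (
	"context"
	"math/big"
)

// myChainID is a hypothetical chain ID type; the real constraint is types.ID.
type myChainID string

func (id myChainID) String() string { return string(id) }

// myHead is a hypothetical head type exposing the head methods exercised
// by the generated mocks in this change.
type myHead struct {
	number     int64
	difficulty *big.Int
}

func (h *myHead) BlockNumber() int64        { return h.number }
func (h *myHead) BlockDifficulty() *big.Int { return h.difficulty }
func (h *myHead) IsValid() bool             { return h != nil }

// myRPC wraps a single chain-specific RPC endpoint.
type myRPC struct {
	url string // endpoint URL, unused in this sketch
}

// Dial establishes the underlying connection before the node is put into service.
func (r *myRPC) Dial(ctx context.Context) error { return nil }

// ChainID lets MultiNode verify the endpoint serves the configured chain.
func (r *myRPC) ChainID(ctx context.Context) (myChainID, error) { return "my-chain", nil }

// Ping and IsSyncing support the periodic health checks described above.
func (r *myRPC) Ping(ctx context.Context) error              { return nil }
func (r *myRPC) IsSyncing(ctx context.Context) (bool, error) { return false, nil }

// Close releases the connection when the node shuts down.
func (r *myRPC) Close() {}
```

In practice each such wrapper is held by a Node, and MultiNode drives the health checks, selection, and load balancing described above across all configured nodes.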
\ No newline at end of file diff --git a/multinode/ctx.go b/multinode/ctx.go new file mode 100644 index 0000000..57b2fc8 --- /dev/null +++ b/multinode/ctx.go @@ -0,0 +1,17 @@ +package client + +import "context" + +type multiNodeContextKey int + +const ( + contextKeyHeathCheckRequest multiNodeContextKey = iota + 1 +) + +func CtxAddHealthCheckFlag(ctx context.Context) context.Context { + return context.WithValue(ctx, contextKeyHeathCheckRequest, struct{}{}) +} + +func CtxIsHeathCheckRequest(ctx context.Context) bool { + return ctx.Value(contextKeyHeathCheckRequest) != nil +} diff --git a/multinode/ctx_test.go b/multinode/ctx_test.go new file mode 100644 index 0000000..822b36c --- /dev/null +++ b/multinode/ctx_test.go @@ -0,0 +1,16 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" +) + +func TestContext(t *testing.T) { + ctx := tests.Context(t) + assert.False(t, CtxIsHeathCheckRequest(ctx), "expected false for test context") + ctx = CtxAddHealthCheckFlag(ctx) + assert.True(t, CtxIsHeathCheckRequest(ctx), "expected context to contain the healthcheck flag") +} diff --git a/multinode/mock_hashable_test.go b/multinode/mock_hashable_test.go new file mode 100644 index 0000000..d9f1670 --- /dev/null +++ b/multinode/mock_hashable_test.go @@ -0,0 +1,18 @@ +package client + +import "cmp" + +// Hashable - simple implementation of types.Hashable interface to be used as concrete type in tests +type Hashable string + +func (h Hashable) Cmp(c Hashable) int { + return cmp.Compare(h, c) +} + +func (h Hashable) String() string { + return string(h) +} + +func (h Hashable) Bytes() []byte { + return []byte(h) +} diff --git a/multinode/mock_head_test.go b/multinode/mock_head_test.go new file mode 100644 index 0000000..f75bb34 --- /dev/null +++ b/multinode/mock_head_test.go @@ -0,0 +1,173 @@ +// Code generated by mockery v2.46.3. DO NOT EDIT. 
+ +package client + +import ( + big "math/big" + + mock "github.com/stretchr/testify/mock" +) + +// mockHead is an autogenerated mock type for the Head type +type mockHead struct { + mock.Mock +} + +type mockHead_Expecter struct { + mock *mock.Mock +} + +func (_m *mockHead) EXPECT() *mockHead_Expecter { + return &mockHead_Expecter{mock: &_m.Mock} +} + +// BlockDifficulty provides a mock function with given fields: +func (_m *mockHead) BlockDifficulty() *big.Int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockDifficulty") + } + + var r0 *big.Int + if rf, ok := ret.Get(0).(func() *big.Int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + return r0 +} + +// mockHead_BlockDifficulty_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockDifficulty' +type mockHead_BlockDifficulty_Call struct { + *mock.Call +} + +// BlockDifficulty is a helper method to define mock.On call +func (_e *mockHead_Expecter) BlockDifficulty() *mockHead_BlockDifficulty_Call { + return &mockHead_BlockDifficulty_Call{Call: _e.mock.On("BlockDifficulty")} +} + +func (_c *mockHead_BlockDifficulty_Call) Run(run func()) *mockHead_BlockDifficulty_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockHead_BlockDifficulty_Call) Return(_a0 *big.Int) *mockHead_BlockDifficulty_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockHead_BlockDifficulty_Call) RunAndReturn(run func() *big.Int) *mockHead_BlockDifficulty_Call { + _c.Call.Return(run) + return _c +} + +// BlockNumber provides a mock function with given fields: +func (_m *mockHead) BlockNumber() int64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// mockHead_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type mockHead_BlockNumber_Call struct { + *mock.Call +} + +// BlockNumber is a helper method to define mock.On call +func (_e *mockHead_Expecter) BlockNumber() *mockHead_BlockNumber_Call { + return &mockHead_BlockNumber_Call{Call: _e.mock.On("BlockNumber")} +} + +func (_c *mockHead_BlockNumber_Call) Run(run func()) *mockHead_BlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockHead_BlockNumber_Call) Return(_a0 int64) *mockHead_BlockNumber_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockHead_BlockNumber_Call) RunAndReturn(run func() int64) *mockHead_BlockNumber_Call { + _c.Call.Return(run) + return _c +} + +// IsValid provides a mock function with given fields: +func (_m *mockHead) IsValid() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsValid") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// mockHead_IsValid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsValid' +type mockHead_IsValid_Call struct { + *mock.Call +} + +// IsValid is a helper method to define mock.On call +func (_e *mockHead_Expecter) IsValid() *mockHead_IsValid_Call { + return &mockHead_IsValid_Call{Call: _e.mock.On("IsValid")} +} + +func (_c *mockHead_IsValid_Call) Run(run func()) *mockHead_IsValid_Call { + _c.Call.Run(func(args 
mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockHead_IsValid_Call) Return(_a0 bool) *mockHead_IsValid_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockHead_IsValid_Call) RunAndReturn(run func() bool) *mockHead_IsValid_Call { + _c.Call.Return(run) + return _c +} + +// newMockHead creates a new instance of mockHead. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockHead(t interface { + mock.TestingT + Cleanup(func()) +}) *mockHead { + mock := &mockHead{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multinode/mock_node_selector_test.go b/multinode/mock_node_selector_test.go new file mode 100644 index 0000000..71b3b53 --- /dev/null +++ b/multinode/mock_node_selector_test.go @@ -0,0 +1,127 @@ +// Code generated by mockery v2.46.3. DO NOT EDIT. + +package client + +import ( + types "github.com/smartcontractkit/chainlink-framework/types" + mock "github.com/stretchr/testify/mock" +) + +// mockNodeSelector is an autogenerated mock type for the NodeSelector type +type mockNodeSelector[CHAIN_ID types.ID, RPC any] struct { + mock.Mock +} + +type mockNodeSelector_Expecter[CHAIN_ID types.ID, RPC any] struct { + mock *mock.Mock +} + +func (_m *mockNodeSelector[CHAIN_ID, RPC]) EXPECT() *mockNodeSelector_Expecter[CHAIN_ID, RPC] { + return &mockNodeSelector_Expecter[CHAIN_ID, RPC]{mock: &_m.Mock} +} + +// Name provides a mock function with given fields: +func (_m *mockNodeSelector[CHAIN_ID, RPC]) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// mockNodeSelector_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' +type mockNodeSelector_Name_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// Name is a helper method to define mock.On call +func (_e *mockNodeSelector_Expecter[CHAIN_ID, RPC]) Name() *mockNodeSelector_Name_Call[CHAIN_ID, RPC] { + return &mockNodeSelector_Name_Call[CHAIN_ID, RPC]{Call: _e.mock.On("Name")} +} + +func (_c *mockNodeSelector_Name_Call[CHAIN_ID, RPC]) Run(run func()) *mockNodeSelector_Name_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNodeSelector_Name_Call[CHAIN_ID, RPC]) Return(_a0 string) *mockNodeSelector_Name_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockNodeSelector_Name_Call[CHAIN_ID, RPC]) RunAndReturn(run func() string) *mockNodeSelector_Name_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// Select provides a mock function with given fields: +func (_m *mockNodeSelector[CHAIN_ID, RPC]) Select() Node[CHAIN_ID, RPC] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Select") + } + + var r0 Node[CHAIN_ID, RPC] + if rf, ok := ret.Get(0).(func() Node[CHAIN_ID, RPC]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(Node[CHAIN_ID, RPC]) + } + } + + return r0 +} + +// mockNodeSelector_Select_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Select' +type mockNodeSelector_Select_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// Select is a helper method to define mock.On call +func (_e 
*mockNodeSelector_Expecter[CHAIN_ID, RPC]) Select() *mockNodeSelector_Select_Call[CHAIN_ID, RPC] { + return &mockNodeSelector_Select_Call[CHAIN_ID, RPC]{Call: _e.mock.On("Select")} +} + +func (_c *mockNodeSelector_Select_Call[CHAIN_ID, RPC]) Run(run func()) *mockNodeSelector_Select_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNodeSelector_Select_Call[CHAIN_ID, RPC]) Return(_a0 Node[CHAIN_ID, RPC]) *mockNodeSelector_Select_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockNodeSelector_Select_Call[CHAIN_ID, RPC]) RunAndReturn(run func() Node[CHAIN_ID, RPC]) *mockNodeSelector_Select_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// newMockNodeSelector creates a new instance of mockNodeSelector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockNodeSelector[CHAIN_ID types.ID, RPC any](t interface { + mock.TestingT + Cleanup(func()) +}) *mockNodeSelector[CHAIN_ID, RPC] { + mock := &mockNodeSelector[CHAIN_ID, RPC]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multinode/mock_node_test.go b/multinode/mock_node_test.go new file mode 100644 index 0000000..87a0194 --- /dev/null +++ b/multinode/mock_node_test.go @@ -0,0 +1,563 @@ +// Code generated by mockery v2.46.3. DO NOT EDIT. + +package client + +import ( + context "context" + + types "github.com/smartcontractkit/chainlink-framework/types" + mock "github.com/stretchr/testify/mock" +) + +// mockNode is an autogenerated mock type for the Node type +type mockNode[CHAIN_ID types.ID, RPC any] struct { + mock.Mock +} + +type mockNode_Expecter[CHAIN_ID types.ID, RPC any] struct { + mock *mock.Mock +} + +func (_m *mockNode[CHAIN_ID, RPC]) EXPECT() *mockNode_Expecter[CHAIN_ID, RPC] { + return &mockNode_Expecter[CHAIN_ID, RPC]{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockNode_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type mockNode_Close_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) Close() *mockNode_Close_Call[CHAIN_ID, RPC] { + return &mockNode_Close_Call[CHAIN_ID, RPC]{Call: _e.mock.On("Close")} +} + +func (_c *mockNode_Close_Call[CHAIN_ID, RPC]) Run(run func()) *mockNode_Close_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNode_Close_Call[CHAIN_ID, RPC]) Return(_a0 error) *mockNode_Close_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockNode_Close_Call[CHAIN_ID, RPC]) RunAndReturn(run func() error) *mockNode_Close_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// ConfiguredChainID provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) ConfiguredChainID() CHAIN_ID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ConfiguredChainID") + } + + var r0 CHAIN_ID + if rf, ok := ret.Get(0).(func() CHAIN_ID); ok { + r0 = rf() + } else { 
+ r0 = ret.Get(0).(CHAIN_ID) + } + + return r0 +} + +// mockNode_ConfiguredChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ConfiguredChainID' +type mockNode_ConfiguredChainID_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// ConfiguredChainID is a helper method to define mock.On call +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) ConfiguredChainID() *mockNode_ConfiguredChainID_Call[CHAIN_ID, RPC] { + return &mockNode_ConfiguredChainID_Call[CHAIN_ID, RPC]{Call: _e.mock.On("ConfiguredChainID")} +} + +func (_c *mockNode_ConfiguredChainID_Call[CHAIN_ID, RPC]) Run(run func()) *mockNode_ConfiguredChainID_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNode_ConfiguredChainID_Call[CHAIN_ID, RPC]) Return(_a0 CHAIN_ID) *mockNode_ConfiguredChainID_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockNode_ConfiguredChainID_Call[CHAIN_ID, RPC]) RunAndReturn(run func() CHAIN_ID) *mockNode_ConfiguredChainID_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// HighestUserObservations provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) HighestUserObservations() ChainInfo { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HighestUserObservations") + } + + var r0 ChainInfo + if rf, ok := ret.Get(0).(func() ChainInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ChainInfo) + } + + return r0 +} + +// mockNode_HighestUserObservations_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HighestUserObservations' +type mockNode_HighestUserObservations_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// HighestUserObservations is a helper method to define mock.On call +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) HighestUserObservations() *mockNode_HighestUserObservations_Call[CHAIN_ID, RPC] { + return &mockNode_HighestUserObservations_Call[CHAIN_ID, RPC]{Call: _e.mock.On("HighestUserObservations")} +} + +func (_c *mockNode_HighestUserObservations_Call[CHAIN_ID, RPC]) Run(run func()) *mockNode_HighestUserObservations_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNode_HighestUserObservations_Call[CHAIN_ID, RPC]) Return(_a0 ChainInfo) *mockNode_HighestUserObservations_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockNode_HighestUserObservations_Call[CHAIN_ID, RPC]) RunAndReturn(run func() ChainInfo) *mockNode_HighestUserObservations_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// Name provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// mockNode_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' +type mockNode_Name_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// Name is a helper method to define mock.On call +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) Name() *mockNode_Name_Call[CHAIN_ID, RPC] { + return &mockNode_Name_Call[CHAIN_ID, RPC]{Call: _e.mock.On("Name")} +} + +func (_c *mockNode_Name_Call[CHAIN_ID, RPC]) Run(run func()) *mockNode_Name_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args 
mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNode_Name_Call[CHAIN_ID, RPC]) Return(_a0 string) *mockNode_Name_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockNode_Name_Call[CHAIN_ID, RPC]) RunAndReturn(run func() string) *mockNode_Name_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// Order provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) Order() int32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Order") + } + + var r0 int32 + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + return r0 +} + +// mockNode_Order_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Order' +type mockNode_Order_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// Order is a helper method to define mock.On call +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) Order() *mockNode_Order_Call[CHAIN_ID, RPC] { + return &mockNode_Order_Call[CHAIN_ID, RPC]{Call: _e.mock.On("Order")} +} + +func (_c *mockNode_Order_Call[CHAIN_ID, RPC]) Run(run func()) *mockNode_Order_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNode_Order_Call[CHAIN_ID, RPC]) Return(_a0 int32) *mockNode_Order_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockNode_Order_Call[CHAIN_ID, RPC]) RunAndReturn(run func() int32) *mockNode_Order_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// RPC provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) RPC() RPC { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RPC") + } + + var r0 RPC + if rf, ok := ret.Get(0).(func() RPC); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(RPC) + } + + return r0 +} + +// mockNode_RPC_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RPC' +type mockNode_RPC_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// RPC is a helper method to define mock.On call +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) RPC() *mockNode_RPC_Call[CHAIN_ID, RPC] { + return &mockNode_RPC_Call[CHAIN_ID, RPC]{Call: _e.mock.On("RPC")} +} + +func (_c *mockNode_RPC_Call[CHAIN_ID, RPC]) Run(run func()) *mockNode_RPC_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNode_RPC_Call[CHAIN_ID, RPC]) Return(_a0 RPC) *mockNode_RPC_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockNode_RPC_Call[CHAIN_ID, RPC]) RunAndReturn(run func() RPC) *mockNode_RPC_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// SetPoolChainInfoProvider provides a mock function with given fields: _a0 +func (_m *mockNode[CHAIN_ID, RPC]) SetPoolChainInfoProvider(_a0 PoolChainInfoProvider) { + _m.Called(_a0) +} + +// mockNode_SetPoolChainInfoProvider_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetPoolChainInfoProvider' +type mockNode_SetPoolChainInfoProvider_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// SetPoolChainInfoProvider is a helper method to define mock.On call +// - _a0 PoolChainInfoProvider +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) SetPoolChainInfoProvider(_a0 interface{}) *mockNode_SetPoolChainInfoProvider_Call[CHAIN_ID, RPC] { + return &mockNode_SetPoolChainInfoProvider_Call[CHAIN_ID, RPC]{Call: _e.mock.On("SetPoolChainInfoProvider", 
_a0)} +} + +func (_c *mockNode_SetPoolChainInfoProvider_Call[CHAIN_ID, RPC]) Run(run func(_a0 PoolChainInfoProvider)) *mockNode_SetPoolChainInfoProvider_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(PoolChainInfoProvider)) + }) + return _c +} + +func (_c *mockNode_SetPoolChainInfoProvider_Call[CHAIN_ID, RPC]) Return() *mockNode_SetPoolChainInfoProvider_Call[CHAIN_ID, RPC] { + _c.Call.Return() + return _c +} + +func (_c *mockNode_SetPoolChainInfoProvider_Call[CHAIN_ID, RPC]) RunAndReturn(run func(PoolChainInfoProvider)) *mockNode_SetPoolChainInfoProvider_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// Start provides a mock function with given fields: _a0 +func (_m *mockNode[CHAIN_ID, RPC]) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockNode_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type mockNode_Start_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - _a0 context.Context +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) Start(_a0 interface{}) *mockNode_Start_Call[CHAIN_ID, RPC] { + return &mockNode_Start_Call[CHAIN_ID, RPC]{Call: _e.mock.On("Start", _a0)} +} + +func (_c *mockNode_Start_Call[CHAIN_ID, RPC]) Run(run func(_a0 context.Context)) *mockNode_Start_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockNode_Start_Call[CHAIN_ID, RPC]) Return(_a0 error) *mockNode_Start_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockNode_Start_Call[CHAIN_ID, RPC]) RunAndReturn(run func(context.Context) error) *mockNode_Start_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// State provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) State() nodeState { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for State") + } + + var r0 nodeState + if rf, ok := ret.Get(0).(func() nodeState); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(nodeState) + } + + return r0 +} + +// mockNode_State_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'State' +type mockNode_State_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// State is a helper method to define mock.On call +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) State() *mockNode_State_Call[CHAIN_ID, RPC] { + return &mockNode_State_Call[CHAIN_ID, RPC]{Call: _e.mock.On("State")} +} + +func (_c *mockNode_State_Call[CHAIN_ID, RPC]) Run(run func()) *mockNode_State_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNode_State_Call[CHAIN_ID, RPC]) Return(_a0 nodeState) *mockNode_State_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockNode_State_Call[CHAIN_ID, RPC]) RunAndReturn(run func() nodeState) *mockNode_State_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// StateAndLatest provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) StateAndLatest() (nodeState, ChainInfo) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for StateAndLatest") + } + + var r0 nodeState + var r1 
ChainInfo + if rf, ok := ret.Get(0).(func() (nodeState, ChainInfo)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() nodeState); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(nodeState) + } + + if rf, ok := ret.Get(1).(func() ChainInfo); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(ChainInfo) + } + + return r0, r1 +} + +// mockNode_StateAndLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StateAndLatest' +type mockNode_StateAndLatest_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// StateAndLatest is a helper method to define mock.On call +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) StateAndLatest() *mockNode_StateAndLatest_Call[CHAIN_ID, RPC] { + return &mockNode_StateAndLatest_Call[CHAIN_ID, RPC]{Call: _e.mock.On("StateAndLatest")} +} + +func (_c *mockNode_StateAndLatest_Call[CHAIN_ID, RPC]) Run(run func()) *mockNode_StateAndLatest_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNode_StateAndLatest_Call[CHAIN_ID, RPC]) Return(_a0 nodeState, _a1 ChainInfo) *mockNode_StateAndLatest_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *mockNode_StateAndLatest_Call[CHAIN_ID, RPC]) RunAndReturn(run func() (nodeState, ChainInfo)) *mockNode_StateAndLatest_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// String provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// mockNode_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' +type mockNode_String_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// String is a helper method to define mock.On call +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) String() *mockNode_String_Call[CHAIN_ID, RPC] { + return &mockNode_String_Call[CHAIN_ID, RPC]{Call: _e.mock.On("String")} +} + +func (_c *mockNode_String_Call[CHAIN_ID, RPC]) Run(run func()) *mockNode_String_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNode_String_Call[CHAIN_ID, RPC]) Return(_a0 string) *mockNode_String_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockNode_String_Call[CHAIN_ID, RPC]) RunAndReturn(run func() string) *mockNode_String_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// UnsubscribeAllExceptAliveLoop provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) UnsubscribeAllExceptAliveLoop() { + _m.Called() +} + +// mockNode_UnsubscribeAllExceptAliveLoop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnsubscribeAllExceptAliveLoop' +type mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// UnsubscribeAllExceptAliveLoop is a helper method to define mock.On call +func (_e *mockNode_Expecter[CHAIN_ID, RPC]) UnsubscribeAllExceptAliveLoop() *mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID, RPC] { + return &mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID, RPC]{Call: _e.mock.On("UnsubscribeAllExceptAliveLoop")} +} + +func (_c *mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID, RPC]) Run(run func()) 
*mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID, RPC]) Return() *mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID, RPC] { + _c.Call.Return() + return _c +} + +func (_c *mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID, RPC]) RunAndReturn(run func()) *mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// newMockNode creates a new instance of mockNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockNode[CHAIN_ID types.ID, RPC any](t interface { + mock.TestingT + Cleanup(func()) +}) *mockNode[CHAIN_ID, RPC] { + mock := &mockNode[CHAIN_ID, RPC]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multinode/mock_pool_chain_info_provider_test.go b/multinode/mock_pool_chain_info_provider_test.go new file mode 100644 index 0000000..c44f10b --- /dev/null +++ b/multinode/mock_pool_chain_info_provider_test.go @@ -0,0 +1,132 @@ +// Code generated by mockery v2.46.3. DO NOT EDIT. + +package client + +import mock "github.com/stretchr/testify/mock" + +// mockPoolChainInfoProvider is an autogenerated mock type for the PoolChainInfoProvider type +type mockPoolChainInfoProvider struct { + mock.Mock +} + +type mockPoolChainInfoProvider_Expecter struct { + mock *mock.Mock +} + +func (_m *mockPoolChainInfoProvider) EXPECT() *mockPoolChainInfoProvider_Expecter { + return &mockPoolChainInfoProvider_Expecter{mock: &_m.Mock} +} + +// HighestUserObservations provides a mock function with given fields: +func (_m *mockPoolChainInfoProvider) HighestUserObservations() ChainInfo { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HighestUserObservations") + } + + var r0 ChainInfo + if rf, ok := ret.Get(0).(func() ChainInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ChainInfo) + } + + return r0 +} + +// mockPoolChainInfoProvider_HighestUserObservations_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HighestUserObservations' +type mockPoolChainInfoProvider_HighestUserObservations_Call struct { + *mock.Call +} + +// HighestUserObservations is a helper method to define mock.On call +func (_e *mockPoolChainInfoProvider_Expecter) HighestUserObservations() *mockPoolChainInfoProvider_HighestUserObservations_Call { + return &mockPoolChainInfoProvider_HighestUserObservations_Call{Call: _e.mock.On("HighestUserObservations")} +} + +func (_c *mockPoolChainInfoProvider_HighestUserObservations_Call) Run(run func()) *mockPoolChainInfoProvider_HighestUserObservations_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockPoolChainInfoProvider_HighestUserObservations_Call) Return(_a0 ChainInfo) *mockPoolChainInfoProvider_HighestUserObservations_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockPoolChainInfoProvider_HighestUserObservations_Call) RunAndReturn(run func() ChainInfo) *mockPoolChainInfoProvider_HighestUserObservations_Call { + _c.Call.Return(run) + return _c +} + +// LatestChainInfo provides a mock function with given fields: +func (_m *mockPoolChainInfoProvider) LatestChainInfo() (int, ChainInfo) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestChainInfo") + } + + var r0 
int + var r1 ChainInfo + if rf, ok := ret.Get(0).(func() (int, ChainInfo)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func() ChainInfo); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(ChainInfo) + } + + return r0, r1 +} + +// mockPoolChainInfoProvider_LatestChainInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestChainInfo' +type mockPoolChainInfoProvider_LatestChainInfo_Call struct { + *mock.Call +} + +// LatestChainInfo is a helper method to define mock.On call +func (_e *mockPoolChainInfoProvider_Expecter) LatestChainInfo() *mockPoolChainInfoProvider_LatestChainInfo_Call { + return &mockPoolChainInfoProvider_LatestChainInfo_Call{Call: _e.mock.On("LatestChainInfo")} +} + +func (_c *mockPoolChainInfoProvider_LatestChainInfo_Call) Run(run func()) *mockPoolChainInfoProvider_LatestChainInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockPoolChainInfoProvider_LatestChainInfo_Call) Return(_a0 int, _a1 ChainInfo) *mockPoolChainInfoProvider_LatestChainInfo_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *mockPoolChainInfoProvider_LatestChainInfo_Call) RunAndReturn(run func() (int, ChainInfo)) *mockPoolChainInfoProvider_LatestChainInfo_Call { + _c.Call.Return(run) + return _c +} + +// newMockPoolChainInfoProvider creates a new instance of mockPoolChainInfoProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockPoolChainInfoProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *mockPoolChainInfoProvider { + mock := &mockPoolChainInfoProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multinode/mock_rpc_client_test.go b/multinode/mock_rpc_client_test.go new file mode 100644 index 0000000..6168480 --- /dev/null +++ b/multinode/mock_rpc_client_test.go @@ -0,0 +1,508 @@ +// Code generated by mockery v2.46.3. DO NOT EDIT. 
+ +package client + +import ( + context "context" + + types "github.com/smartcontractkit/chainlink-framework/types" + mock "github.com/stretchr/testify/mock" +) + +// mockRPCClient is an autogenerated mock type for the RPCClient type +type mockRPCClient[CHAIN_ID types.ID, HEAD Head] struct { + mock.Mock +} + +type mockRPCClient_Expecter[CHAIN_ID types.ID, HEAD Head] struct { + mock *mock.Mock +} + +func (_m *mockRPCClient[CHAIN_ID, HEAD]) EXPECT() *mockRPCClient_Expecter[CHAIN_ID, HEAD] { + return &mockRPCClient_Expecter[CHAIN_ID, HEAD]{mock: &_m.Mock} +} + +// ChainID provides a mock function with given fields: ctx +func (_m *mockRPCClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 CHAIN_ID + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (CHAIN_ID, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) CHAIN_ID); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(CHAIN_ID) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockRPCClient_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' +type mockRPCClient_ChainID_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// ChainID is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockRPCClient_Expecter[CHAIN_ID, HEAD]) ChainID(ctx interface{}) *mockRPCClient_ChainID_Call[CHAIN_ID, HEAD] { + return &mockRPCClient_ChainID_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("ChainID", ctx)} +} + +func (_c *mockRPCClient_ChainID_Call[CHAIN_ID, HEAD]) Run(run func(ctx context.Context)) *mockRPCClient_ChainID_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockRPCClient_ChainID_Call[CHAIN_ID, HEAD]) Return(_a0 CHAIN_ID, _a1 error) *mockRPCClient_ChainID_Call[CHAIN_ID, HEAD] { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *mockRPCClient_ChainID_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (CHAIN_ID, error)) *mockRPCClient_ChainID_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + +// Close provides a mock function with given fields: +func (_m *mockRPCClient[CHAIN_ID, HEAD]) Close() { + _m.Called() +} + +// mockRPCClient_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type mockRPCClient_Close_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *mockRPCClient_Expecter[CHAIN_ID, HEAD]) Close() *mockRPCClient_Close_Call[CHAIN_ID, HEAD] { + return &mockRPCClient_Close_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("Close")} +} + +func (_c *mockRPCClient_Close_Call[CHAIN_ID, HEAD]) Run(run func()) *mockRPCClient_Close_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockRPCClient_Close_Call[CHAIN_ID, HEAD]) Return() *mockRPCClient_Close_Call[CHAIN_ID, HEAD] { + _c.Call.Return() + return _c +} + +func (_c *mockRPCClient_Close_Call[CHAIN_ID, HEAD]) RunAndReturn(run func()) *mockRPCClient_Close_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + +// Dial provides a mock function with given fields: ctx +func (_m *mockRPCClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { + ret := _m.Called(ctx) + + if 
len(ret) == 0 { + panic("no return value specified for Dial") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockRPCClient_Dial_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Dial' +type mockRPCClient_Dial_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// Dial is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockRPCClient_Expecter[CHAIN_ID, HEAD]) Dial(ctx interface{}) *mockRPCClient_Dial_Call[CHAIN_ID, HEAD] { + return &mockRPCClient_Dial_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("Dial", ctx)} +} + +func (_c *mockRPCClient_Dial_Call[CHAIN_ID, HEAD]) Run(run func(ctx context.Context)) *mockRPCClient_Dial_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockRPCClient_Dial_Call[CHAIN_ID, HEAD]) Return(_a0 error) *mockRPCClient_Dial_Call[CHAIN_ID, HEAD] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockRPCClient_Dial_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) error) *mockRPCClient_Dial_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + +// GetInterceptedChainInfo provides a mock function with given fields: +func (_m *mockRPCClient[CHAIN_ID, HEAD]) GetInterceptedChainInfo() (ChainInfo, ChainInfo) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetInterceptedChainInfo") + } + + var r0 ChainInfo + var r1 ChainInfo + if rf, ok := ret.Get(0).(func() (ChainInfo, ChainInfo)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() ChainInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ChainInfo) + } + + if rf, ok := ret.Get(1).(func() ChainInfo); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(ChainInfo) + } + + return r0, r1 +} + +// mockRPCClient_GetInterceptedChainInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInterceptedChainInfo' +type mockRPCClient_GetInterceptedChainInfo_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// GetInterceptedChainInfo is a helper method to define mock.On call +func (_e *mockRPCClient_Expecter[CHAIN_ID, HEAD]) GetInterceptedChainInfo() *mockRPCClient_GetInterceptedChainInfo_Call[CHAIN_ID, HEAD] { + return &mockRPCClient_GetInterceptedChainInfo_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("GetInterceptedChainInfo")} +} + +func (_c *mockRPCClient_GetInterceptedChainInfo_Call[CHAIN_ID, HEAD]) Run(run func()) *mockRPCClient_GetInterceptedChainInfo_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockRPCClient_GetInterceptedChainInfo_Call[CHAIN_ID, HEAD]) Return(latest ChainInfo, highestUserObservations ChainInfo) *mockRPCClient_GetInterceptedChainInfo_Call[CHAIN_ID, HEAD] { + _c.Call.Return(latest, highestUserObservations) + return _c +} + +func (_c *mockRPCClient_GetInterceptedChainInfo_Call[CHAIN_ID, HEAD]) RunAndReturn(run func() (ChainInfo, ChainInfo)) *mockRPCClient_GetInterceptedChainInfo_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + +// IsSyncing provides a mock function with given fields: ctx +func (_m *mockRPCClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for IsSyncing") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { + 
return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockRPCClient_IsSyncing_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsSyncing' +type mockRPCClient_IsSyncing_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// IsSyncing is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockRPCClient_Expecter[CHAIN_ID, HEAD]) IsSyncing(ctx interface{}) *mockRPCClient_IsSyncing_Call[CHAIN_ID, HEAD] { + return &mockRPCClient_IsSyncing_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("IsSyncing", ctx)} +} + +func (_c *mockRPCClient_IsSyncing_Call[CHAIN_ID, HEAD]) Run(run func(ctx context.Context)) *mockRPCClient_IsSyncing_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockRPCClient_IsSyncing_Call[CHAIN_ID, HEAD]) Return(_a0 bool, _a1 error) *mockRPCClient_IsSyncing_Call[CHAIN_ID, HEAD] { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *mockRPCClient_IsSyncing_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (bool, error)) *mockRPCClient_IsSyncing_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + +// Ping provides a mock function with given fields: _a0 +func (_m *mockRPCClient[CHAIN_ID, HEAD]) Ping(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Ping") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockRPCClient_Ping_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Ping' +type mockRPCClient_Ping_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// Ping is a helper method to define mock.On call +// - _a0 context.Context +func (_e *mockRPCClient_Expecter[CHAIN_ID, HEAD]) Ping(_a0 interface{}) *mockRPCClient_Ping_Call[CHAIN_ID, HEAD] { + return &mockRPCClient_Ping_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("Ping", _a0)} +} + +func (_c *mockRPCClient_Ping_Call[CHAIN_ID, HEAD]) Run(run func(_a0 context.Context)) *mockRPCClient_Ping_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockRPCClient_Ping_Call[CHAIN_ID, HEAD]) Return(_a0 error) *mockRPCClient_Ping_Call[CHAIN_ID, HEAD] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockRPCClient_Ping_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) error) *mockRPCClient_Ping_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + +// SubscribeToFinalizedHeads provides a mock function with given fields: ctx +func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToFinalizedHeads") + } + + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan HEAD) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockRPCClient_SubscribeToFinalizedHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToFinalizedHeads' +type mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// SubscribeToFinalizedHeads is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockRPCClient_Expecter[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx interface{}) *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + return &mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("SubscribeToFinalizedHeads", ctx)} +} + +func (_c *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) Run(run func(ctx context.Context)) *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) Return(_a0 <-chan HEAD, _a1 types.Subscription, _a2 error) *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + +// SubscribeToHeads provides a mock function with given fields: ctx +func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToHeads") + } + + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan HEAD) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockRPCClient_SubscribeToHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToHeads' +type mockRPCClient_SubscribeToHeads_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// SubscribeToHeads is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockRPCClient_Expecter[CHAIN_ID, HEAD]) SubscribeToHeads(ctx interface{}) *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + return &mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("SubscribeToHeads", ctx)} +} + +func (_c *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) Run(run func(ctx context.Context)) *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c 
*mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) Return(_a0 <-chan HEAD, _a1 types.Subscription, _a2 error) *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + +// UnsubscribeAllExcept provides a mock function with given fields: subs +func (_m *mockRPCClient[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...types.Subscription) { + _va := make([]interface{}, len(subs)) + for _i := range subs { + _va[_i] = subs[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + _m.Called(_ca...) +} + +// mockRPCClient_UnsubscribeAllExcept_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnsubscribeAllExcept' +type mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// UnsubscribeAllExcept is a helper method to define mock.On call +// - subs ...types.Subscription +func (_e *mockRPCClient_Expecter[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...interface{}) *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD] { + return &mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("UnsubscribeAllExcept", + append([]interface{}{}, subs...)...)} +} + +func (_c *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD]) Run(run func(subs ...types.Subscription)) *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]types.Subscription, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(types.Subscription) + } + } + run(variadicArgs...) + }) + return _c +} + +func (_c *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD]) Return() *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD] { + _c.Call.Return() + return _c +} + +func (_c *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(...types.Subscription)) *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + +// newMockRPCClient creates a new instance of mockRPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockRPCClient[CHAIN_ID types.ID, HEAD Head](t interface { + mock.TestingT + Cleanup(func()) +}) *mockRPCClient[CHAIN_ID, HEAD] { + mock := &mockRPCClient[CHAIN_ID, HEAD]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multinode/mock_send_only_client_test.go b/multinode/mock_send_only_client_test.go new file mode 100644 index 0000000..46c0de3 --- /dev/null +++ b/multinode/mock_send_only_client_test.go @@ -0,0 +1,171 @@ +// Code generated by mockery v2.46.3. DO NOT EDIT. 
+ +package client + +import ( + context "context" + + types "github.com/smartcontractkit/chainlink-framework/types" + mock "github.com/stretchr/testify/mock" +) + +// mockSendOnlyClient is an autogenerated mock type for the sendOnlyClient type +type mockSendOnlyClient[CHAIN_ID types.ID] struct { + mock.Mock +} + +type mockSendOnlyClient_Expecter[CHAIN_ID types.ID] struct { + mock *mock.Mock +} + +func (_m *mockSendOnlyClient[CHAIN_ID]) EXPECT() *mockSendOnlyClient_Expecter[CHAIN_ID] { + return &mockSendOnlyClient_Expecter[CHAIN_ID]{mock: &_m.Mock} +} + +// ChainID provides a mock function with given fields: _a0 +func (_m *mockSendOnlyClient[CHAIN_ID]) ChainID(_a0 context.Context) (CHAIN_ID, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 CHAIN_ID + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (CHAIN_ID, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) CHAIN_ID); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(CHAIN_ID) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockSendOnlyClient_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' +type mockSendOnlyClient_ChainID_Call[CHAIN_ID types.ID] struct { + *mock.Call +} + +// ChainID is a helper method to define mock.On call +// - _a0 context.Context +func (_e *mockSendOnlyClient_Expecter[CHAIN_ID]) ChainID(_a0 interface{}) *mockSendOnlyClient_ChainID_Call[CHAIN_ID] { + return &mockSendOnlyClient_ChainID_Call[CHAIN_ID]{Call: _e.mock.On("ChainID", _a0)} +} + +func (_c *mockSendOnlyClient_ChainID_Call[CHAIN_ID]) Run(run func(_a0 context.Context)) *mockSendOnlyClient_ChainID_Call[CHAIN_ID] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockSendOnlyClient_ChainID_Call[CHAIN_ID]) Return(_a0 CHAIN_ID, _a1 error) *mockSendOnlyClient_ChainID_Call[CHAIN_ID] { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *mockSendOnlyClient_ChainID_Call[CHAIN_ID]) RunAndReturn(run func(context.Context) (CHAIN_ID, error)) *mockSendOnlyClient_ChainID_Call[CHAIN_ID] { + _c.Call.Return(run) + return _c +} + +// Close provides a mock function with given fields: +func (_m *mockSendOnlyClient[CHAIN_ID]) Close() { + _m.Called() +} + +// mockSendOnlyClient_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type mockSendOnlyClient_Close_Call[CHAIN_ID types.ID] struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *mockSendOnlyClient_Expecter[CHAIN_ID]) Close() *mockSendOnlyClient_Close_Call[CHAIN_ID] { + return &mockSendOnlyClient_Close_Call[CHAIN_ID]{Call: _e.mock.On("Close")} +} + +func (_c *mockSendOnlyClient_Close_Call[CHAIN_ID]) Run(run func()) *mockSendOnlyClient_Close_Call[CHAIN_ID] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockSendOnlyClient_Close_Call[CHAIN_ID]) Return() *mockSendOnlyClient_Close_Call[CHAIN_ID] { + _c.Call.Return() + return _c +} + +func (_c *mockSendOnlyClient_Close_Call[CHAIN_ID]) RunAndReturn(run func()) *mockSendOnlyClient_Close_Call[CHAIN_ID] { + _c.Call.Return(run) + return _c +} + +// Dial provides a mock function with given fields: ctx +func (_m *mockSendOnlyClient[CHAIN_ID]) Dial(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no 
return value specified for Dial") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockSendOnlyClient_Dial_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Dial' +type mockSendOnlyClient_Dial_Call[CHAIN_ID types.ID] struct { + *mock.Call +} + +// Dial is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockSendOnlyClient_Expecter[CHAIN_ID]) Dial(ctx interface{}) *mockSendOnlyClient_Dial_Call[CHAIN_ID] { + return &mockSendOnlyClient_Dial_Call[CHAIN_ID]{Call: _e.mock.On("Dial", ctx)} +} + +func (_c *mockSendOnlyClient_Dial_Call[CHAIN_ID]) Run(run func(ctx context.Context)) *mockSendOnlyClient_Dial_Call[CHAIN_ID] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockSendOnlyClient_Dial_Call[CHAIN_ID]) Return(_a0 error) *mockSendOnlyClient_Dial_Call[CHAIN_ID] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockSendOnlyClient_Dial_Call[CHAIN_ID]) RunAndReturn(run func(context.Context) error) *mockSendOnlyClient_Dial_Call[CHAIN_ID] { + _c.Call.Return(run) + return _c +} + +// newMockSendOnlyClient creates a new instance of mockSendOnlyClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockSendOnlyClient[CHAIN_ID types.ID](t interface { + mock.TestingT + Cleanup(func()) +}) *mockSendOnlyClient[CHAIN_ID] { + mock := &mockSendOnlyClient[CHAIN_ID]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multinode/mock_send_only_node_test.go b/multinode/mock_send_only_node_test.go new file mode 100644 index 0000000..83a8c2d --- /dev/null +++ b/multinode/mock_send_only_node_test.go @@ -0,0 +1,353 @@ +// Code generated by mockery v2.46.3. DO NOT EDIT. 
+ +package client + +import ( + context "context" + + types "github.com/smartcontractkit/chainlink-framework/types" + mock "github.com/stretchr/testify/mock" +) + +// mockSendOnlyNode is an autogenerated mock type for the SendOnlyNode type +type mockSendOnlyNode[CHAIN_ID types.ID, RPC any] struct { + mock.Mock +} + +type mockSendOnlyNode_Expecter[CHAIN_ID types.ID, RPC any] struct { + mock *mock.Mock +} + +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) EXPECT() *mockSendOnlyNode_Expecter[CHAIN_ID, RPC] { + return &mockSendOnlyNode_Expecter[CHAIN_ID, RPC]{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockSendOnlyNode_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type mockSendOnlyNode_Close_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *mockSendOnlyNode_Expecter[CHAIN_ID, RPC]) Close() *mockSendOnlyNode_Close_Call[CHAIN_ID, RPC] { + return &mockSendOnlyNode_Close_Call[CHAIN_ID, RPC]{Call: _e.mock.On("Close")} +} + +func (_c *mockSendOnlyNode_Close_Call[CHAIN_ID, RPC]) Run(run func()) *mockSendOnlyNode_Close_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockSendOnlyNode_Close_Call[CHAIN_ID, RPC]) Return(_a0 error) *mockSendOnlyNode_Close_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockSendOnlyNode_Close_Call[CHAIN_ID, RPC]) RunAndReturn(run func() error) *mockSendOnlyNode_Close_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// ConfiguredChainID provides a mock function with given fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) ConfiguredChainID() CHAIN_ID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ConfiguredChainID") + } + + var r0 CHAIN_ID + if rf, ok := ret.Get(0).(func() CHAIN_ID); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(CHAIN_ID) + } + + return r0 +} + +// mockSendOnlyNode_ConfiguredChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ConfiguredChainID' +type mockSendOnlyNode_ConfiguredChainID_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// ConfiguredChainID is a helper method to define mock.On call +func (_e *mockSendOnlyNode_Expecter[CHAIN_ID, RPC]) ConfiguredChainID() *mockSendOnlyNode_ConfiguredChainID_Call[CHAIN_ID, RPC] { + return &mockSendOnlyNode_ConfiguredChainID_Call[CHAIN_ID, RPC]{Call: _e.mock.On("ConfiguredChainID")} +} + +func (_c *mockSendOnlyNode_ConfiguredChainID_Call[CHAIN_ID, RPC]) Run(run func()) *mockSendOnlyNode_ConfiguredChainID_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockSendOnlyNode_ConfiguredChainID_Call[CHAIN_ID, RPC]) Return(_a0 CHAIN_ID) *mockSendOnlyNode_ConfiguredChainID_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockSendOnlyNode_ConfiguredChainID_Call[CHAIN_ID, RPC]) RunAndReturn(run func() CHAIN_ID) *mockSendOnlyNode_ConfiguredChainID_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// Name provides a mock function with given fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Name() string { + 
ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// mockSendOnlyNode_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' +type mockSendOnlyNode_Name_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// Name is a helper method to define mock.On call +func (_e *mockSendOnlyNode_Expecter[CHAIN_ID, RPC]) Name() *mockSendOnlyNode_Name_Call[CHAIN_ID, RPC] { + return &mockSendOnlyNode_Name_Call[CHAIN_ID, RPC]{Call: _e.mock.On("Name")} +} + +func (_c *mockSendOnlyNode_Name_Call[CHAIN_ID, RPC]) Run(run func()) *mockSendOnlyNode_Name_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockSendOnlyNode_Name_Call[CHAIN_ID, RPC]) Return(_a0 string) *mockSendOnlyNode_Name_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockSendOnlyNode_Name_Call[CHAIN_ID, RPC]) RunAndReturn(run func() string) *mockSendOnlyNode_Name_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// RPC provides a mock function with given fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) RPC() RPC { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RPC") + } + + var r0 RPC + if rf, ok := ret.Get(0).(func() RPC); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(RPC) + } + + return r0 +} + +// mockSendOnlyNode_RPC_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RPC' +type mockSendOnlyNode_RPC_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// RPC is a helper method to define mock.On call +func (_e *mockSendOnlyNode_Expecter[CHAIN_ID, RPC]) RPC() *mockSendOnlyNode_RPC_Call[CHAIN_ID, RPC] { + return &mockSendOnlyNode_RPC_Call[CHAIN_ID, RPC]{Call: _e.mock.On("RPC")} +} + +func (_c *mockSendOnlyNode_RPC_Call[CHAIN_ID, RPC]) Run(run func()) *mockSendOnlyNode_RPC_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockSendOnlyNode_RPC_Call[CHAIN_ID, RPC]) Return(_a0 RPC) *mockSendOnlyNode_RPC_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockSendOnlyNode_RPC_Call[CHAIN_ID, RPC]) RunAndReturn(run func() RPC) *mockSendOnlyNode_RPC_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// Start provides a mock function with given fields: _a0 +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockSendOnlyNode_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type mockSendOnlyNode_Start_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - _a0 context.Context +func (_e *mockSendOnlyNode_Expecter[CHAIN_ID, RPC]) Start(_a0 interface{}) *mockSendOnlyNode_Start_Call[CHAIN_ID, RPC] { + return &mockSendOnlyNode_Start_Call[CHAIN_ID, RPC]{Call: _e.mock.On("Start", _a0)} +} + +func (_c *mockSendOnlyNode_Start_Call[CHAIN_ID, RPC]) Run(run func(_a0 context.Context)) *mockSendOnlyNode_Start_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockSendOnlyNode_Start_Call[CHAIN_ID, RPC]) Return(_a0 error) *mockSendOnlyNode_Start_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockSendOnlyNode_Start_Call[CHAIN_ID, RPC]) RunAndReturn(run func(context.Context) error) *mockSendOnlyNode_Start_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// State provides a mock function with given fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) State() nodeState { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for State") + } + + var r0 nodeState + if rf, ok := ret.Get(0).(func() nodeState); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(nodeState) + } + + return r0 +} + +// mockSendOnlyNode_State_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'State' +type mockSendOnlyNode_State_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// State is a helper method to define mock.On call +func (_e *mockSendOnlyNode_Expecter[CHAIN_ID, RPC]) State() *mockSendOnlyNode_State_Call[CHAIN_ID, RPC] { + return &mockSendOnlyNode_State_Call[CHAIN_ID, RPC]{Call: _e.mock.On("State")} +} + +func (_c *mockSendOnlyNode_State_Call[CHAIN_ID, RPC]) Run(run func()) *mockSendOnlyNode_State_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockSendOnlyNode_State_Call[CHAIN_ID, RPC]) Return(_a0 nodeState) *mockSendOnlyNode_State_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockSendOnlyNode_State_Call[CHAIN_ID, RPC]) RunAndReturn(run func() nodeState) *mockSendOnlyNode_State_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// String provides a mock function with given fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// mockSendOnlyNode_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' +type mockSendOnlyNode_String_Call[CHAIN_ID types.ID, RPC any] struct { + *mock.Call +} + +// String is a helper method to define mock.On call +func (_e *mockSendOnlyNode_Expecter[CHAIN_ID, RPC]) String() *mockSendOnlyNode_String_Call[CHAIN_ID, RPC] { + return &mockSendOnlyNode_String_Call[CHAIN_ID, RPC]{Call: _e.mock.On("String")} +} + +func (_c *mockSendOnlyNode_String_Call[CHAIN_ID, RPC]) Run(run func()) *mockSendOnlyNode_String_Call[CHAIN_ID, RPC] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockSendOnlyNode_String_Call[CHAIN_ID, RPC]) Return(_a0 string) *mockSendOnlyNode_String_Call[CHAIN_ID, RPC] { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockSendOnlyNode_String_Call[CHAIN_ID, RPC]) RunAndReturn(run func() string) *mockSendOnlyNode_String_Call[CHAIN_ID, RPC] { + _c.Call.Return(run) + return _c +} + +// newMockSendOnlyNode creates a new instance of mockSendOnlyNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func newMockSendOnlyNode[CHAIN_ID types.ID, RPC any](t interface { + mock.TestingT + Cleanup(func()) +}) *mockSendOnlyNode[CHAIN_ID, RPC] { + mock := &mockSendOnlyNode[CHAIN_ID, RPC]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multinode/mocks/config.go b/multinode/mocks/config.go new file mode 100644 index 0000000..95b57cc --- /dev/null +++ b/multinode/mocks/config.go @@ -0,0 +1,31 @@ +package mocks + +import "time" + +type ChainConfig struct { + IsFinalityTagEnabled bool + FinalityDepthVal uint32 + NoNewHeadsThresholdVal time.Duration + FinalizedBlockOffsetVal uint32 + NoNewFinalizedHeadsThresholdVal time.Duration +} + +func (t ChainConfig) NodeNoNewHeadsThreshold() time.Duration { + return t.NoNewHeadsThresholdVal +} + +func (t ChainConfig) FinalityDepth() uint32 { + return t.FinalityDepthVal +} + +func (t ChainConfig) FinalityTagEnabled() bool { + return t.IsFinalityTagEnabled +} + +func (t ChainConfig) FinalizedBlockOffset() uint32 { + return t.FinalizedBlockOffsetVal +} + +func (t ChainConfig) NoNewFinalizedHeadsThreshold() time.Duration { + return t.NoNewFinalizedHeadsThresholdVal +} diff --git a/multinode/models.go b/multinode/models.go new file mode 100644 index 0000000..526bb25 --- /dev/null +++ b/multinode/models.go @@ -0,0 +1,121 @@ +package client + +import ( + "bytes" + "fmt" +) + +type SendTxReturnCode int + +// SendTxReturnCode is a generalized client error that dictates what should be the next action, depending on the RPC error response. +const ( + Successful SendTxReturnCode = iota + 1 + Fatal // Unrecoverable error. Most likely the attempt should be thrown away. + Retryable // The error returned by the RPC indicates that if we retry with the same attempt, the tx will eventually go through. + Underpriced // Attempt was underpriced. New estimation is needed with bumped gas price. + Unknown // Tx failed with an error response that is not recognized by the client. + Unsupported // Attempt failed with an error response that is not supported by the client for the given chain. + TransactionAlreadyKnown // The transaction that was sent has already been received by the RPC. + InsufficientFunds // Tx was rejected due to insufficient funds. + ExceedsMaxFee // Attempt's fee was higher than the node's limit and got rejected. + FeeOutOfValidRange // This error is returned when we use a fee price suggested from an RPC, but the network rejects the attempt due to an invalid range(mostly used by L2 chains). Retry by requesting a new suggested fee price. + TerminallyStuck // The error returned when a transaction is or could get terminally stuck in the mempool without any chance of inclusion. + sendTxReturnCodeLen // tracks the number of errors. 
Must always be last +) + +// sendTxSevereErrors - error codes which signal that transaction would never be accepted in its current form by the node +var sendTxSevereErrors = []SendTxReturnCode{Fatal, Underpriced, Unsupported, ExceedsMaxFee, FeeOutOfValidRange, Unknown} + +// sendTxSuccessfulCodes - error codes which signal that transaction was accepted by the node +var sendTxSuccessfulCodes = []SendTxReturnCode{Successful, TransactionAlreadyKnown} + +func (c SendTxReturnCode) String() string { + switch c { + case Successful: + return "Successful" + case Fatal: + return "Fatal" + case Retryable: + return "Retryable" + case Underpriced: + return "Underpriced" + case Unknown: + return "Unknown" + case Unsupported: + return "Unsupported" + case TransactionAlreadyKnown: + return "TransactionAlreadyKnown" + case InsufficientFunds: + return "InsufficientFunds" + case ExceedsMaxFee: + return "ExceedsMaxFee" + case FeeOutOfValidRange: + return "FeeOutOfValidRange" + case TerminallyStuck: + return "TerminallyStuck" + default: + return fmt.Sprintf("SendTxReturnCode(%d)", c) + } +} + +type NodeTier int + +const ( + Primary = NodeTier(iota) + Secondary +) + +func (n NodeTier) String() string { + switch n { + case Primary: + return "primary" + case Secondary: + return "secondary" + default: + return fmt.Sprintf("NodeTier(%d)", n) + } +} + +// syncStatus - defines problems related to RPC's state synchronization. Can be used as a bitmask to define multiple issues +type syncStatus int + +const ( + // syncStatusSynced - RPC is fully synced + syncStatusSynced = 0 + // syncStatusNotInSyncWithPool - RPC is lagging behind the highest block observed within the pool of RPCs + syncStatusNotInSyncWithPool syncStatus = 1 << iota + // syncStatusNoNewHead - RPC failed to produce a new head for too long + syncStatusNoNewHead + // syncStatusNoNewFinalizedHead - RPC failed to produce a new finalized head for too long + syncStatusNoNewFinalizedHead + syncStatusLen +) + +func (s syncStatus) String() string { + if s == syncStatusSynced { + return "Synced" + } + var result bytes.Buffer + for i := syncStatusNotInSyncWithPool; i < syncStatusLen; i = i << 1 { + if i&s == 0 { + continue + } + result.WriteString(i.string()) + result.WriteString(",") + } + result.Truncate(result.Len() - 1) + return result.String() +} + +func (s syncStatus) string() string { + switch s { + case syncStatusNotInSyncWithPool: + return "NotInSyncWithRPCPool" + case syncStatusNoNewHead: + return "NoNewHead" + case syncStatusNoNewFinalizedHead: + return "NoNewFinalizedHead" + default: + return fmt.Sprintf("syncStatus(%d)", s) + } +} diff --git a/multinode/models_test.go b/multinode/models_test.go new file mode 100644 index 0000000..a10592c --- /dev/null +++ b/multinode/models_test.go @@ -0,0 +1,50 @@ +package client + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSendTxReturnCode_String(t *testing.T) { + // ensure all the SendTxReturnCodes have proper name + for c := 1; c < int(sendTxReturnCodeLen); c++ { + strC := SendTxReturnCode(c).String() + if strings.Contains(strC, "SendTxReturnCode(") { + t.Errorf("Expected %s to have a proper string representation", strC) + } + } +} + +func TestSyncStatus_String(t *testing.T) { + t.Run("All of the statuses have proper string representation", func(t *testing.T) { + for i := syncStatusNotInSyncWithPool; i < syncStatusLen; i <<= 1 { + // ensure that i's string representation is not equal to `syncStatus(%d)` + assert.NotContains(t, i.String(), "syncStatus(") + } + }) + 
t.Run("Unwraps mask", func(t *testing.T) { + testCases := []struct { + Mask syncStatus + ExpectedStr string + }{ + { + ExpectedStr: "Synced", + }, + { + Mask: syncStatusNotInSyncWithPool | syncStatusNoNewHead, + ExpectedStr: "NotInSyncWithRPCPool,NoNewHead", + }, + { + Mask: syncStatusNotInSyncWithPool | syncStatusNoNewHead | syncStatusNoNewFinalizedHead, + ExpectedStr: "NotInSyncWithRPCPool,NoNewHead,NoNewFinalizedHead", + }, + } + for _, testCase := range testCases { + t.Run(testCase.ExpectedStr, func(t *testing.T) { + assert.Equal(t, testCase.ExpectedStr, testCase.Mask.String()) + }) + } + }) +} diff --git a/multinode/multi_node.go b/multinode/multi_node.go new file mode 100644 index 0000000..9851289 --- /dev/null +++ b/multinode/multi_node.go @@ -0,0 +1,364 @@ +package client + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +var ( + // PromMultiNodeRPCNodeStates reports current RPC node state + PromMultiNodeRPCNodeStates = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "multi_node_states", + Help: "The number of RPC nodes currently in the given state for the given chain", + }, []string{"network", "chainId", "state"}) + ErroringNodeError = fmt.Errorf("no live nodes available") +) + +// MultiNode is a generalized multi node client interface that includes methods to interact with different chains. +// It also handles multiple node RPC connections simultaneously. +type MultiNode[ + CHAIN_ID types.ID, + RPC any, +] struct { + services.Service + eng *services.Engine + + primaryNodes []Node[CHAIN_ID, RPC] + sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC] + chainID CHAIN_ID + lggr logger.SugaredLogger + selectionMode string + nodeSelector NodeSelector[CHAIN_ID, RPC] + leaseDuration time.Duration + leaseTicker *time.Ticker + chainFamily string + reportInterval time.Duration + deathDeclarationDelay time.Duration + + activeMu sync.RWMutex + activeNode Node[CHAIN_ID, RPC] +} + +func NewMultiNode[ + CHAIN_ID types.ID, + RPC any, +]( + lggr logger.Logger, + selectionMode string, // type of the "best" RPC selector (e.g HighestHead, RoundRobin, etc.) 
+ leaseDuration time.Duration, // defines interval on which new "best" RPC should be selected + primaryNodes []Node[CHAIN_ID, RPC], + sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC], + chainID CHAIN_ID, // configured chain ID (used to verify that passed primaryNodes belong to the same chain) + chainFamily string, // name of the chain family - used in the metrics + deathDeclarationDelay time.Duration, +) *MultiNode[CHAIN_ID, RPC] { + nodeSelector := newNodeSelector(selectionMode, primaryNodes) + // Prometheus' default interval is 15s, set this to under 7.5s to avoid + // aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) + const reportInterval = 6500 * time.Millisecond + c := &MultiNode[CHAIN_ID, RPC]{ + primaryNodes: primaryNodes, + sendOnlyNodes: sendOnlyNodes, + chainID: chainID, + selectionMode: selectionMode, + nodeSelector: nodeSelector, + leaseDuration: leaseDuration, + chainFamily: chainFamily, + reportInterval: reportInterval, + deathDeclarationDelay: deathDeclarationDelay, + } + c.Service, c.eng = services.Config{ + Name: "MultiNode", + Start: c.start, + Close: c.close, + }.NewServiceEngine(logger.With(lggr, "chainID", chainID.String())) + c.lggr = c.eng.SugaredLogger + + c.lggr.Debugf("The MultiNode is configured to use NodeSelectionMode: %s", selectionMode) + + return c +} + +func (c *MultiNode[CHAIN_ID, RPC]) ChainID() CHAIN_ID { + return c.chainID +} + +func (c *MultiNode[CHAIN_ID, RPC]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC, isSendOnly bool)) error { + return c.eng.IfNotStopped(func() error { + callsCompleted := 0 + for _, n := range c.primaryNodes { + select { + case <-ctx.Done(): + return ctx.Err() + default: + if n.State() != nodeStateAlive { + continue + } + do(ctx, n.RPC(), false) + callsCompleted++ + } + } + + for _, n := range c.sendOnlyNodes { + select { + case <-ctx.Done(): + return ctx.Err() + default: + if n.State() != nodeStateAlive { + continue + } + do(ctx, n.RPC(), true) + } + } + if callsCompleted == 0 { + return ErroringNodeError + } + return nil + }) +} + +func (c *MultiNode[CHAIN_ID, RPC]) NodeStates() map[string]string { + states := map[string]string{} + for _, n := range c.primaryNodes { + states[n.Name()] = n.State().String() + } + for _, n := range c.sendOnlyNodes { + states[n.Name()] = n.State().String() + } + return states +} + +// Start starts every node in the pool +// +// Nodes handle their own redialing and runloops, so this function does not +// return any error if the nodes aren't available +func (c *MultiNode[CHAIN_ID, RPC]) start(ctx context.Context) error { + if len(c.primaryNodes) == 0 { + return fmt.Errorf("no available nodes for chain %s", c.chainID.String()) + } + var ms services.MultiStart + for _, n := range c.primaryNodes { + if n.ConfiguredChainID().String() != c.chainID.String() { + return ms.CloseBecause(fmt.Errorf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", n.String(), n.ConfiguredChainID().String(), c.chainID.String())) + } + n.SetPoolChainInfoProvider(c) + // node will handle its own redialing and automatic recovery + if err := ms.Start(ctx, n); err != nil { + return err + } + } + for _, s := range c.sendOnlyNodes { + if s.ConfiguredChainID().String() != c.chainID.String() { + return ms.CloseBecause(fmt.Errorf("sendonly node %s has configured chain ID %s which does not match multinode configured chain ID of %s", s.String(), s.ConfiguredChainID().String(), c.chainID.String())) + } + if err := ms.Start(ctx, s); err != nil { + return err + } + } + 
c.eng.Go(c.runLoop) + + if c.leaseDuration.Seconds() > 0 && c.selectionMode != NodeSelectionModeRoundRobin { + c.lggr.Infof("The MultiNode will switch to best node every %s", c.leaseDuration.String()) + c.eng.Go(c.checkLeaseLoop) + } else { + c.lggr.Info("Best node switching is disabled") + } + + return nil +} + +// Close tears down the MultiNode and closes all nodes +func (c *MultiNode[CHAIN_ID, RPC]) close() error { + return services.CloseAll(services.MultiCloser(c.primaryNodes), services.MultiCloser(c.sendOnlyNodes)) +} + +// SelectRPC returns an RPC of an active node. If there are no active nodes it returns an error. +// Call this method from your chain-specific client implementation to access any chain-specific rpc calls. +func (c *MultiNode[CHAIN_ID, RPC]) SelectRPC() (rpc RPC, err error) { + n, err := c.selectNode() + if err != nil { + return rpc, err + } + return n.RPC(), nil +} + +// selectNode returns the active Node, if it is still nodeStateAlive, otherwise it selects a new one from the NodeSelector. +func (c *MultiNode[CHAIN_ID, RPC]) selectNode() (node Node[CHAIN_ID, RPC], err error) { + c.activeMu.RLock() + node = c.activeNode + c.activeMu.RUnlock() + if node != nil && node.State() == nodeStateAlive { + return // still alive + } + + // select a new one + c.activeMu.Lock() + defer c.activeMu.Unlock() + node = c.activeNode + if node != nil && node.State() == nodeStateAlive { + return // another goroutine beat us here + } + + var prevNodeName string + if c.activeNode != nil { + prevNodeName = c.activeNode.String() + c.activeNode.UnsubscribeAllExceptAliveLoop() + } + c.activeNode = c.nodeSelector.Select() + if c.activeNode == nil { + c.lggr.Criticalw("No live RPC nodes available", "NodeSelectionMode", c.nodeSelector.Name()) + c.eng.EmitHealthErr(fmt.Errorf("no live nodes available for chain %s", c.chainID.String())) + return nil, ErroringNodeError + } + + c.lggr.Debugw("Switched to a new active node due to prev node health issues", "prevNode", prevNodeName, "newNode", c.activeNode.String()) + return c.activeNode, err +} + +// LatestChainInfo - returns number of live nodes available in the pool, so we can prevent the last alive node in a pool from being marked as out-of-sync. +// Return highest ChainInfo most recently received by the alive nodes. +// E.g. if Node A's most recent block is 10 (highest seen 15) and Node B's is 12 (highest seen 14), this method returns 12.
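+// +// A minimal usage sketch (illustrative only, not part of this change; multiNode is an assumed, already started *MultiNode): +// +// liveNodes, latest := multiNode.LatestChainInfo() +// if liveNodes <= 1 { +// // only one node is still alive, so avoid declaring it out-of-sync with the rest of the pool +// } +// _ = latest.BlockNumber // highest most recent block reported by the alive nodes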
+func (c *MultiNode[CHAIN_ID, RPC]) LatestChainInfo() (int, ChainInfo) { + var nLiveNodes int + ch := ChainInfo{ + TotalDifficulty: big.NewInt(0), + } + for _, n := range c.primaryNodes { + if s, nodeChainInfo := n.StateAndLatest(); s == nodeStateAlive { + nLiveNodes++ + ch.BlockNumber = max(ch.BlockNumber, nodeChainInfo.BlockNumber) + ch.FinalizedBlockNumber = max(ch.FinalizedBlockNumber, nodeChainInfo.FinalizedBlockNumber) + ch.TotalDifficulty = MaxTotalDifficulty(ch.TotalDifficulty, nodeChainInfo.TotalDifficulty) + } + } + return nLiveNodes, ch +} + +// HighestUserObservations - returns highest ChainInfo ever observed by any user of the MultiNode +func (c *MultiNode[CHAIN_ID, RPC]) HighestUserObservations() ChainInfo { + ch := ChainInfo{ + TotalDifficulty: big.NewInt(0), + } + for _, n := range c.primaryNodes { + nodeChainInfo := n.HighestUserObservations() + ch.BlockNumber = max(ch.BlockNumber, nodeChainInfo.BlockNumber) + ch.FinalizedBlockNumber = max(ch.FinalizedBlockNumber, nodeChainInfo.FinalizedBlockNumber) + ch.TotalDifficulty = MaxTotalDifficulty(ch.TotalDifficulty, nodeChainInfo.TotalDifficulty) + } + return ch +} + +func (c *MultiNode[CHAIN_ID, RPC]) checkLease() { + bestNode := c.nodeSelector.Select() + for _, n := range c.primaryNodes { + // Terminate client subscriptions. Services are responsible for reconnecting, which will be routed to the new + // best node. Only terminate connections with more than 1 subscription to account for the aliveLoop subscription + if n.State() == nodeStateAlive && n != bestNode { + c.lggr.Infof("Switching to best node from %q to %q", n.String(), bestNode.String()) + n.UnsubscribeAllExceptAliveLoop() + } + } + + c.activeMu.Lock() + defer c.activeMu.Unlock() + if bestNode != c.activeNode { + if c.activeNode != nil { + c.activeNode.UnsubscribeAllExceptAliveLoop() + } + c.activeNode = bestNode + } +} + +func (c *MultiNode[CHAIN_ID, RPC]) checkLeaseLoop(ctx context.Context) { + c.leaseTicker = time.NewTicker(c.leaseDuration) + defer c.leaseTicker.Stop() + + for { + select { + case <-c.leaseTicker.C: + c.checkLease() + case <-ctx.Done(): + return + } + } +} + +func (c *MultiNode[CHAIN_ID, RPC]) runLoop(ctx context.Context) { + nodeStates := make([]nodeWithState, len(c.primaryNodes)) + for i, n := range c.primaryNodes { + nodeStates[i] = nodeWithState{ + Node: n.String(), + State: n.State().String(), + DeadSince: nil, + } + } + + c.report(nodeStates) + + monitor := services.NewTicker(c.reportInterval) + defer monitor.Stop() + + for { + select { + case <-monitor.C: + c.report(nodeStates) + case <-ctx.Done(): + return + } + } +} + +type nodeWithState struct { + Node string + State string + DeadSince *time.Time +} + +func (c *MultiNode[CHAIN_ID, RPC]) report(nodesStateInfo []nodeWithState) { + start := time.Now() + var dead int + counts := make(map[nodeState]int) + for i, n := range c.primaryNodes { + state := n.State() + counts[state]++ + nodesStateInfo[i].State = state.String() + if state == nodeStateAlive { + nodesStateInfo[i].DeadSince = nil + continue + } + + if nodesStateInfo[i].DeadSince == nil { + nodesStateInfo[i].DeadSince = &start + } + + if start.Sub(*nodesStateInfo[i].DeadSince) >= c.deathDeclarationDelay { + dead++ + } + } + for _, state := range allNodeStates { + count := counts[state] + PromMultiNodeRPCNodeStates.WithLabelValues(c.chainFamily, c.chainID.String(), state.String()).Set(float64(count)) + } + + total := len(c.primaryNodes) + live := total - dead + c.lggr.Tracew(fmt.Sprintf("MultiNode state: %d/%d nodes are alive", live, 
total), "nodeStates", nodesStateInfo) + if total == dead { + rerr := fmt.Errorf("no primary nodes available: 0/%d nodes are alive", total) + c.lggr.Criticalw(rerr.Error(), "nodeStates", nodesStateInfo) + c.eng.EmitHealthErr(rerr) + } else if dead > 0 { + c.lggr.Errorw(fmt.Sprintf("At least one primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodesStateInfo) + } +} diff --git a/multinode/multi_node_test.go b/multinode/multi_node_test.go new file mode 100644 index 0000000..935483b --- /dev/null +++ b/multinode/multi_node_test.go @@ -0,0 +1,517 @@ +package client + +import ( + "fmt" + "math/big" + "math/rand" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +type multiNodeRPCClient RPCClient[types.ID, types.Head[Hashable]] + +type testMultiNode struct { + *MultiNode[types.ID, multiNodeRPCClient] +} + +type multiNodeOpts struct { + logger logger.Logger + selectionMode string + leaseDuration time.Duration + nodes []Node[types.ID, multiNodeRPCClient] + sendonlys []SendOnlyNode[types.ID, multiNodeRPCClient] + chainID types.ID + chainFamily string + deathDeclarationDelay time.Duration +} + +func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { + if opts.logger == nil { + opts.logger = logger.Test(t) + } + + result := NewMultiNode[types.ID, multiNodeRPCClient]( + opts.logger, opts.selectionMode, opts.leaseDuration, opts.nodes, opts.sendonlys, opts.chainID, opts.chainFamily, opts.deathDeclarationDelay) + return testMultiNode{ + result, + } +} + +func newHealthyNode(t *testing.T, chainID types.ID) *mockNode[types.ID, multiNodeRPCClient] { + return newNodeWithState(t, chainID, nodeStateAlive) +} + +func newNodeWithState(t *testing.T, chainID types.ID, state nodeState) *mockNode[types.ID, multiNodeRPCClient] { + node := newMockNode[types.ID, multiNodeRPCClient](t) + node.On("ConfiguredChainID").Return(chainID).Once() + node.On("Start", mock.Anything).Return(nil).Once() + node.On("Close").Return(nil).Once() + node.On("String").Return(fmt.Sprintf("healthy_node_%d", rand.Int())).Maybe() + node.On("SetPoolChainInfoProvider", mock.Anything).Once() + node.On("State").Return(state).Maybe() + return node +} + +func TestMultiNode_Dial(t *testing.T) { + t.Parallel() + + newMockNode := newMockNode[types.ID, multiNodeRPCClient] + newMockSendOnlyNode := newMockSendOnlyNode[types.ID, multiNodeRPCClient] + + t.Run("Fails without nodes", func(t *testing.T) { + t.Parallel() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: types.RandomID(), + }) + err := mn.Start(tests.Context(t)) + assert.ErrorContains(t, err, fmt.Sprintf("no available nodes for chain %s", mn.chainID)) + }) + t.Run("Fails with wrong node's chainID", func(t *testing.T) { + t.Parallel() + node := newMockNode(t) + multiNodeChainID := types.NewIDFromInt(10) + nodeChainID := types.NewIDFromInt(11) + node.On("ConfiguredChainID").Return(nodeChainID).Twice() + const nodeName = "nodeName" + node.On("String").Return(nodeName).Once() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: multiNodeChainID, + nodes: []Node[types.ID, 
multiNodeRPCClient]{node}, + }) + err := mn.Start(tests.Context(t)) + assert.ErrorContains(t, err, fmt.Sprintf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", nodeName, nodeChainID, mn.chainID)) + }) + t.Run("Fails if node fails", func(t *testing.T) { + t.Parallel() + node := newMockNode(t) + chainID := types.RandomID() + node.On("ConfiguredChainID").Return(chainID).Once() + expectedError := errors.New("failed to start node") + node.On("Start", mock.Anything).Return(expectedError).Once() + node.On("SetPoolChainInfoProvider", mock.Anything).Once() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, + }) + err := mn.Start(tests.Context(t)) + assert.ErrorIs(t, err, expectedError) + }) + + t.Run("Closes started nodes on failure", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node1 := newHealthyNode(t, chainID) + node2 := newMockNode(t) + node2.On("ConfiguredChainID").Return(chainID).Once() + expectedError := errors.New("failed to start node") + node2.On("Start", mock.Anything).Return(expectedError).Once() + node2.On("SetPoolChainInfoProvider", mock.Anything).Once() + + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, multiNodeRPCClient]{node1, node2}, + }) + err := mn.Start(tests.Context(t)) + assert.ErrorIs(t, err, expectedError) + }) + t.Run("Fails with wrong send only node's chainID", func(t *testing.T) { + t.Parallel() + multiNodeChainID := types.NewIDFromInt(10) + node := newHealthyNode(t, multiNodeChainID) + sendOnly := newMockSendOnlyNode(t) + sendOnlyChainID := types.NewIDFromInt(11) + sendOnly.On("ConfiguredChainID").Return(sendOnlyChainID).Twice() + const sendOnlyName = "sendOnlyNodeName" + sendOnly.On("String").Return(sendOnlyName).Once() + + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: multiNodeChainID, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly}, + }) + err := mn.Start(tests.Context(t)) + assert.ErrorContains(t, err, fmt.Sprintf("sendonly node %s has configured chain ID %s which does not match multinode configured chain ID of %s", sendOnlyName, sendOnlyChainID, mn.chainID)) + }) + + newHealthySendOnly := func(t *testing.T, chainID types.ID) *mockSendOnlyNode[types.ID, multiNodeRPCClient] { + node := newMockSendOnlyNode(t) + node.On("ConfiguredChainID").Return(chainID).Once() + node.On("Start", mock.Anything).Return(nil).Once() + node.On("Close").Return(nil).Once() + return node + } + t.Run("Fails on send only node failure", func(t *testing.T) { + t.Parallel() + chainID := types.NewIDFromInt(10) + node := newHealthyNode(t, chainID) + sendOnly1 := newHealthySendOnly(t, chainID) + sendOnly2 := newMockSendOnlyNode(t) + sendOnly2.On("ConfiguredChainID").Return(chainID).Once() + expectedError := errors.New("failed to start send only node") + sendOnly2.On("Start", mock.Anything).Return(expectedError).Once() + + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly1, sendOnly2}, + }) + err := mn.Start(tests.Context(t)) + assert.ErrorIs(t, err, expectedError) + }) + t.Run("Starts successfully with healthy nodes", func(t *testing.T) { 
+ t.Parallel() + chainID := types.NewIDFromInt(10) + node := newHealthyNode(t, chainID) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newHealthySendOnly(t, chainID)}, + }) + servicetest.Run(t, mn) + selectedNode, err := mn.selectNode() + require.NoError(t, err) + assert.Equal(t, node, selectedNode) + }) +} + +func TestMultiNode_Report(t *testing.T) { + t.Parallel() + t.Run("Dial starts periodical reporting", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node1 := newHealthyNode(t, chainID) + node2 := newNodeWithState(t, chainID, nodeStateOutOfSync) + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, multiNodeRPCClient]{node1, node2}, + logger: lggr, + }) + mn.reportInterval = tests.TestInterval + mn.deathDeclarationDelay = tests.TestInterval + servicetest.Run(t, mn) + tests.AssertLogCountEventually(t, observedLogs, "At least one primary node is dead: 1/2 nodes are alive", 2) + }) + t.Run("Report critical error on all node failure", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node := newNodeWithState(t, chainID, nodeStateOutOfSync) + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, + logger: lggr, + }) + mn.reportInterval = tests.TestInterval + mn.deathDeclarationDelay = tests.TestInterval + servicetest.Run(t, mn) + tests.AssertLogCountEventually(t, observedLogs, "no primary nodes available: 0/1 nodes are alive", 2) + err := mn.HealthReport()["MultiNode"] + require.Error(t, err) + assert.Contains(t, err.Error(), "no primary nodes available: 0/1 nodes are alive") + }) +} + +func TestMultiNode_CheckLease(t *testing.T) { + t.Parallel() + t.Run("Round robin disables lease check", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node := newHealthyNode(t, chainID) + lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + logger: lggr, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, + }) + servicetest.Run(t, mn) + tests.RequireLogMessage(t, observedLogs, "Best node switching is disabled") + }) + t.Run("Misconfigured lease check period won't start", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node := newHealthyNode(t, chainID) + lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeHighestHead, + chainID: chainID, + logger: lggr, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, + leaseDuration: 0, + }) + servicetest.Run(t, mn) + tests.RequireLogMessage(t, observedLogs, "Best node switching is disabled") + }) + t.Run("Lease check updates active node", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node := newHealthyNode(t, chainID) + node.On("UnsubscribeAllExceptAliveLoop") + bestNode := newHealthyNode(t, chainID) + nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) + nodeSelector.On("Select").Return(bestNode) + lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) + mn := 
newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeHighestHead, + chainID: chainID, + logger: lggr, + nodes: []Node[types.ID, multiNodeRPCClient]{node, bestNode}, + leaseDuration: tests.TestInterval, + }) + mn.nodeSelector = nodeSelector + servicetest.Run(t, mn) + tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("Switching to best node from %q to %q", node.String(), bestNode.String())) + tests.AssertEventually(t, func() bool { + mn.activeMu.RLock() + active := mn.activeNode + mn.activeMu.RUnlock() + return bestNode == active + }) + }) + t.Run("NodeStates returns proper states", func(t *testing.T) { + t.Parallel() + chainID := types.NewIDFromInt(10) + nodes := map[string]nodeState{ + "node_1": nodeStateAlive, + "node_2": nodeStateUnreachable, + "node_3": nodeStateDialed, + } + + opts := multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + } + + expectedResult := map[string]string{} + for name, state := range nodes { + node := newMockNode[types.ID, multiNodeRPCClient](t) + node.On("State").Return(state).Once() + node.On("Name").Return(name).Once() + opts.nodes = append(opts.nodes, node) + + sendOnly := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t) + sendOnlyName := "send_only_" + name + sendOnly.On("State").Return(state).Once() + sendOnly.On("Name").Return(sendOnlyName).Once() + opts.sendonlys = append(opts.sendonlys, sendOnly) + + expectedResult[name] = state.String() + expectedResult[sendOnlyName] = state.String() + } + + mn := newTestMultiNode(t, opts) + states := mn.NodeStates() + assert.Equal(t, expectedResult, states) + }) +} + +func TestMultiNode_selectNode(t *testing.T) { + t.Parallel() + t.Run("Returns same node, if it's still healthy", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node1 := newMockNode[types.ID, multiNodeRPCClient](t) + node1.On("State").Return(nodeStateAlive).Once() + node1.On("String").Return("node1").Maybe() + node2 := newMockNode[types.ID, multiNodeRPCClient](t) + node2.On("String").Return("node2").Maybe() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, multiNodeRPCClient]{node1, node2}, + }) + nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) + nodeSelector.On("Select").Return(node1).Once() + mn.nodeSelector = nodeSelector + prevActiveNode, err := mn.selectNode() + require.NoError(t, err) + require.Equal(t, node1.String(), prevActiveNode.String()) + newActiveNode, err := mn.selectNode() + require.NoError(t, err) + require.Equal(t, prevActiveNode.String(), newActiveNode.String()) + }) + t.Run("Updates node if active is not healthy", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + oldBest := newMockNode[types.ID, multiNodeRPCClient](t) + oldBest.On("String").Return("oldBest").Maybe() + oldBest.On("UnsubscribeAllExceptAliveLoop") + newBest := newMockNode[types.ID, multiNodeRPCClient](t) + newBest.On("String").Return("newBest").Maybe() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, multiNodeRPCClient]{oldBest, newBest}, + }) + nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) + nodeSelector.On("Select").Return(oldBest).Once() + mn.nodeSelector = nodeSelector + activeNode, err := mn.selectNode() + require.NoError(t, err) + require.Equal(t, oldBest.String(), activeNode.String()) + // old best died, so we should replace it + 
oldBest.On("State").Return(nodeStateOutOfSync).Twice() + nodeSelector.On("Select").Return(newBest).Once() + newActiveNode, err := mn.selectNode() + require.NoError(t, err) + require.Equal(t, newBest.String(), newActiveNode.String()) + }) + t.Run("No active nodes - reports critical error", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + logger: lggr, + }) + nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) + nodeSelector.On("Select").Return(nil).Once() + nodeSelector.On("Name").Return("MockedNodeSelector").Once() + mn.nodeSelector = nodeSelector + node, err := mn.selectNode() + require.EqualError(t, err, ErroringNodeError.Error()) + require.Nil(t, node) + tests.RequireLogMessage(t, observedLogs, "No live RPC nodes available") + }) +} + +func TestMultiNode_ChainInfo(t *testing.T) { + t.Parallel() + type nodeParams struct { + LatestChainInfo ChainInfo + HighestUserObservations ChainInfo + State nodeState + } + testCases := []struct { + Name string + ExpectedNLiveNodes int + ExpectedLatestChainInfo ChainInfo + ExpectedHighestUserObservations ChainInfo + NodeParams []nodeParams + }{ + { + Name: "no nodes", + ExpectedLatestChainInfo: ChainInfo{ + TotalDifficulty: big.NewInt(0), + }, + ExpectedHighestUserObservations: ChainInfo{ + TotalDifficulty: big.NewInt(0), + }, + }, + { + Name: "Best node is not healthy", + ExpectedNLiveNodes: 3, + ExpectedLatestChainInfo: ChainInfo{ + BlockNumber: 20, + FinalizedBlockNumber: 10, + TotalDifficulty: big.NewInt(10), + }, + ExpectedHighestUserObservations: ChainInfo{ + BlockNumber: 1005, + FinalizedBlockNumber: 995, + TotalDifficulty: big.NewInt(2005), + }, + NodeParams: []nodeParams{ + { + State: nodeStateOutOfSync, + LatestChainInfo: ChainInfo{ + BlockNumber: 1000, + FinalizedBlockNumber: 990, + TotalDifficulty: big.NewInt(2000), + }, + HighestUserObservations: ChainInfo{ + BlockNumber: 1005, + FinalizedBlockNumber: 995, + TotalDifficulty: big.NewInt(2005), + }, + }, + { + State: nodeStateAlive, + LatestChainInfo: ChainInfo{ + BlockNumber: 20, + FinalizedBlockNumber: 10, + TotalDifficulty: big.NewInt(9), + }, + HighestUserObservations: ChainInfo{ + BlockNumber: 25, + FinalizedBlockNumber: 15, + TotalDifficulty: big.NewInt(14), + }, + }, + { + State: nodeStateAlive, + LatestChainInfo: ChainInfo{ + BlockNumber: 19, + FinalizedBlockNumber: 9, + TotalDifficulty: big.NewInt(10), + }, + HighestUserObservations: ChainInfo{ + BlockNumber: 24, + FinalizedBlockNumber: 14, + TotalDifficulty: big.NewInt(15), + }, + }, + { + State: nodeStateAlive, + LatestChainInfo: ChainInfo{ + BlockNumber: 11, + FinalizedBlockNumber: 1, + TotalDifficulty: nil, + }, + HighestUserObservations: ChainInfo{ + BlockNumber: 16, + FinalizedBlockNumber: 6, + TotalDifficulty: nil, + }, + }, + }, + }, + } + + chainID := types.RandomID() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + }) + for i := range testCases { + tc := testCases[i] + t.Run(tc.Name, func(t *testing.T) { + for _, params := range tc.NodeParams { + node := newMockNode[types.ID, multiNodeRPCClient](t) + mn.primaryNodes = append(mn.primaryNodes, node) + node.On("StateAndLatest").Return(params.State, params.LatestChainInfo) + node.On("HighestUserObservations").Return(params.HighestUserObservations) + } + + nNodes, latestChainInfo := mn.LatestChainInfo() + 
assert.Equal(t, tc.ExpectedNLiveNodes, nNodes) + assert.Equal(t, tc.ExpectedLatestChainInfo, latestChainInfo) + + highestChainInfo := mn.HighestUserObservations() + assert.Equal(t, tc.ExpectedHighestUserObservations, highestChainInfo) + }) + } +} diff --git a/multinode/node.go b/multinode/node.go new file mode 100644 index 0000000..850edc0 --- /dev/null +++ b/multinode/node.go @@ -0,0 +1,336 @@ +package client + +import ( + "context" + "errors" + "fmt" + "net/url" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +const QueryTimeout = 10 * time.Second + +var errInvalidChainID = errors.New("invalid chain id") + +var ( + promPoolRPCNodeVerifies = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_verifies", + Help: "The total number of chain types.ID verifications for the given RPC node", + }, []string{"network", "chainID", "nodeName"}) + promPoolRPCNodeVerifiesFailed = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_verifies_failed", + Help: "The total number of failed chain types.ID verifications for the given RPC node", + }, []string{"network", "chainID", "nodeName"}) + promPoolRPCNodeVerifiesSuccess = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_verifies_success", + Help: "The total number of successful chain types.ID verifications for the given RPC node", + }, []string{"network", "chainID", "nodeName"}) +) + +type NodeConfig interface { + PollFailureThreshold() uint32 + PollInterval() time.Duration + SelectionMode() string + SyncThreshold() uint32 + NodeIsSyncingEnabled() bool + FinalizedBlockPollInterval() time.Duration + EnforceRepeatableRead() bool + DeathDeclarationDelay() time.Duration + NewHeadsPollInterval() time.Duration +} + +type ChainConfig interface { + NodeNoNewHeadsThreshold() time.Duration + NoNewFinalizedHeadsThreshold() time.Duration + FinalityDepth() uint32 + FinalityTagEnabled() bool + FinalizedBlockOffset() uint32 +} + +type Node[ + CHAIN_ID types.ID, + RPC any, +] interface { + // State returns most accurate state of the Node on the moment of call. + // While some of the checks may be performed in the background and State may return cached value, critical, like + // `FinalizedBlockOutOfSync`, must be executed upon every call. + State() nodeState + // StateAndLatest returns nodeState with the latest ChainInfo observed by Node during current lifecycle. + StateAndLatest() (nodeState, ChainInfo) + // HighestUserObservations - returns highest ChainInfo ever observed by underlying RPC excluding results of health check requests + HighestUserObservations() ChainInfo + SetPoolChainInfoProvider(PoolChainInfoProvider) + // Name is a unique identifier for this node. 
+ Name() string + // String - returns string representation of the node, useful for debugging (name + URLS used to connect to the RPC) + String() string + RPC() RPC + // UnsubscribeAllExceptAliveLoop - closes all subscriptions except the aliveLoop subscription + UnsubscribeAllExceptAliveLoop() + ConfiguredChainID() CHAIN_ID + // Order - returns priority order configured for the RPC + Order() int32 + // Start - starts health checks + Start(context.Context) error + Close() error +} + +type node[ + CHAIN_ID types.ID, + HEAD Head, + RPC RPCClient[CHAIN_ID, HEAD], +] struct { + services.StateMachine + lfcLog logger.Logger + name string + id int + chainID CHAIN_ID + nodePoolCfg NodeConfig + chainCfg ChainConfig + order int32 + chainFamily string + + ws *url.URL + http *url.URL + + rpc RPC + + stateMu sync.RWMutex // protects state* fields + state nodeState + + poolInfoProvider PoolChainInfoProvider + + stopCh services.StopChan + // wg waits for subsidiary goroutines + wg sync.WaitGroup + + healthCheckSubs []types.Subscription +} + +func NewNode[ + CHAIN_ID types.ID, + HEAD Head, + RPC RPCClient[CHAIN_ID, HEAD], +]( + nodeCfg NodeConfig, + chainCfg ChainConfig, + lggr logger.Logger, + wsuri *url.URL, + httpuri *url.URL, + name string, + id int, + chainID CHAIN_ID, + nodeOrder int32, + rpc RPC, + chainFamily string, +) Node[CHAIN_ID, RPC] { + n := new(node[CHAIN_ID, HEAD, RPC]) + n.name = name + n.id = id + n.chainID = chainID + n.nodePoolCfg = nodeCfg + n.chainCfg = chainCfg + n.order = nodeOrder + if wsuri != nil { + n.ws = wsuri + } + if httpuri != nil { + n.http = httpuri + } + n.stopCh = make(services.StopChan) + lggr = logger.Named(lggr, "Node") + lggr = logger.With(lggr, + "nodeTier", Primary.String(), + "nodeName", name, + "node", n.String(), + "chainID", chainID, + "nodeOrder", n.order, + ) + n.lfcLog = logger.Named(lggr, "Lifecycle") + n.rpc = rpc + n.chainFamily = chainFamily + return n +} + +func (n *node[CHAIN_ID, HEAD, RPC]) String() string { + s := fmt.Sprintf("(%s)%s", Primary.String(), n.name) + if n.ws != nil { + s = s + fmt.Sprintf(":%s", n.ws.String()) + } + if n.http != nil { + s = s + fmt.Sprintf(":%s", n.http.String()) + } + return s +} + +func (n *node[CHAIN_ID, HEAD, RPC]) ConfiguredChainID() (chainID CHAIN_ID) { + return n.chainID +} + +func (n *node[CHAIN_ID, HEAD, RPC]) Name() string { + return n.name +} + +func (n *node[CHAIN_ID, HEAD, RPC]) RPC() RPC { + return n.rpc +} + +// unsubscribeAllExceptAliveLoop is not thread-safe; it should only be called +// while holding the stateMu lock. +func (n *node[CHAIN_ID, HEAD, RPC]) unsubscribeAllExceptAliveLoop() { + n.rpc.UnsubscribeAllExcept(n.healthCheckSubs...) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) UnsubscribeAllExceptAliveLoop() { + n.stateMu.Lock() + defer n.stateMu.Unlock() + n.unsubscribeAllExceptAliveLoop() +} + +func (n *node[CHAIN_ID, HEAD, RPC]) Close() error { + return n.StopOnce(n.name, n.close) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) close() error { + defer func() { + n.wg.Wait() + n.rpc.Close() + }() + + n.stateMu.Lock() + defer n.stateMu.Unlock() + + close(n.stopCh) + n.state = nodeStateClosed + return nil +} + +// Start dials and verifies the node +// Should only be called once in a node's lifecycle +// Return value is necessary to conform to interface but this will never +// actually return an error. 
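+//
+// Illustrative usage sketch (assumes a concrete RPCClient implementation; nodeCfg,
+// chainCfg, rpcClient, wsURL and httpURL below are placeholder names, not part of this package):
+//
+//	n := NewNode(nodeCfg, chainCfg, lggr, wsURL, httpURL, "primary-0", 0, chainID, 1, rpcClient, "evm")
+//	if err := n.Start(ctx); err != nil {
+//		// unreachable in practice, see note above
+//	}
+//	defer n.Close()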
+func (n *node[CHAIN_ID, HEAD, RPC]) Start(startCtx context.Context) error { + return n.StartOnce(n.name, func() error { + n.start(startCtx) + return nil + }) +} + +// start initially dials the node and verifies chain types.ID +// This spins off lifecycle goroutines. +// Not thread-safe. +// Node lifecycle is synchronous: only one goroutine should be running at a +// time. +func (n *node[CHAIN_ID, HEAD, RPC]) start(startCtx context.Context) { + if n.state != nodeStateUndialed { + panic(fmt.Sprintf("cannot dial node with state %v", n.state)) + } + + if err := n.rpc.Dial(startCtx); err != nil { + n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err) + n.declareUnreachable() + return + } + n.setState(nodeStateDialed) + + state := n.verifyConn(startCtx, n.lfcLog) + n.declareState(state) +} + +// verifyChainID checks that connection to the node matches the given chain types.ID +// Not thread-safe +// Pure verifyChainID: does not mutate node "state" field. +func (n *node[CHAIN_ID, HEAD, RPC]) verifyChainID(callerCtx context.Context, lggr logger.Logger) nodeState { + promPoolRPCNodeVerifies.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc() + promFailed := func() { + promPoolRPCNodeVerifiesFailed.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc() + } + + st := n.getCachedState() + switch st { + case nodeStateClosed: + // The node is already closed, and any subsequent transition is invalid. + // To make spotting such transitions a bit easier, return the invalid node state. + return nodeStateLen + case nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing: + default: + panic(fmt.Sprintf("cannot verify node in state %v", st)) + } + + var chainID CHAIN_ID + var err error + if chainID, err = n.rpc.ChainID(callerCtx); err != nil { + promFailed() + lggr.Errorw("Failed to verify chain types.ID for node", "err", err, "nodeState", n.getCachedState()) + return nodeStateUnreachable + } else if chainID.String() != n.chainID.String() { + promFailed() + err = fmt.Errorf( + "rpc ChainID doesn't match local chain types.ID: RPC types.ID=%s, local types.ID=%s, node name=%s: %w", + chainID.String(), + n.chainID.String(), + n.name, + errInvalidChainID, + ) + lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain types.ID", "err", err, "nodeState", n.getCachedState()) + return nodeStateInvalidChainID + } + + promPoolRPCNodeVerifiesSuccess.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc() + + return nodeStateAlive +} + +// createVerifiedConn - establishes new connection with the RPC and verifies that it's valid: chainID matches, and it's not syncing. +// Returns desired state if one of the verifications fails. Otherwise, returns nodeStateAlive. +func (n *node[CHAIN_ID, HEAD, RPC]) createVerifiedConn(ctx context.Context, lggr logger.Logger) nodeState { + if err := n.rpc.Dial(ctx); err != nil { + n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err, "nodeState", n.getCachedState()) + return nodeStateUnreachable + } + + return n.verifyConn(ctx, lggr) +} + +// verifyConn - verifies that current connection is valid: chainID matches, and it's not syncing. +// Returns desired state if one of the verifications fails. Otherwise, returns nodeStateAlive. 
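+//
+// Callers typically feed the result straight back into the state machine, as
+// start and the revive loops do (sketch):
+//
+//	n.declareState(n.verifyConn(ctx, lggr))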
+func (n *node[CHAIN_ID, HEAD, RPC]) verifyConn(ctx context.Context, lggr logger.Logger) nodeState { + state := n.verifyChainID(ctx, lggr) + if state != nodeStateAlive { + return state + } + + if n.nodePoolCfg.NodeIsSyncingEnabled() { + isSyncing, err := n.rpc.IsSyncing(ctx) + if err != nil { + lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "nodeState", n.getCachedState()) + return nodeStateUnreachable + } + + if isSyncing { + lggr.Errorw("Verification failed: Node is syncing", "nodeState", n.getCachedState()) + return nodeStateSyncing + } + } + + return nodeStateAlive +} + +func (n *node[CHAIN_ID, HEAD, RPC]) Order() int32 { + return n.order +} + +func (n *node[CHAIN_ID, HEAD, RPC]) newCtx() (context.Context, context.CancelFunc) { + ctx, cancel := n.stopCh.NewCtx() + ctx = CtxAddHealthCheckFlag(ctx) + return ctx, cancel +} diff --git a/multinode/node_fsm.go b/multinode/node_fsm.go new file mode 100644 index 0000000..b707e9f --- /dev/null +++ b/multinode/node_fsm.go @@ -0,0 +1,377 @@ +package client + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var ( + promPoolRPCNodeTransitionsToAlive = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_num_transitions_to_alive", + Help: transitionString(nodeStateAlive), + }, []string{"chainID", "nodeName"}) + promPoolRPCNodeTransitionsToInSync = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_num_transitions_to_in_sync", + Help: fmt.Sprintf("%s to %s", transitionString(nodeStateOutOfSync), nodeStateAlive), + }, []string{"chainID", "nodeName"}) + promPoolRPCNodeTransitionsToOutOfSync = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_num_transitions_to_out_of_sync", + Help: transitionString(nodeStateOutOfSync), + }, []string{"chainID", "nodeName"}) + promPoolRPCNodeTransitionsToUnreachable = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_num_transitions_to_unreachable", + Help: transitionString(nodeStateUnreachable), + }, []string{"chainID", "nodeName"}) + promPoolRPCNodeTransitionsToInvalidChainID = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_num_transitions_to_invalid_chain_id", + Help: transitionString(nodeStateInvalidChainID), + }, []string{"chainID", "nodeName"}) + promPoolRPCNodeTransitionsToUnusable = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_num_transitions_to_unusable", + Help: transitionString(nodeStateUnusable), + }, []string{"chainID", "nodeName"}) + promPoolRPCNodeTransitionsToSyncing = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_num_transitions_to_syncing", + Help: transitionString(nodeStateSyncing), + }, []string{"chainID", "nodeName"}) +) + +// nodeState represents the current state of the node +// Node is a FSM (finite state machine) +type nodeState int + +func (n nodeState) String() string { + switch n { + case nodeStateUndialed: + return "Undialed" + case nodeStateDialed: + return "Dialed" + case nodeStateInvalidChainID: + return "InvalidChainID" + case nodeStateAlive: + return "Alive" + case nodeStateUnreachable: + return "Unreachable" + case nodeStateUnusable: + return "Unusable" + case nodeStateOutOfSync: + return "OutOfSync" + case nodeStateClosed: + return "Closed" + case nodeStateSyncing: + return "Syncing" + case nodeStateFinalizedBlockOutOfSync: + return "FinalizedBlockOutOfSync" + default: + return fmt.Sprintf("nodeState(%d)", n) 
+ } +} + +// GoString prints a prettier state +func (n nodeState) GoString() string { + return fmt.Sprintf("nodeState%s(%d)", n.String(), n) +} + +const ( + // nodeStateUndialed is the first state of a virgin node + nodeStateUndialed = nodeState(iota) + // nodeStateDialed is after a node has successfully dialed but before it has verified the correct chain ID + nodeStateDialed + // nodeStateInvalidChainID is after chain ID verification failed + nodeStateInvalidChainID + // nodeStateAlive is a healthy node after chain ID verification succeeded + nodeStateAlive + // nodeStateUnreachable is a node that cannot be dialed or has disconnected + nodeStateUnreachable + // nodeStateOutOfSync is a node that is accepting connections but exceeded + // the failure threshold without sending any new heads. It will be + // disconnected, then put into a revive loop and re-awakened after redial + // if a new head arrives + nodeStateOutOfSync + // nodeStateUnusable is a sendonly node that has an invalid URL that can never be reached + nodeStateUnusable + // nodeStateClosed is after the connection has been closed and the node is at the end of its lifecycle + nodeStateClosed + // nodeStateSyncing is a node that is actively back-filling the blockchain. Usually, it's a newly set up node that is + // still syncing the chain. The main difference from `nodeStateOutOfSync` is that `nodeStateOutOfSync` describes the + // node's state relative to the other primary nodes configured in the MultiNode, whereas `nodeStateSyncing` describes + // the internal state of the node (RPC) itself. + nodeStateSyncing + // nodeStateFinalizedBlockOutOfSync - node is lagging behind on latest finalized block + nodeStateFinalizedBlockOutOfSync + // nodeStateLen tracks the number of states + nodeStateLen +) + +// allNodeStates represents all possible states a node can be in +var allNodeStates []nodeState + +func init() { + for s := nodeState(0); s < nodeStateLen; s++ { + allNodeStates = append(allNodeStates, s) + } +} + +// FSM methods + +// State allows reading the current state of the node.
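+// Unlike getCachedState, it re-derives the value via recalculateState, so an
+// Alive node that lags on finalized blocks (when EnforceRepeatableRead is
+// enabled) is reported as FinalizedBlockOutOfSync. A minimal caller sketch:
+//
+//	if n.State() != nodeStateAlive {
+//		// skip this node when selecting an RPC for a request
+//	}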
+func (n *node[CHAIN_ID, HEAD, RPC]) State() nodeState { + n.stateMu.RLock() + defer n.stateMu.RUnlock() + return n.recalculateState() +} + +func (n *node[CHAIN_ID, HEAD, RPC]) getCachedState() nodeState { + n.stateMu.RLock() + defer n.stateMu.RUnlock() + return n.state +} + +func (n *node[CHAIN_ID, HEAD, RPC]) recalculateState() nodeState { + if n.state != nodeStateAlive { + return n.state + } + + // double check that node is not lagging on finalized block + if n.nodePoolCfg.EnforceRepeatableRead() && n.isFinalizedBlockOutOfSync() { + return nodeStateFinalizedBlockOutOfSync + } + + return nodeStateAlive +} + +func (n *node[CHAIN_ID, HEAD, RPC]) isFinalizedBlockOutOfSync() bool { + if n.poolInfoProvider == nil { + return false + } + + highestObservedByCaller := n.poolInfoProvider.HighestUserObservations() + latest, rpcHighest := n.rpc.GetInterceptedChainInfo() + isOutOfSync := false + if n.chainCfg.FinalityTagEnabled() { + isOutOfSync = latest.FinalizedBlockNumber < highestObservedByCaller.FinalizedBlockNumber-int64(n.chainCfg.FinalizedBlockOffset()) + } else { + isOutOfSync = latest.BlockNumber < highestObservedByCaller.BlockNumber-int64(n.chainCfg.FinalizedBlockOffset()) + } + + if isOutOfSync { + n.lfcLog.Debugw("finalized block is out of sync", "rpcLatestChainInfo", latest, "rpcHighest", rpcHighest, "highestObservedByCaller", highestObservedByCaller) + } + + return isOutOfSync +} + +// StateAndLatest returns nodeState with the latest ChainInfo observed by Node during current lifecycle. +func (n *node[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, ChainInfo) { + n.stateMu.RLock() + defer n.stateMu.RUnlock() + latest, _ := n.rpc.GetInterceptedChainInfo() + return n.recalculateState(), latest +} + +// HighestUserObservations - returns highest ChainInfo ever observed by external user of the Node +func (n *node[CHAIN_ID, HEAD, RPC]) HighestUserObservations() ChainInfo { + _, highestUserObservations := n.rpc.GetInterceptedChainInfo() + return highestUserObservations +} +func (n *node[CHAIN_ID, HEAD, RPC]) SetPoolChainInfoProvider(poolInfoProvider PoolChainInfoProvider) { + n.poolInfoProvider = poolInfoProvider +} + +// setState is only used by internal state management methods. +// This is low-level; care should be taken by the caller to ensure the new state is a valid transition. +// State changes should always be synchronous: only one goroutine at a time should change state. 
+// n.stateMu should not be locked for long periods of time because external clients expect a timely response from n.State() +func (n *node[CHAIN_ID, HEAD, RPC]) setState(s nodeState) { + n.stateMu.Lock() + defer n.stateMu.Unlock() + n.state = s +} + +// declareXXX methods change the state and pass control off to the new state +// management goroutine + +func (n *node[CHAIN_ID, HEAD, RPC]) declareAlive() { + n.transitionToAlive(func() { + n.lfcLog.Infow("RPC Node is online", "nodeState", n.state) + n.wg.Add(1) + go n.aliveLoop() + }) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) transitionToAlive(fn func()) { + promPoolRPCNodeTransitionsToAlive.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == nodeStateClosed { + return + } + switch n.state { + case nodeStateDialed, nodeStateInvalidChainID, nodeStateSyncing: + n.state = nodeStateAlive + default: + panic(transitionFail(n.state, nodeStateAlive)) + } + fn() +} + +// declareInSync puts a node back into Alive state, allowing it to be used by +// pool consumers again +func (n *node[CHAIN_ID, HEAD, RPC]) declareInSync() { + n.transitionToInSync(func() { + n.lfcLog.Infow("RPC Node is back in sync", "nodeState", n.state) + n.wg.Add(1) + go n.aliveLoop() + }) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInSync(fn func()) { + promPoolRPCNodeTransitionsToAlive.WithLabelValues(n.chainID.String(), n.name).Inc() + promPoolRPCNodeTransitionsToInSync.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == nodeStateClosed { + return + } + switch n.state { + case nodeStateOutOfSync, nodeStateSyncing: + n.state = nodeStateAlive + default: + panic(transitionFail(n.state, nodeStateAlive)) + } + fn() +} + +// declareOutOfSync puts a node into OutOfSync state, disconnecting all current +// clients and making it unavailable for use until back in-sync.
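+//
+// Lifecycle loops call it with the sync issues they detected; a minimal sketch
+// mirroring aliveLoop:
+//
+//	n.declareOutOfSync(syncStatusNoNewHead)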
+func (n *node[CHAIN_ID, HEAD, RPC]) declareOutOfSync(syncIssues syncStatus) { + n.transitionToOutOfSync(func() { + n.lfcLog.Errorw("RPC Node is out of sync", "nodeState", n.state, "syncIssues", syncIssues) + n.wg.Add(1) + go n.outOfSyncLoop(syncIssues) + }) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) transitionToOutOfSync(fn func()) { + promPoolRPCNodeTransitionsToOutOfSync.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == nodeStateClosed { + return + } + switch n.state { + case nodeStateAlive: + n.rpc.Close() + n.state = nodeStateOutOfSync + default: + panic(transitionFail(n.state, nodeStateOutOfSync)) + } + fn() +} + +func (n *node[CHAIN_ID, HEAD, RPC]) declareUnreachable() { + n.transitionToUnreachable(func() { + n.lfcLog.Errorw("RPC Node is unreachable", "nodeState", n.state) + n.wg.Add(1) + go n.unreachableLoop() + }) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) transitionToUnreachable(fn func()) { + promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == nodeStateClosed { + return + } + switch n.state { + case nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing: + n.rpc.Close() + n.state = nodeStateUnreachable + default: + panic(transitionFail(n.state, nodeStateUnreachable)) + } + fn() +} + +func (n *node[CHAIN_ID, HEAD, RPC]) declareState(state nodeState) { + if n.getCachedState() == nodeStateClosed { + return + } + switch state { + case nodeStateInvalidChainID: + n.declareInvalidChainID() + case nodeStateUnreachable: + n.declareUnreachable() + case nodeStateSyncing: + n.declareSyncing() + case nodeStateAlive: + n.declareAlive() + default: + panic(fmt.Sprintf("%#v state declaration is not implemented", state)) + } +} + +func (n *node[CHAIN_ID, HEAD, RPC]) declareInvalidChainID() { + n.transitionToInvalidChainID(func() { + n.lfcLog.Errorw("RPC Node has the wrong chain ID", "nodeState", n.state) + n.wg.Add(1) + go n.invalidChainIDLoop() + }) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInvalidChainID(fn func()) { + promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == nodeStateClosed { + return + } + switch n.state { + case nodeStateDialed, nodeStateOutOfSync, nodeStateSyncing: + n.rpc.Close() + n.state = nodeStateInvalidChainID + default: + panic(transitionFail(n.state, nodeStateInvalidChainID)) + } + fn() +} + +func (n *node[CHAIN_ID, HEAD, RPC]) declareSyncing() { + n.transitionToSyncing(func() { + n.lfcLog.Errorw("RPC Node is syncing", "nodeState", n.state) + n.wg.Add(1) + go n.syncingLoop() + }) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) transitionToSyncing(fn func()) { + promPoolRPCNodeTransitionsToSyncing.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == nodeStateClosed { + return + } + switch n.state { + case nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID: + n.rpc.Close() + n.state = nodeStateSyncing + default: + panic(transitionFail(n.state, nodeStateSyncing)) + } + + if !n.nodePoolCfg.NodeIsSyncingEnabled() { + panic("unexpected transition to nodeStateSyncing, while it's disabled") + } + fn() +} + +func transitionString(state nodeState) string { + return fmt.Sprintf("Total number of times node has transitioned to %s", state) +} + +func transitionFail(from nodeState, to nodeState) 
string { + return fmt.Sprintf("cannot transition from %#v to %#v", from, to) +} diff --git a/multinode/node_fsm_test.go b/multinode/node_fsm_test.go new file mode 100644 index 0000000..af317a6 --- /dev/null +++ b/multinode/node_fsm_test.go @@ -0,0 +1,131 @@ +package client + +import ( + "slices" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +type fnMock struct{ calls int } + +func (fm *fnMock) Fn() { + fm.calls++ +} + +func (fm *fnMock) AssertNotCalled(t *testing.T) { + assert.Equal(t, 0, fm.calls) +} + +func (fm *fnMock) AssertCalled(t *testing.T) { + assert.Greater(t, fm.calls, 0) +} + +func TestUnit_Node_StateTransitions(t *testing.T) { + t.Parallel() + + t.Run("setState", func(t *testing.T) { + n := newTestNode(t, testNodeOpts{rpc: nil, config: testNodeConfig{nodeIsSyncingEnabled: true}}) + assert.Equal(t, nodeStateUndialed, n.State()) + n.setState(nodeStateAlive) + assert.Equal(t, nodeStateAlive, n.State()) + n.setState(nodeStateUndialed) + assert.Equal(t, nodeStateUndialed, n.State()) + }) + + t.Run("transitionToAlive", func(t *testing.T) { + const destinationState = nodeStateAlive + allowedStates := []nodeState{nodeStateDialed, nodeStateInvalidChainID, nodeStateSyncing} + rpc := newMockRPCClient[types.ID, Head](t) + testTransition(t, rpc, testNode.transitionToAlive, destinationState, allowedStates...) + }) + + t.Run("transitionToInSync", func(t *testing.T) { + const destinationState = nodeStateAlive + allowedStates := []nodeState{nodeStateOutOfSync, nodeStateSyncing} + rpc := newMockRPCClient[types.ID, Head](t) + testTransition(t, rpc, testNode.transitionToInSync, destinationState, allowedStates...) + }) + t.Run("transitionToOutOfSync", func(t *testing.T) { + const destinationState = nodeStateOutOfSync + allowedStates := []nodeState{nodeStateAlive} + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("Close") + testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) + }) + t.Run("transitionToUnreachable", func(t *testing.T) { + const destinationState = nodeStateUnreachable + allowedStates := []nodeState{nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing} + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("Close") + testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) + }) + t.Run("transitionToInvalidChain", func(t *testing.T) { + const destinationState = nodeStateInvalidChainID + allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateSyncing} + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("Close") + testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) + }) + t.Run("transitionToSyncing", func(t *testing.T) { + const destinationState = nodeStateSyncing + allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID} + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("Close") + testTransition(t, rpc, testNode.transitionToSyncing, destinationState, allowedStates...) 
+ }) + t.Run("transitionToSyncing panics if nodeIsSyncing is disabled", func(t *testing.T) { + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("Close") + node := newTestNode(t, testNodeOpts{rpc: rpc}) + node.setState(nodeStateDialed) + fn := new(fnMock) + defer fn.AssertNotCalled(t) + assert.PanicsWithValue(t, "unexpected transition to nodeStateSyncing, while it's disabled", func() { + node.transitionToSyncing(fn.Fn) + }) + }) +} + +func testTransition(t *testing.T, rpc *mockRPCClient[types.ID, Head], transition func(node testNode, fn func()), destinationState nodeState, allowedStates ...nodeState) { + node := newTestNode(t, testNodeOpts{rpc: rpc, config: testNodeConfig{nodeIsSyncingEnabled: true}}) + for _, allowedState := range allowedStates { + m := new(fnMock) + node.setState(allowedState) + transition(node, m.Fn) + assert.Equal(t, destinationState, node.State(), "Expected node to successfully transition from %s to %s state", allowedState, destinationState) + m.AssertCalled(t) + } + // noop on attempt to transition from Closed state + m := new(fnMock) + node.setState(nodeStateClosed) + transition(node, m.Fn) + m.AssertNotCalled(t) + assert.Equal(t, nodeStateClosed, node.State(), "Expected node to remain in closed state on transition attempt") + + for _, nodeState := range allNodeStates { + if slices.Contains(allowedStates, nodeState) || nodeState == nodeStateClosed { + continue + } + + m := new(fnMock) + node.setState(nodeState) + assert.Panics(t, func() { + transition(node, m.Fn) + }, "Expected transition from `%s` to `%s` to panic", nodeState, destinationState) + m.AssertNotCalled(t) + assert.Equal(t, nodeState, node.State(), "Expected node to remain in initial state on invalid transition") + } +} + +func TestNodeState_String(t *testing.T) { + t.Run("Ensure all states are meaningful when converted to string", func(t *testing.T) { + for _, ns := range allNodeStates { + // ensure that string representation is not nodeState(%d) + assert.NotContains(t, ns.String(), strconv.FormatInt(int64(ns), 10), "Expected node state to have readable name") + } + }) +} diff --git a/multinode/node_lifecycle.go b/multinode/node_lifecycle.go new file mode 100644 index 0000000..aaf62f1 --- /dev/null +++ b/multinode/node_lifecycle.go @@ -0,0 +1,700 @@ +package client + +import ( + "context" + "fmt" + "math" + "math/big" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/utils" + bigmath "github.com/smartcontractkit/chainlink-common/pkg/utils/big_math" + + "github.com/smartcontractkit/chainlink-framework/types" + iutils "github.com/smartcontractkit/chainlink-framework/utils" +) + +var ( + promPoolRPCNodeHighestSeenBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "pool_rpc_node_highest_seen_block", + Help: "The highest seen block for the given RPC node", + }, []string{"chainID", "nodeName"}) + promPoolRPCNodeHighestFinalizedBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "pool_rpc_node_highest_finalized_block", + Help: "The highest seen finalized block for the given RPC node", + }, []string{"chainID", "nodeName"}) + promPoolRPCNodeNumSeenBlocks = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_num_seen_blocks", + Help: "The total number of new blocks seen by the given RPC node", + }, []string{"chainID", "nodeName"}) + promPoolRPCNodePolls = 
promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_polls_total", + Help: "The total number of poll checks for the given RPC node", + }, []string{"chainID", "nodeName"}) + promPoolRPCNodePollsFailed = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_polls_failed", + Help: "The total number of failed poll checks for the given RPC node", + }, []string{"chainID", "nodeName"}) + promPoolRPCNodePollsSuccess = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_polls_success", + Help: "The total number of successful poll checks for the given RPC node", + }, []string{"chainID", "nodeName"}) +) + +// zombieNodeCheckInterval controls how often to re-check to see if we need to +// state change in case we have to force a state transition due to no available +// nodes. +// NOTE: This only applies to out-of-sync nodes if they are the last available node +func zombieNodeCheckInterval(noNewHeadsThreshold time.Duration) time.Duration { + interval := noNewHeadsThreshold + if interval <= 0 || interval > QueryTimeout { + interval = QueryTimeout + } + return utils.WithJitter(interval) +} + +const ( + msgCannotDisable = "but cannot disable this connection because there are no other RPC endpoints, or all other RPC endpoints are dead." + msgDegradedState = "Chainlink is now operating in a degraded state and urgent action is required to resolve the issue" +) + +// Node is a FSM +// Each state has a loop that goes with it, which monitors the node and moves it into another state as necessary. +// Only one loop must run at a time. +// Each loop passes control onto the next loop as it exits, except when the node is Closed which terminates the loop permanently. + +// This handles node lifecycle for the ALIVE state +// Should only be run ONCE per node, after a successful Dial +func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { + defer n.wg.Done() + ctx, cancel := n.newCtx() + defer cancel() + + { + // sanity check + state := n.getCachedState() + switch state { + case nodeStateAlive: + case nodeStateClosed: + return + default: + panic(fmt.Sprintf("aliveLoop can only run for node in Alive state, got: %s", state)) + } + } + + noNewHeadsTimeoutThreshold := n.chainCfg.NodeNoNewHeadsThreshold() + noNewFinalizedBlocksTimeoutThreshold := n.chainCfg.NoNewFinalizedHeadsThreshold() + pollFailureThreshold := n.nodePoolCfg.PollFailureThreshold() + pollInterval := n.nodePoolCfg.PollInterval() + + lggr := logger.Sugared(n.lfcLog).Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) + lggr.Tracew("Alive loop starting", "nodeState", n.getCachedState()) + + headsSub, err := n.registerNewSubscription(ctx, lggr.With("subscriptionType", "heads"), + n.chainCfg.NodeNoNewHeadsThreshold(), n.rpc.SubscribeToHeads) + if err != nil { + lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.getCachedState(), "err", err) + n.declareUnreachable() + return + } + + defer n.unsubscribeHealthChecks() + + var pollCh <-chan time.Time + if pollInterval > 0 { + lggr.Debug("Polling enabled") + pollT := time.NewTicker(pollInterval) + defer pollT.Stop() + pollCh = pollT.C + if pollFailureThreshold > 0 { + // polling can be enabled with no threshold to enable polling but + // the node will not be marked offline regardless of the number of + // poll failures + lggr.Debug("Polling liveness checking enabled") + } + } else { + lggr.Debug("Polling disabled") + } + + var finalizedHeadsSub 
headSubscription[HEAD] + if n.chainCfg.FinalityTagEnabled() { + finalizedHeadsSub, err = n.registerNewSubscription(ctx, lggr.With("subscriptionType", "finalizedHeads"), + n.chainCfg.NoNewFinalizedHeadsThreshold(), n.rpc.SubscribeToFinalizedHeads) + if err != nil { + lggr.Errorw("Failed to subscribe to finalized heads", "err", err) + n.declareUnreachable() + return + } + } + + // Get the latest chain info to use as local highest + localHighestChainInfo, _ := n.rpc.GetInterceptedChainInfo() + var pollFailures uint32 + + for { + select { + case <-ctx.Done(): + return + case <-pollCh: + promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() + lggr.Tracew("Pinging RPC", "nodeState", n.State(), "pollFailures", pollFailures) + pollCtx, cancel := context.WithTimeout(ctx, pollInterval) + err = n.RPC().Ping(pollCtx) + cancel() + if err != nil { + // prevent overflow + if pollFailures < math.MaxUint32 { + promPoolRPCNodePollsFailed.WithLabelValues(n.chainID.String(), n.name).Inc() + pollFailures++ + } + lggr.Warnw(fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", n.String()), "err", err, "pollFailures", pollFailures, "nodeState", n.getCachedState()) + } else { + lggr.Debugw("Ping successful", "nodeState", n.State()) + promPoolRPCNodePollsSuccess.WithLabelValues(n.chainID.String(), n.name).Inc() + pollFailures = 0 + } + if pollFailureThreshold > 0 && pollFailures >= pollFailureThreshold { + lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "nodeState", n.getCachedState()) + if n.poolInfoProvider != nil { + if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 { + lggr.Criticalf("RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState) + continue + } + } + n.declareUnreachable() + return + } + if outOfSync, liveNodes := n.isOutOfSyncWithPool(); outOfSync { + // note: there must be another live node for us to be out of sync + if liveNodes < 2 { + lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) + continue + } + n.declareOutOfSync(syncStatusNotInSyncWithPool) + return + } + case bh, open := <-headsSub.Heads: + if !open { + lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.getCachedState()) + n.declareUnreachable() + return + } + receivedNewHead := n.onNewHead(lggr, &localHighestChainInfo, bh) + if receivedNewHead && noNewHeadsTimeoutThreshold > 0 { + headsSub.ResetTimer(noNewHeadsTimeoutThreshold) + } + case err = <-headsSub.Errors: + lggr.Errorw("Subscription was terminated", "err", err, "nodeState", n.getCachedState()) + n.declareUnreachable() + return + case <-headsSub.NoNewHeads: + // We haven't received a head on the channel for at least the + // threshold amount of time, mark it broken + lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, localHighestChainInfo.BlockNumber), "nodeState", n.getCachedState(), "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) + if n.poolInfoProvider != nil { + if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 { + lggr.Criticalf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState) + // We don't necessarily want to wait the full timeout to check again, we should + // check regularly and log noisily in this state + 
headsSub.ResetTimer(zombieNodeCheckInterval(noNewHeadsTimeoutThreshold)) + continue + } + } + n.declareOutOfSync(syncStatusNoNewHead) + return + case latestFinalized, open := <-finalizedHeadsSub.Heads: + if !open { + lggr.Errorw("Finalized heads subscription channel unexpectedly closed") + n.declareUnreachable() + return + } + + receivedNewHead := n.onNewFinalizedHead(lggr, &localHighestChainInfo, latestFinalized) + if receivedNewHead && noNewFinalizedBlocksTimeoutThreshold > 0 { + finalizedHeadsSub.ResetTimer(noNewFinalizedBlocksTimeoutThreshold) + } + case <-finalizedHeadsSub.NoNewHeads: + // We haven't received a finalized head on the channel for at least the + // threshold amount of time, mark it broken + lggr.Errorw(fmt.Sprintf("RPC's finalized state is out of sync; no new finalized heads received for %s (last finalized head received was %v)", noNewFinalizedBlocksTimeoutThreshold, localHighestChainInfo.FinalizedBlockNumber), "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber) + if n.poolInfoProvider != nil { + if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 { + lggr.Criticalf("RPC's finalized state is out of sync; %s %s", msgCannotDisable, msgDegradedState) + // We don't necessarily want to wait the full timeout to check again, we should + // check regularly and log noisily in this state + finalizedHeadsSub.ResetTimer(zombieNodeCheckInterval(noNewFinalizedBlocksTimeoutThreshold)) + continue + } + } + n.declareOutOfSync(syncStatusNoNewFinalizedHead) + return + case <-finalizedHeadsSub.Errors: + lggr.Errorw("Finalized heads subscription was terminated", "err", err) + n.declareUnreachable() + return + } + } +} + +func (n *node[CHAIN_ID, HEAD, RPC]) unsubscribeHealthChecks() { + n.stateMu.Lock() + for _, sub := range n.healthCheckSubs { + sub.Unsubscribe() + } + n.healthCheckSubs = []types.Subscription{} + n.stateMu.Unlock() +} + +type headSubscription[HEAD any] struct { + Heads <-chan HEAD + Errors <-chan error + NoNewHeads <-chan time.Time + + noNewHeadsTicker *time.Ticker + sub types.Subscription + cleanUpTasks []func() +} + +func (sub *headSubscription[HEAD]) ResetTimer(duration time.Duration) { + sub.noNewHeadsTicker.Reset(duration) +} + +func (sub *headSubscription[HEAD]) Unsubscribe() { + for _, doCleanUp := range sub.cleanUpTasks { + doCleanUp() + } +} + +func (n *node[CHAIN_ID, HEAD, PRC]) registerNewSubscription(ctx context.Context, lggr logger.SugaredLogger, + noNewDataThreshold time.Duration, newSub func(ctx context.Context) (<-chan HEAD, types.Subscription, error)) (headSubscription[HEAD], error) { + result := headSubscription[HEAD]{} + var err error + var sub types.Subscription + result.Heads, sub, err = newSub(ctx) + if err != nil { + return result, err + } + + result.Errors = sub.Err() + lggr.Debug("Successfully subscribed") + + result.sub = sub + n.stateMu.Lock() + n.healthCheckSubs = append(n.healthCheckSubs, sub) + n.stateMu.Unlock() + + result.cleanUpTasks = append(result.cleanUpTasks, sub.Unsubscribe) + + if noNewDataThreshold > 0 { + lggr.Debugw("Subscription liveness checking enabled") + result.noNewHeadsTicker = time.NewTicker(noNewDataThreshold) + result.NoNewHeads = result.noNewHeadsTicker.C + result.cleanUpTasks = append(result.cleanUpTasks, result.noNewHeadsTicker.Stop) + } else { + lggr.Debug("Subscription liveness checking disabled") + } + + return result, nil +} + +func (n *node[CHAIN_ID, HEAD, RPC]) onNewFinalizedHead(lggr logger.SugaredLogger, chainInfo *ChainInfo, latestFinalized HEAD) bool { + if !latestFinalized.IsValid() { + 
lggr.Warn("Latest finalized block is not valid") + return false + } + + latestFinalizedBN := latestFinalized.BlockNumber() + lggr.Debugw("Got latest finalized head", "latestFinalized", latestFinalized) + if latestFinalizedBN <= chainInfo.FinalizedBlockNumber { + lggr.Debugw("Ignoring previously seen finalized block number") + return false + } + + promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) + chainInfo.FinalizedBlockNumber = latestFinalizedBN + return true +} + +func (n *node[CHAIN_ID, HEAD, RPC]) onNewHead(lggr logger.SugaredLogger, chainInfo *ChainInfo, head HEAD) bool { + if !head.IsValid() { + lggr.Warn("Latest head is not valid") + return false + } + + promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() + lggr.Debugw("Got head", "head", head) + lggr = lggr.With("latestReceivedBlockNumber", chainInfo.BlockNumber, "blockNumber", head.BlockNumber(), "nodeState", n.getCachedState()) + if head.BlockNumber() <= chainInfo.BlockNumber { + lggr.Debugw("Ignoring previously seen block number") + return false + } + + promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(head.BlockNumber())) + chainInfo.BlockNumber = head.BlockNumber() + + if !n.chainCfg.FinalityTagEnabled() { + latestFinalizedBN := max(head.BlockNumber()-int64(n.chainCfg.FinalityDepth()), 0) + if latestFinalizedBN > chainInfo.FinalizedBlockNumber { + promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) + chainInfo.FinalizedBlockNumber = latestFinalizedBN + } + } + + return true +} + +const ( + msgReceivedBlock = "Received block for RPC node, waiting until back in-sync to mark as live again" + msgReceivedFinalizedBlock = "Received new finalized block for RPC node, waiting until back in-sync to mark as live again" + msgInSync = "RPC node back in sync" +) + +// isOutOfSyncWithPool returns outOfSync true if num or td is more than SyncThresold behind the best node. +// Always returns outOfSync false for SyncThreshold 0. +// liveNodes is only included when outOfSync is true. 
+func (n *node[CHAIN_ID, HEAD, RPC]) isOutOfSyncWithPool() (outOfSync bool, liveNodes int) { + if n.poolInfoProvider == nil { + n.lfcLog.Warn("skipping sync state against the pool - should only occur in tests") + return // skip for tests + } + threshold := n.nodePoolCfg.SyncThreshold() + if threshold == 0 { + return // disabled + } + // Check against best node + ln, ci := n.poolInfoProvider.LatestChainInfo() + localChainInfo, _ := n.rpc.GetInterceptedChainInfo() + mode := n.nodePoolCfg.SelectionMode() + switch mode { + case NodeSelectionModeHighestHead, NodeSelectionModeRoundRobin, NodeSelectionModePriorityLevel: + outOfSync = localChainInfo.BlockNumber < ci.BlockNumber-int64(threshold) + case NodeSelectionModeTotalDifficulty: + bigThreshold := big.NewInt(int64(threshold)) + outOfSync = localChainInfo.TotalDifficulty.Cmp(bigmath.Sub(ci.TotalDifficulty, bigThreshold)) < 0 + default: + panic("unrecognized NodeSelectionMode: " + mode) + } + + if outOfSync && n.getCachedState() == nodeStateAlive { + n.lfcLog.Errorw("RPC endpoint has fallen behind", "blockNumber", localChainInfo.BlockNumber, "bestLatestBlockNumber", ci.BlockNumber, "totalDifficulty", localChainInfo.TotalDifficulty) + } + return outOfSync, ln +} + +// outOfSyncLoop takes an OutOfSync node and waits until isOutOfSync returns false to go back to live status +func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(syncIssues syncStatus) { + defer n.wg.Done() + ctx, cancel := n.newCtx() + defer cancel() + + { + // sanity check + state := n.getCachedState() + switch state { + case nodeStateOutOfSync: + case nodeStateClosed: + return + default: + panic(fmt.Sprintf("outOfSyncLoop can only run for node in OutOfSync state, got: %s", state)) + } + } + + outOfSyncAt := time.Now() + + // set logger name to OutOfSync or FinalizedBlockOutOfSync + lggr := logger.Sugared(logger.Named(n.lfcLog, n.getCachedState().String())).With("nodeState", n.getCachedState()) + lggr.Debugw("Trying to revive out-of-sync RPC node") + + // Need to redial since out-of-sync nodes are automatically disconnected + state := n.createVerifiedConn(ctx, lggr) + if state != nodeStateAlive { + n.declareState(state) + return + } + + noNewHeadsTimeoutThreshold := n.chainCfg.NodeNoNewHeadsThreshold() + headsSub, err := n.registerNewSubscription(ctx, lggr.With("subscriptionType", "heads"), + noNewHeadsTimeoutThreshold, n.rpc.SubscribeToHeads) + if err != nil { + lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "err", err) + n.declareUnreachable() + return + } + + defer n.unsubscribeHealthChecks() + + lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node") + + noNewFinalizedBlocksTimeoutThreshold := n.chainCfg.NoNewFinalizedHeadsThreshold() + var finalizedHeadsSub headSubscription[HEAD] + if n.chainCfg.FinalityTagEnabled() { + finalizedHeadsSub, err = n.registerNewSubscription(ctx, lggr.With("subscriptionType", "finalizedHeads"), + noNewFinalizedBlocksTimeoutThreshold, n.rpc.SubscribeToFinalizedHeads) + if err != nil { + lggr.Errorw("Subscribe to finalized heads failed on out-of-sync RPC node", "err", err) + n.declareUnreachable() + return + } + + lggr.Tracew("Successfully subscribed to finalized heads feed on out-of-sync RPC node") + } + + _, localHighestChainInfo := n.rpc.GetInterceptedChainInfo() + for { + if syncIssues == syncStatusSynced { + // back in-sync! flip back into alive loop + lggr.Infow(fmt.Sprintf("%s: %s. 
Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt))) + n.declareInSync() + return + } + + select { + case <-ctx.Done(): + return + case head, open := <-headsSub.Heads: + if !open { + lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.getCachedState()) + n.declareUnreachable() + return + } + + if !n.onNewHead(lggr, &localHighestChainInfo, head) { + continue + } + + // received a new head - clear NoNewHead flag + syncIssues &= ^syncStatusNoNewHead + if outOfSync, _ := n.isOutOfSyncWithPool(); !outOfSync { + // we caught up with the pool - clear NotInSyncWithPool flag + syncIssues &= ^syncStatusNotInSyncWithPool + } else { + // we've received new head, but lagging behind the pool, add NotInSyncWithPool flag to prevent false transition to alive + syncIssues |= syncStatusNotInSyncWithPool + } + + if noNewHeadsTimeoutThreshold > 0 { + headsSub.ResetTimer(noNewHeadsTimeoutThreshold) + } + + lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "syncIssues", syncIssues) + case <-time.After(zombieNodeCheckInterval(noNewHeadsTimeoutThreshold)): + if n.poolInfoProvider != nil { + if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 1 { + lggr.Criticalw("RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state", "syncIssues", syncIssues) + n.declareInSync() + return + } + } + case err := <-headsSub.Errors: + lggr.Errorw("Subscription was terminated", "err", err) + n.declareUnreachable() + return + case <-headsSub.NoNewHeads: + // we are not resetting the timer, as there is no need to add syncStatusNoNewHead until it's removed on new head. + syncIssues |= syncStatusNoNewHead + lggr.Debugw(fmt.Sprintf("No new heads received for %s. Node stays out-of-sync due to sync issues: %s", noNewHeadsTimeoutThreshold, syncIssues)) + case latestFinalized, open := <-finalizedHeadsSub.Heads: + if !open { + lggr.Errorw("Finalized heads subscription channel unexpectedly closed") + n.declareUnreachable() + return + } + if !latestFinalized.IsValid() { + lggr.Warn("Latest finalized block is not valid") + continue + } + + receivedNewHead := n.onNewFinalizedHead(lggr, &localHighestChainInfo, latestFinalized) + if !receivedNewHead { + continue + } + + // on new finalized head remove NoNewFinalizedHead flag from the mask + syncIssues &= ^syncStatusNoNewFinalizedHead + if noNewFinalizedBlocksTimeoutThreshold > 0 { + finalizedHeadsSub.ResetTimer(noNewFinalizedBlocksTimeoutThreshold) + } + + var highestSeen ChainInfo + if n.poolInfoProvider != nil { + highestSeen = n.poolInfoProvider.HighestUserObservations() + } + + lggr.Debugw(msgReceivedFinalizedBlock, "blockNumber", latestFinalized.BlockNumber(), "poolHighestBlockNumber", highestSeen.FinalizedBlockNumber, "syncIssues", syncIssues) + case err := <-finalizedHeadsSub.Errors: + lggr.Errorw("Finalized head subscription was terminated", "err", err) + n.declareUnreachable() + return + case <-finalizedHeadsSub.NoNewHeads: + // we are not resetting the timer, as there is no need to add syncStatusNoNewFinalizedHead until it's removed on new finalized head. + syncIssues |= syncStatusNoNewFinalizedHead + lggr.Debugw(fmt.Sprintf("No new finalized heads received for %s. 
Node stays out-of-sync due to sync issues: %s", noNewFinalizedBlocksTimeoutThreshold, syncIssues)) + } + } +} + +func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { + defer n.wg.Done() + ctx, cancel := n.newCtx() + defer cancel() + + { + // sanity check + state := n.getCachedState() + switch state { + case nodeStateUnreachable: + case nodeStateClosed: + return + default: + panic(fmt.Sprintf("unreachableLoop can only run for node in Unreachable state, got: %s", state)) + } + } + + unreachableAt := time.Now() + + lggr := logger.Sugared(logger.Named(n.lfcLog, "Unreachable")) + lggr.Debugw("Trying to revive unreachable RPC node", "nodeState", n.getCachedState()) + + dialRetryBackoff := iutils.NewRedialBackoff() + + for { + select { + case <-ctx.Done(): + return + case <-time.After(dialRetryBackoff.Duration()): + lggr.Tracew("Trying to re-dial RPC node", "nodeState", n.getCachedState()) + + err := n.rpc.Dial(ctx) + if err != nil { + lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; still unreachable: %v", err), "err", err, "nodeState", n.getCachedState()) + continue + } + + n.setState(nodeStateDialed) + + state := n.verifyConn(ctx, lggr) + switch state { + case nodeStateUnreachable: + n.setState(nodeStateUnreachable) + continue + case nodeStateAlive: + lggr.Infow(fmt.Sprintf("Successfully redialled and verified RPC node %s. Node was offline for %s", n.String(), time.Since(unreachableAt)), "nodeState", n.getCachedState()) + fallthrough + default: + n.declareState(state) + return + } + } + } +} + +func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { + defer n.wg.Done() + ctx, cancel := n.newCtx() + defer cancel() + + { + // sanity check + state := n.getCachedState() + switch state { + case nodeStateInvalidChainID: + case nodeStateClosed: + return + default: + panic(fmt.Sprintf("invalidChainIDLoop can only run for node in InvalidChainID state, got: %s", state)) + } + } + + invalidAt := time.Now() + + lggr := logger.Named(n.lfcLog, "InvalidChainID") + + // Need to redial since invalid chain ID nodes are automatically disconnected + state := n.createVerifiedConn(ctx, lggr) + if state != nodeStateInvalidChainID { + n.declareState(state) + return + } + + lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with invalid chain ID", n.String()), "nodeState", n.getCachedState()) + + chainIDRecheckBackoff := iutils.NewRedialBackoff() + + for { + select { + case <-ctx.Done(): + return + case <-time.After(chainIDRecheckBackoff.Duration()): + state := n.verifyConn(ctx, lggr) + switch state { + case nodeStateInvalidChainID: + continue + case nodeStateAlive: + lggr.Infow(fmt.Sprintf("Successfully verified RPC node. 
Node was offline for %s", time.Since(invalidAt)), "nodeState", n.getCachedState()) + fallthrough + default: + n.declareState(state) + return + } + } + } +} + +func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { + defer n.wg.Done() + ctx, cancel := n.newCtx() + defer cancel() + + { + // sanity check + state := n.getCachedState() + switch state { + case nodeStateSyncing: + case nodeStateClosed: + return + default: + panic(fmt.Sprintf("syncingLoop can only run for node in NodeStateSyncing state, got: %s", state)) + } + } + + syncingAt := time.Now() + + lggr := logger.Sugared(logger.Named(n.lfcLog, "Syncing")) + lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with syncing status", n.String()), "nodeState", n.getCachedState()) + // Need to redial since syncing nodes are automatically disconnected + state := n.createVerifiedConn(ctx, lggr) + if state != nodeStateSyncing { + n.declareState(state) + return + } + + recheckBackoff := iutils.NewRedialBackoff() + + for { + select { + case <-ctx.Done(): + return + case <-time.After(recheckBackoff.Duration()): + lggr.Tracew("Trying to recheck if the node is still syncing", "nodeState", n.getCachedState()) + isSyncing, err := n.rpc.IsSyncing(ctx) + if err != nil { + lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "nodeState", n.getCachedState()) + n.declareUnreachable() + return + } + + if isSyncing { + lggr.Errorw("Verification failed: Node is syncing", "nodeState", n.getCachedState()) + continue + } + + lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was syncing for %s", time.Since(syncingAt)), "nodeState", n.getCachedState()) + n.declareAlive() + return + } + } +} diff --git a/multinode/node_lifecycle_test.go b/multinode/node_lifecycle_test.go new file mode 100644 index 0000000..d357ef8 --- /dev/null +++ b/multinode/node_lifecycle_test.go @@ -0,0 +1,1983 @@ +package client + +import ( + "errors" + "fmt" + "github.com/smartcontractkit/chainlink-framework/types" + "math/big" + "sync" + "sync/atomic" + "testing" + + "github.com/cometbft/cometbft/libs/rand" + prom "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + clientMocks "github.com/smartcontractkit/chainlink-framework/multinode/mocks" + "github.com/smartcontractkit/chainlink-framework/types/mocks" +) + +func newSub(t *testing.T) *mocks.Subscription { + sub := mocks.NewSubscription(t) + sub.On("Err").Return((<-chan error)(nil)).Maybe() + sub.On("Unsubscribe") + return sub +} + +func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { + t.Parallel() + + newDialedNode := func(t *testing.T, opts testNodeOpts) testNode { + node := newTestNode(t, opts) + opts.rpc.On("Close").Return(nil) + + node.setState(nodeStateDialed) + return node + } + + t.Run("returns on closed", func(t *testing.T) { + node := newTestNode(t, testNodeOpts{}) + node.setState(nodeStateClosed) + node.wg.Add(1) + node.aliveLoop() + }) + t.Run("if initial subscribe fails, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + + expectedError := errors.New("failed to subscribe to rpc") + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, 
expectedError).Once() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("if remote RPC connection is closed transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + lggr: lggr, + }) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + defer func() { assert.NoError(t, node.close()) }() + + sub := mocks.NewSubscription(t) + errChan := make(chan error) + close(errChan) + sub.On("Err").Return((<-chan error)(errChan)).Once() + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, sub, nil).Once() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Subscription was terminated") + assert.Equal(t, nodeStateUnreachable, node.State()) + }) + + newSubscribedNode := func(t *testing.T, opts testNodeOpts) testNode { + sub := newSub(t) + opts.rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + return newDialedNode(t, opts) + } + t.Run("Stays alive and waits for signal", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Subscription liveness checking disabled") + tests.AssertLogEventually(t, observedLogs, "Polling disabled") + assert.Equal(t, nodeStateAlive, node.State()) + }) + t.Run("stays alive while below pollFailureThreshold and resets counter on success", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + const pollFailureThreshold = 3 + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollFailureThreshold: pollFailureThreshold, + pollInterval: tests.TestInterval, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + pollError := errors.New("failed to get ClientVersion") + // 1. Return error several times, but below threshold + rpc.On("Ping", mock.Anything).Return(pollError).Run(func(_ mock.Arguments) { + // stays healthy while below threshold + assert.Equal(t, nodeStateAlive, node.State()) + }).Times(pollFailureThreshold - 1) + // 2. Successful call that is expected to reset counter + rpc.On("Ping", mock.Anything).Return(nil).Once() + // 3. Return error. If we have not reset the timer, we'll transition to nonAliveState + rpc.On("Ping", mock.Anything).Return(pollError).Once() + // 4. 
Once during the call, check if node is alive + var ensuredAlive atomic.Bool + rpc.On("Ping", mock.Anything).Return(nil).Run(func(_ mock.Arguments) { + if ensuredAlive.Load() { + return + } + ensuredAlive.Store(true) + assert.Equal(t, nodeStateAlive, node.State()) + }).Once() + // redundant call to stay in alive state + rpc.On("Ping", mock.Anything).Return(nil) + node.declareAlive() + tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold) + tests.AssertLogCountEventually(t, observedLogs, "Ping successful", 2) + assert.True(t, ensuredAlive.Load(), "expected to ensure that node was alive") + }) + t.Run("with threshold poll failures, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + const pollFailureThreshold = 3 + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollFailureThreshold: pollFailureThreshold, + pollInterval: tests.TestInterval, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + pollError := errors.New("failed to get ClientVersion") + rpc.On("Ping", mock.Anything).Return(pollError) + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold) + tests.AssertEventually(t, func() bool { + return nodeStateUnreachable == node.State() + }) + }) + t.Run("with threshold poll failures, but we are the last node alive, forcibly keeps it alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + const pollFailureThreshold = 3 + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollFailureThreshold: pollFailureThreshold, + pollInterval: tests.TestInterval, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: 20, + }).Once() + node.SetPoolChainInfoProvider(poolInfo) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: 20}, ChainInfo{BlockNumber: 20}) + pollError := errors.New("failed to get ClientVersion") + rpc.On("Ping", mock.Anything).Return(pollError) + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailureThreshold)) + assert.Equal(t, nodeStateAlive, node.State()) + }) + t.Run("when behind more than SyncThreshold, transitions to out of sync", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + const syncThreshold = 10 + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollInterval: tests.TestInterval, + syncThreshold: syncThreshold, + selectionMode: NodeSelectionModeRoundRobin, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + rpc.On("Ping", mock.Anything).Return(nil) + const mostRecentBlock = 20 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: mostRecentBlock}, ChainInfo{BlockNumber: 30}) + poolInfo := 
newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(10, ChainInfo{ + BlockNumber: syncThreshold + mostRecentBlock + 1, + TotalDifficulty: big.NewInt(10), + }) + node.SetPoolChainInfoProvider(poolInfo) + // tries to redial in outOfSync + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateOutOfSync, node.State()) + }).Once() + rpc.On("Close").Maybe() + rpc.On("Dial", mock.Anything).Run(func(_ mock.Arguments) { + require.Equal(t, nodeStateOutOfSync, node.State()) + }).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable") + }) + t.Run("when behind more than SyncThreshold but we are the last live node, forcibly stays alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + const syncThreshold = 10 + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollInterval: tests.TestInterval, + syncThreshold: syncThreshold, + selectionMode: NodeSelectionModeRoundRobin, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + rpc.On("Ping", mock.Anything).Return(nil) + const mostRecentBlock = 20 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: mostRecentBlock}, ChainInfo{BlockNumber: 30}) + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: syncThreshold + mostRecentBlock + 1, + TotalDifficulty: big.NewInt(10), + }) + node.SetPoolChainInfoProvider(poolInfo) + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState)) + }) + t.Run("when behind but SyncThreshold=0, stay alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollInterval: tests.TestInterval, + syncThreshold: 0, + selectionMode: NodeSelectionModeRoundRobin, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + rpc.On("Ping", mock.Anything).Return(nil) + const mostRecentBlock = 20 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: mostRecentBlock}, ChainInfo{BlockNumber: 30}) + node.declareAlive() + tests.AssertLogCountEventually(t, observedLogs, "Ping successful", 2) + assert.Equal(t, nodeStateAlive, node.State()) + }) + t.Run("when no new heads received for threshold, transitions to out of sync", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + chainConfig: clientMocks.ChainConfig{ + NoNewHeadsThresholdVal: tests.TestInterval, + }, + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + // tries to redial in outOfSync + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateOutOfSync, node.State()) + }).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertEventually(t, func() bool { + // right after outOfSync we'll transfer to unreachable due to returned error on Dial + // we check that we were in out 
of sync state on first Dial call + return node.State() == nodeStateUnreachable + }) + }) + t.Run("when no new heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + NoNewHeadsThresholdVal: tests.TestInterval, + }, + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: 20, + TotalDifficulty: big.NewInt(10), + }).Once() + node.SetPoolChainInfoProvider(poolInfo) + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState)) + assert.Equal(t, nodeStateAlive, node.State()) + }) + + t.Run("rpc closed head channel", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + sub := newSub(t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + close(ch) + }).Return((<-chan Head)(ch), sub, nil).Once() + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newDialedNode(t, testNodeOpts{ + lggr: lggr, + config: testNodeConfig{}, + chainConfig: clientMocks.ChainConfig{ + NoNewHeadsThresholdVal: tests.TestInterval, + }, + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed") + assert.Equal(t, nodeStateUnreachable, node.State()) + }) + t.Run("If finality tag is not enabled updates finalized block metric using finality depth and latest head", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + sub := newSub(t) + const blockNumber = 1000 + const finalityDepth = 10 + const expectedBlock = 990 + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + go writeHeads(t, ch, head{BlockNumber: blockNumber - 1}, head{BlockNumber: blockNumber}, head{BlockNumber: blockNumber - 1}) + }).Return((<-chan Head)(ch), sub, nil).Once() + name := "node-" + rand.Str(5) + node := newDialedNode(t, testNodeOpts{ + config: testNodeConfig{}, + chainConfig: clientMocks.ChainConfig{FinalityDepthVal: finalityDepth}, + rpc: rpc, + name: name, + chainID: big.NewInt(1), + }) + defer func() { assert.NoError(t, node.close()) }() + node.declareAlive() + tests.AssertEventually(t, func() bool { + metric, err := promPoolRPCNodeHighestFinalizedBlock.GetMetricWithLabelValues(big.NewInt(1).String(), name) + require.NoError(t, err) + var m = &prom.Metric{} + require.NoError(t, metric.Write(m)) + return float64(expectedBlock) == m.Gauge.GetValue() + }) + }) + t.Run("If fails to subscribe to latest finalized blocks, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + sub := newSub(t) + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + expectedError := 
errors.New("failed to subscribe to finalized heads") + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(nil, sub, expectedError).Once() + lggr, _ := logger.TestObserved(t, zap.DebugLevel) + node := newDialedNode(t, testNodeOpts{ + config: testNodeConfig{ + finalizedBlockPollInterval: tests.TestInterval, + }, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + node.declareAlive() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("Logs warning if latest finalized block is not valid", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + sub := newSub(t) + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + ch := make(chan Head, 1) + head := newMockHead(t) + head.On("IsValid").Return(false) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { + ch <- head + }).Return((<-chan Head)(ch), sub, nil).Once() + + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newDialedNode(t, testNodeOpts{ + config: testNodeConfig{}, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Latest finalized block is not valid") + }) + t.Run("On new finalized block updates corresponding metric", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + const expectedBlock = 1101 + const finalityDepth = 10 + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), newSub(t), nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + name := "node-" + rand.Str(5) + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + chainConfig: clientMocks.ChainConfig{ + FinalityDepthVal: finalityDepth, + IsFinalityTagEnabled: true, + }, + rpc: rpc, + name: name, + chainID: big.NewInt(1), + }) + defer func() { assert.NoError(t, node.close()) }() + node.declareAlive() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + writeHeads(t, ch, head{BlockNumber: expectedBlock - 1}, head{BlockNumber: expectedBlock}, head{BlockNumber: expectedBlock - 1}) + }() + tests.AssertEventually(t, func() bool { + metric, err := promPoolRPCNodeHighestFinalizedBlock.GetMetricWithLabelValues(big.NewInt(1).String(), name) + require.NoError(t, err) + var m = &prom.Metric{} + require.NoError(t, metric.Write(m)) + return float64(expectedBlock) == m.Gauge.GetValue() + }) + }) + t.Run("If finalized heads channel is closed, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + ch := make(chan Head) + close(ch) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), newSub(t), nil).Once() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() 
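+ // aliveLoop should detect the closed finalized-heads channel and transition the node to unreachable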
+ tests.AssertLogEventually(t, observedLogs, "Finalized heads subscription channel unexpectedly closed") + tests.AssertEventually(t, func() bool { + return nodeStateUnreachable == node.State() + }) + }) + t.Run("when no new finalized heads received for threshold, transitions to out of sync", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + ch := make(chan Head, 1) + ch <- head{BlockNumber: 10}.ToMockHead(t) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), newSub(t), nil).Once() + lggr, observed := logger.TestObserved(t, zap.DebugLevel) + noNewFinalizedHeadsThreshold := tests.TestInterval + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + chainConfig: clientMocks.ChainConfig{ + NoNewFinalizedHeadsThresholdVal: noNewFinalizedHeadsThreshold, + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + // tries to redial in outOfSync + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateOutOfSync, node.State()) + }).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observed, fmt.Sprintf("RPC's finalized state is out of sync; no new finalized heads received for %s (last finalized head received was 10)", noNewFinalizedHeadsThreshold)) + tests.AssertEventually(t, func() bool { + // right after outOfSync we'll transfer to unreachable due to returned error on Dial + // we check that we were in out of sync state on first Dial call + return node.State() == nodeStateUnreachable + }) + }) + t.Run("when no new finalized heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(make(<-chan Head), newSub(t), nil).Once() + lggr, observed := logger.TestObserved(t, zap.DebugLevel) + noNewFinalizedHeadsThreshold := tests.TestInterval + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + chainConfig: clientMocks.ChainConfig{ + NoNewFinalizedHeadsThresholdVal: noNewFinalizedHeadsThreshold, + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: 20, + TotalDifficulty: big.NewInt(10), + }).Once() + node.SetPoolChainInfoProvider(poolInfo) + node.declareAlive() + tests.AssertLogEventually(t, observed, fmt.Sprintf("RPC's finalized state is out of sync; %s %s", msgCannotDisable, msgDegradedState)) + assert.Equal(t, nodeStateAlive, node.State()) + }) + t.Run("If finalized subscription returns an error, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + sub := mocks.NewSubscription(t) + errCh := make(chan error, 1) + errCh <- errors.New("subscription failed") + sub.On("Err").Return((<-chan error)(errCh)) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(nil), sub, nil).Once() + lggr, observedLogs := logger.TestObserved(t, 
zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Finalized heads subscription was terminated") + tests.AssertEventually(t, func() bool { + return nodeStateUnreachable == node.State() + }) + }) +} + +type head struct { + BlockNumber int64 + BlockDifficulty *big.Int +} + +func (h head) ToMockHead(t *testing.T) *mockHead { + m := newMockHead(t) + m.On("BlockNumber").Return(h.BlockNumber).Maybe() + m.On("BlockDifficulty").Return(h.BlockDifficulty).Maybe() + m.On("IsValid").Return(true).Maybe() + return m +} + +func writeHeads(t *testing.T, ch chan<- Head, heads ...head) { + for _, head := range heads { + h := head.ToMockHead(t) + select { + case ch <- h: + case <-tests.Context(t).Done(): + return + } + } +} + +func setupRPCForAliveLoop(t *testing.T, rpc *mockRPCClient[types.ID, Head]) { + rpc.On("Dial", mock.Anything).Return(nil).Maybe() + aliveSubscription := mocks.NewSubscription(t) + aliveSubscription.On("Err").Return(nil).Maybe() + aliveSubscription.On("Unsubscribe").Maybe() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), aliveSubscription, nil).Maybe() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(make(<-chan Head), aliveSubscription, nil).Maybe() + rpc.On("SetAliveLoopSub", mock.Anything).Maybe() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Maybe() +} + +func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { + t.Parallel() + + newAliveNode := func(t *testing.T, opts testNodeOpts) testNode { + node := newTestNode(t, opts) + opts.rpc.On("Close").Return(nil) + node.setState(nodeStateAlive) + return node + } + + t.Run("returns on closed", func(t *testing.T) { + t.Parallel() + node := newTestNode(t, testNodeOpts{}) + node.setState(nodeStateClosed) + node.wg.Add(1) + node.outOfSyncLoop(syncStatusNotInSyncWithPool) + }) + t.Run("on old blocks stays outOfSync and returns on close", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr := logger.Test(t) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: 0}, ChainInfo{BlockNumber: 13}).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + heads := []head{{BlockNumber: 7}, {BlockNumber: 11}, {BlockNumber: 13}} + ch := make(chan Head) + var wg sync.WaitGroup + wg.Add(1) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + go func() { + defer wg.Done() + writeHeads(t, ch, heads...) 
+ }() + }).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + + node.declareOutOfSync(syncStatusNoNewHead) + // wait until all heads are consumed + wg.Wait() + assert.Equal(t, nodeStateOutOfSync, node.State()) + }) + t.Run("if initial dial fails, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + + expectedError := errors.New("failed to dial rpc") + // might be called again in unreachable loop, so no need to set once + rpc.On("Dial", mock.Anything).Return(expectedError) + + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("if fail to get chainID, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + chainID := types.RandomID() + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: chainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(chainID, nil) + // for out-of-sync + rpc.On("Dial", mock.Anything).Return(nil).Once() + // for unreachable + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + sub := mocks.NewSubscription(t) + errChan := make(chan error, 1) + errChan <- errors.New("subscription was terminate") + sub.On("Err").Return((<-chan error)(errChan)) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) + + expectedError := errors.New("failed to get chain types.ID") + // might be called multiple times + rpc.On("ChainID", mock.Anything).Return(types.NewIDFromInt(0), expectedError) + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("if chainID does not match, transitions to invalidChainID", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + rpcChainID := types.NewIDFromInt(11) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + // one for out-of-sync & one for invalid chainID + rpc.On("Dial", mock.Anything).Return(nil).Twice() + + // might be called multiple times + rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateInvalidChainID + }) + }) + t.Run("if syncing, transitions to syncing", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + config: testNodeConfig{nodeIsSyncingEnabled: true}, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + + // might be called multiple times + rpc.On("IsSyncing", mock.Anything).Return(true, nil) + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateSyncing + }) + }) + t.Run("if fails to fetch syncing status, transitions to 
unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + config: testNodeConfig{nodeIsSyncingEnabled: true}, + }) + defer func() { assert.NoError(t, node.close()) }() + + // one for out-of-sync + rpc.On("Dial", mock.Anything).Return(nil).Once() + + // for unreachable + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + // might be called multiple times + rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing")) + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("if fails to subscribe, becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + expectedError := errors.New("failed to subscribe") + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() + + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on subscription termination becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + sub := mocks.NewSubscription(t) + errChan := make(chan error, 1) + errChan <- errors.New("subscription was terminate") + sub.On("Err").Return((<-chan error)(errChan)) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, "Subscription was terminated") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("becomes unreachable if head channel is closed", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + + sub := newSub(t) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + close(ch) + }).Return((<-chan Head)(ch), sub, nil).Once() + 
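// might be called in unreachable loop +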
rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("becomes alive if it receives a newer head", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + const highestBlock = 1000 + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock}, head{BlockNumber: highestBlock + 1}) + }).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: highestBlock}, ChainInfo{BlockNumber: highestBlock}) + setupRPCForAliveLoop(t, rpc) + + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, msgReceivedBlock) + tests.AssertLogEventually(t, observedLogs, msgInSync) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) + t.Run("becomes alive if there is no other nodes", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + chainConfig: clientMocks.ChainConfig{ + NoNewHeadsThresholdVal: tests.TestInterval, + }, + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(0, ChainInfo{ + BlockNumber: 100, + TotalDifficulty: big.NewInt(200), + }) + node.SetPoolChainInfoProvider(poolInfo) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) + + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), outOfSyncSubscription, nil).Once() + setupRPCForAliveLoop(t, rpc) + + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, "RPC endpoint is still out of sync, but there are no other available nodes. 
This RPC node will be forcibly moved back into the live pool in a degraded state") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) + t.Run("Stays out-of-sync if received new head, but lags behind pool", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + chainConfig: clientMocks.ChainConfig{ + NoNewHeadsThresholdVal: tests.TestInterval, + }, + config: testNodeConfig{ + syncThreshold: 1, + selectionMode: NodeSelectionModeHighestHead, + }, + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + poolInfo := newMockPoolChainInfoProvider(t) + const highestBlock = 20 + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: highestBlock * 2, + TotalDifficulty: big.NewInt(200), + }) + node.SetPoolChainInfoProvider(poolInfo) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{BlockNumber: highestBlock}) + + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock}, head{BlockNumber: highestBlock + 1}) + }).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, msgReceivedBlock) + tests.AssertLogEventually(t, observedLogs, "No new heads received for") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateOutOfSync + }) + }) + + // creates RPC mock with all calls necessary to create heads subscription that won't produce any events + newRPCWithNoOpHeads := func(t *testing.T, chainID types.ID) *mockRPCClient[types.ID, Head] { + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(chainID, nil).Once() + sub := newSub(t) + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + return rpc + } + + t.Run("if fails to subscribe to finalized, becomes unreachable", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(nil), nil, errors.New("failed to subscribe")).Once() + // unreachable + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on subscription termination becomes unreachable", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: 
clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + sub := mocks.NewSubscription(t) + errChan := make(chan error, 1) + errChan <- errors.New("subscription was terminate") + sub.On("Err").Return((<-chan error)(errChan)) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) + + // unreachable + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, "Finalized head subscription was terminated") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("becomes unreachable if head channel is closed", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + sub := newSub(t) + + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { + close(ch) + }).Return((<-chan Head)(ch), sub, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) + // unreachable + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, "Finalized heads subscription channel unexpectedly closed") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("becomes alive on new finalized block", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr := logger.Test(t) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + NoNewFinalizedHeadsThresholdVal: tests.TestInterval, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + const highestBlock = 13 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{FinalizedBlockNumber: highestBlock}, ChainInfo{FinalizedBlockNumber: highestBlock}) + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + + setupRPCForAliveLoop(t, rpc) + + node.declareOutOfSync(syncStatusNoNewFinalizedHead) + heads := []head{{BlockNumber: highestBlock - 1}, {BlockNumber: highestBlock}} + writeHeads(t, ch, heads...) 
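+ // finalized heads at or below the locally observed finalized block should not move the node out of outOfSync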
+ assert.Equal(t, nodeStateOutOfSync, node.State()) + writeHeads(t, ch, head{BlockNumber: highestBlock + 1}) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) + t.Run("adds finalized block is not increasing flag, if there is no new finalized heads for too long", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr, observed := logger.TestObserved(t, zap.DebugLevel) + const noNewFinalizedHeads = tests.TestInterval + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + NoNewFinalizedHeadsThresholdVal: noNewFinalizedHeads, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + const highestBlock = 13 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{FinalizedBlockNumber: highestBlock}) + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + + node.declareOutOfSync(syncStatusNotInSyncWithPool) + heads := []head{{BlockNumber: highestBlock - 1}, {BlockNumber: highestBlock}} + writeHeads(t, ch, heads...) + assert.Equal(t, nodeStateOutOfSync, node.State()) + tests.AssertLogEventually(t, observed, fmt.Sprintf("No new finalized heads received for %s. Node stays "+ + "out-of-sync due to sync issues: NotInSyncWithRPCPool,NoNewFinalizedHead", noNewFinalizedHeads)) + }) +} + +func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { + t.Parallel() + + newAliveNode := func(t *testing.T, opts testNodeOpts) testNode { + node := newTestNode(t, opts) + opts.rpc.On("Close").Return(nil) + + node.setState(nodeStateAlive) + return node + } + t.Run("returns on closed", func(t *testing.T) { + t.Parallel() + node := newTestNode(t, testNodeOpts{}) + node.setState(nodeStateClosed) + node.wg.Add(1) + node.unreachableLoop() + }) + t.Run("on failed redial, keeps trying", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) + node.declareUnreachable() + tests.AssertLogCountEventually(t, observedLogs, "Failed to redial RPC node; still unreachable", 2) + }) + t.Run("on failed chainID verification, keep trying", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateDialed, node.State()) + }).Return(nodeChainID, errors.New("failed to get chain id")) + node.declareUnreachable() + tests.AssertLogCountEventually(t, observedLogs, "Failed to verify chain types.ID for node", 2) + }) + t.Run("on chain types.ID mismatch transitions to invalidChainID", func(t *testing.T) { + t.Parallel() + rpc := 
newMockRPCClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + rpcChainID := types.NewIDFromInt(11) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) + + node.declareUnreachable() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateInvalidChainID + }) + }) + t.Run("on syncing status check failure, keeps trying", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + config: testNodeConfig{nodeIsSyncingEnabled: true}, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateDialed, node.State()) + }).Return(nodeChainID, nil) + rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing status")) + node.declareUnreachable() + tests.AssertLogCountEventually(t, observedLogs, "Unexpected error while verifying RPC node synchronization status", 2) + }) + t.Run("on syncing, transitions to syncing state", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + config: testNodeConfig{nodeIsSyncingEnabled: true}, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + rpc.On("IsSyncing", mock.Anything).Return(true, nil) + + setupRPCForAliveLoop(t, rpc) + + node.declareUnreachable() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateSyncing + }) + }) + t.Run("on successful verification becomes alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + config: testNodeConfig{nodeIsSyncingEnabled: true}, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + rpc.On("IsSyncing", mock.Anything).Return(false, nil) + setupRPCForAliveLoop(t, rpc) + + node.declareUnreachable() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) + t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + + setupRPCForAliveLoop(t, rpc) + + node.declareUnreachable() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) +} + +func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { + t.Parallel() + newDialedNode := func(t *testing.T, opts testNodeOpts) testNode { + node := newTestNode(t, opts) + opts.rpc.On("Close").Return(nil) + + node.setState(nodeStateDialed) + return node + } + t.Run("returns on closed", func(t 
*testing.T) { + t.Parallel() + node := newTestNode(t, testNodeOpts{}) + node.setState(nodeStateClosed) + node.wg.Add(1) + node.invalidChainIDLoop() + }) + t.Run("on invalid dial becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) + rpc.On("Close") + + node.declareInvalidChainID() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on failed chainID call becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(nodeChainID, errors.New("failed to get chain id")) + // once for chainID and maybe another one for unreachable + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + + node.declareInvalidChainID() + tests.AssertLogEventually(t, observedLogs, "Failed to verify chain types.ID for node") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on chainID mismatch keeps trying", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + rpcChainID := types.NewIDFromInt(11) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) + + node.declareInvalidChainID() + tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain types.ID", 2) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateInvalidChainID + }) + }) + t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + rpcChainID := types.NewIDFromInt(11) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + setupRPCForAliveLoop(t, rpc) + rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil).Once() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + node.declareInvalidChainID() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) + t.Run("on successful verification becomes alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + rpcChainID := types.NewIDFromInt(11) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + config: testNodeConfig{nodeIsSyncingEnabled: true}, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil).Once() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + 
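// second ChainID call matches the node's chain, so the recheck passes once IsSyncing reports false +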
rpc.On("IsSyncing", mock.Anything).Return(false, nil).Once() + + setupRPCForAliveLoop(t, rpc) + + node.declareInvalidChainID() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) +} + +func TestUnit_NodeLifecycle_start(t *testing.T) { + t.Parallel() + + newNode := func(t *testing.T, opts testNodeOpts) testNode { + node := newTestNode(t, opts) + opts.rpc.On("Close").Return(nil) + + return node + } + t.Run("if fails on initial dial, becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) + err := node.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("if chainID verification fails, becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateDialed, node.State()) + }).Return(nodeChainID, errors.New("failed to get chain id")) + err := node.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertLogEventually(t, observedLogs, "Failed to verify chain types.ID for node") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on chain types.ID mismatch transitions to invalidChainID", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + rpcChainID := types.NewIDFromInt(11) + node := newNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + + rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) + err := node.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateInvalidChainID + }) + }) + t.Run("if syncing verification fails, becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + config: testNodeConfig{nodeIsSyncingEnabled: true}, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + + rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateDialed, node.State()) + }).Return(nodeChainID, nil).Once() + rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing status")) + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")) + err := node.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertLogEventually(t, observedLogs, "Unexpected error 
while verifying RPC node synchronization status") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on isSyncing transitions to syncing", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + node := newNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + config: testNodeConfig{nodeIsSyncingEnabled: true}, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + rpc.On("IsSyncing", mock.Anything).Return(true, nil) + err := node.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateSyncing + }) + }) + t.Run("on successful verification becomes alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + config: testNodeConfig{nodeIsSyncingEnabled: true}, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + rpc.On("IsSyncing", mock.Anything).Return(false, nil) + setupRPCForAliveLoop(t, rpc) + + err := node.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) + t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + setupRPCForAliveLoop(t, rpc) + + err := node.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) +} + +func TestUnit_NodeLifecycle_outOfSyncWithPool(t *testing.T) { + t.Parallel() + t.Run("skip if nLiveNodes is not configured", func(t *testing.T) { + node := newTestNode(t, testNodeOpts{}) + outOfSync, liveNodes := node.isOutOfSyncWithPool() + assert.Equal(t, false, outOfSync) + assert.Equal(t, 0, liveNodes) + }) + t.Run("skip if syncThreshold is not configured", func(t *testing.T) { + node := newTestNode(t, testNodeOpts{}) + poolInfo := newMockPoolChainInfoProvider(t) + node.SetPoolChainInfoProvider(poolInfo) + outOfSync, liveNodes := node.isOutOfSyncWithPool() + assert.Equal(t, false, outOfSync) + assert.Equal(t, 0, liveNodes) + }) + t.Run("panics on invalid selection mode", func(t *testing.T) { + node := newTestNode(t, testNodeOpts{ + config: testNodeConfig{syncThreshold: 1}, + }) + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{}).Once() + node.SetPoolChainInfoProvider(poolInfo) + assert.Panics(t, func() { + _, _ = node.isOutOfSyncWithPool() + }) + }) + t.Run("block height selection mode", func(t *testing.T) { + const syncThreshold = 10 + const highestBlock = 1000 + const nodesNum = 20 + const totalDifficulty = 3000 + testCases := []struct { + name string + blockNumber int64 + outOfSync bool + }{ + { + name: "below threshold", + blockNumber: highestBlock - syncThreshold - 1, + outOfSync: true, + }, + { + name: "equal to threshold", + blockNumber: highestBlock - syncThreshold, + outOfSync: false, + }, + { + name: "equal to highest 
block", + blockNumber: highestBlock, + outOfSync: false, + }, + { + name: "higher than highest block", + blockNumber: highestBlock, + outOfSync: false, + }, + } + + for _, selectionMode := range []string{NodeSelectionModeHighestHead, NodeSelectionModeRoundRobin, NodeSelectionModePriorityLevel} { + node := newTestNode(t, testNodeOpts{ + config: testNodeConfig{ + syncThreshold: syncThreshold, + selectionMode: selectionMode, + }, + }) + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(nodesNum, ChainInfo{ + BlockNumber: highestBlock, + TotalDifficulty: big.NewInt(totalDifficulty), + }) + node.SetPoolChainInfoProvider(poolInfo) + for _, td := range []int64{totalDifficulty - syncThreshold - 1, totalDifficulty - syncThreshold, totalDifficulty, totalDifficulty + 1} { + for _, testCase := range testCases { + t.Run(fmt.Sprintf("%s: SelectionModeVal: %s: total difficulty: %d", testCase.name, selectionMode, td), func(t *testing.T) { + chainInfo := ChainInfo{BlockNumber: testCase.blockNumber, TotalDifficulty: big.NewInt(td)} + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(chainInfo, ChainInfo{}).Once() + node.rpc = rpc + outOfSync, liveNodes := node.isOutOfSyncWithPool() + assert.Equal(t, nodesNum, liveNodes) + assert.Equal(t, testCase.outOfSync, outOfSync) + }) + } + } + } + }) + t.Run("total difficulty selection mode", func(t *testing.T) { + const syncThreshold = 10 + const highestBlock = 1000 + const nodesNum = 20 + const totalDifficulty = 3000 + testCases := []struct { + name string + totalDifficulty int64 + outOfSync bool + }{ + { + name: "below threshold", + totalDifficulty: totalDifficulty - syncThreshold - 1, + outOfSync: true, + }, + { + name: "equal to threshold", + totalDifficulty: totalDifficulty - syncThreshold, + outOfSync: false, + }, + { + name: "equal to highest block", + totalDifficulty: totalDifficulty, + outOfSync: false, + }, + { + name: "higher than highest block", + totalDifficulty: totalDifficulty, + outOfSync: false, + }, + } + + node := newTestNode(t, testNodeOpts{ + config: testNodeConfig{ + syncThreshold: syncThreshold, + selectionMode: NodeSelectionModeTotalDifficulty, + }, + }) + + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(nodesNum, ChainInfo{ + BlockNumber: highestBlock, + TotalDifficulty: big.NewInt(totalDifficulty), + }) + node.SetPoolChainInfoProvider(poolInfo) + for _, hb := range []int64{highestBlock - syncThreshold - 1, highestBlock - syncThreshold, highestBlock, highestBlock + 1} { + for _, testCase := range testCases { + t.Run(fmt.Sprintf("%s: SelectionModeVal: %s: highest block: %d", testCase.name, NodeSelectionModeTotalDifficulty, hb), func(t *testing.T) { + chainInfo := ChainInfo{BlockNumber: hb, TotalDifficulty: big.NewInt(testCase.totalDifficulty)} + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(chainInfo, ChainInfo{}).Once() + node.rpc = rpc + outOfSync, liveNodes := node.isOutOfSyncWithPool() + assert.Equal(t, nodesNum, liveNodes) + assert.Equal(t, testCase.outOfSync, outOfSync) + }) + } + } + }) +} + +func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { + t.Parallel() + newDialedNode := func(t *testing.T, opts testNodeOpts) testNode { + opts.config.nodeIsSyncingEnabled = true + node := newTestNode(t, opts) + opts.rpc.On("Close").Return(nil) + + node.setState(nodeStateDialed) + return node + } + t.Run("returns on closed", func(t *testing.T) { + t.Parallel() + node := newTestNode(t, testNodeOpts{}) + 
node.setState(nodeStateClosed) + node.wg.Add(1) + node.syncingLoop() + }) + t.Run("on invalid dial becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) + + node.declareSyncing() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on failed chainID call becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(nodeChainID, errors.New("failed to get chain id")) + + // once for syncing and maybe another one for unreachable + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareSyncing() + tests.AssertLogEventually(t, observedLogs, "Failed to verify chain types.ID for node") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on chainID mismatch transitions to invalidChainID", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + rpcChainID := types.NewIDFromInt(11) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Twice() + + rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) + node.declareSyncing() + tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain types.ID", 2) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateInvalidChainID + }) + }) + t.Run("on failed Syncing check - becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + // first one is needed to enter internal loop + rpc.On("IsSyncing", mock.Anything).Return(true, nil).Once() + rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check if syncing")).Once() + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + + node.declareSyncing() + tests.AssertLogEventually(t, observedLogs, "Unexpected error while verifying RPC node synchronization status") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on IsSyncing - keeps trying", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + 
chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + rpc.On("IsSyncing", mock.Anything).Return(true, nil) + rpc.On("Dial", mock.Anything).Return(nil).Once() + + node.declareSyncing() + tests.AssertLogCountEventually(t, observedLogs, "Verification failed: Node is syncing", 2) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateSyncing + }) + }) + t.Run("on successful verification becomes alive", func(t *testing.T) { + t.Parallel() + rpc := newMockRPCClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + rpc.On("IsSyncing", mock.Anything).Return(true, nil).Once() + rpc.On("IsSyncing", mock.Anything).Return(false, nil).Once() + + setupRPCForAliveLoop(t, rpc) + + node.declareSyncing() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) +} + +func TestNode_State(t *testing.T) { + t.Run("If not Alive, returns as is", func(t *testing.T) { + for state := nodeState(0); state < nodeStateLen; state++ { + if state == nodeStateAlive { + continue + } + + node := newTestNode(t, testNodeOpts{}) + node.setState(state) + assert.Equal(t, state, node.State()) + } + }) + t.Run("If repeatable read is not enforced, returns alive", func(t *testing.T) { + node := newTestNode(t, testNodeOpts{}) + node.setState(nodeStateAlive) + assert.Equal(t, nodeStateAlive, node.State()) + }) + testCases := []struct { + Name string + FinalizedBlockOffsetVal uint32 + IsFinalityTagEnabled bool + PoolChainInfo ChainInfo + NodeChainInfo ChainInfo + ExpectedState nodeState + }{ + { + Name: "If finality lag does not exceeds offset, returns alive (FinalityDepth)", + FinalizedBlockOffsetVal: 15, + PoolChainInfo: ChainInfo{ + BlockNumber: 20, + }, + NodeChainInfo: ChainInfo{ + BlockNumber: 5, + }, + ExpectedState: nodeStateAlive, + }, + { + Name: "If finality lag does not exceeds offset, returns alive (FinalityTag)", + FinalizedBlockOffsetVal: 15, + IsFinalityTagEnabled: true, + PoolChainInfo: ChainInfo{ + FinalizedBlockNumber: 20, + }, + NodeChainInfo: ChainInfo{ + FinalizedBlockNumber: 5, + }, + ExpectedState: nodeStateAlive, + }, + { + Name: "If finality lag exceeds offset, returns nodeStateFinalizedBlockOutOfSync (FinalityDepth)", + FinalizedBlockOffsetVal: 15, + PoolChainInfo: ChainInfo{ + BlockNumber: 20, + }, + NodeChainInfo: ChainInfo{ + BlockNumber: 4, + }, + ExpectedState: nodeStateFinalizedBlockOutOfSync, + }, + { + Name: "If finality lag exceeds offset, returns nodeStateFinalizedBlockOutOfSync (FinalityTag)", + FinalizedBlockOffsetVal: 15, + IsFinalityTagEnabled: true, + PoolChainInfo: ChainInfo{ + FinalizedBlockNumber: 20, + }, + NodeChainInfo: ChainInfo{ + FinalizedBlockNumber: 4, + }, + ExpectedState: nodeStateFinalizedBlockOutOfSync, + }, + } + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + rpc := newMockRPCClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(tc.NodeChainInfo, tc.PoolChainInfo).Once() + node := newTestNode(t, testNodeOpts{ + config: testNodeConfig{ + enforceRepeatableRead: true, + }, + chainConfig: clientMocks.ChainConfig{ + FinalizedBlockOffsetVal: tc.FinalizedBlockOffsetVal, + IsFinalityTagEnabled: tc.IsFinalityTagEnabled, + }, + 
rpc: rpc, + }) + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("HighestUserObservations").Return(tc.PoolChainInfo).Once() + node.SetPoolChainInfoProvider(poolInfo) + node.setState(nodeStateAlive) + assert.Equal(t, tc.ExpectedState, node.State()) + }) + } +} diff --git a/multinode/node_selector.go b/multinode/node_selector.go new file mode 100644 index 0000000..74f6d44 --- /dev/null +++ b/multinode/node_selector.go @@ -0,0 +1,43 @@ +package client + +import ( + "fmt" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +const ( + NodeSelectionModeHighestHead = "HighestHead" + NodeSelectionModeRoundRobin = "RoundRobin" + NodeSelectionModeTotalDifficulty = "TotalDifficulty" + NodeSelectionModePriorityLevel = "PriorityLevel" +) + +type NodeSelector[ + CHAIN_ID types.ID, + RPC any, +] interface { + // Select returns a Node, or nil if none can be selected. + // Implementation must be thread-safe. + Select() Node[CHAIN_ID, RPC] + // Name returns the strategy name, e.g. "HighestHead" or "RoundRobin" + Name() string +} + +func newNodeSelector[ + CHAIN_ID types.ID, + RPC any, +](selectionMode string, nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { + switch selectionMode { + case NodeSelectionModeHighestHead: + return NewHighestHeadNodeSelector[CHAIN_ID, RPC](nodes) + case NodeSelectionModeRoundRobin: + return NewRoundRobinSelector[CHAIN_ID, RPC](nodes) + case NodeSelectionModeTotalDifficulty: + return NewTotalDifficultyNodeSelector[CHAIN_ID, RPC](nodes) + case NodeSelectionModePriorityLevel: + return NewPriorityLevelNodeSelector[CHAIN_ID, RPC](nodes) + default: + panic(fmt.Sprintf("unsupported NodeSelectionMode: %s", selectionMode)) + } +} diff --git a/multinode/node_selector_highest_head.go b/multinode/node_selector_highest_head.go new file mode 100644 index 0000000..d3f8e11 --- /dev/null +++ b/multinode/node_selector_highest_head.go @@ -0,0 +1,40 @@ +package client + +import ( + "math" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +type highestHeadNodeSelector[ + CHAIN_ID types.ID, + RPC any, +] []Node[CHAIN_ID, RPC] + +func NewHighestHeadNodeSelector[ + CHAIN_ID types.ID, + RPC any, +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { + return highestHeadNodeSelector[CHAIN_ID, RPC](nodes) +} + +func (s highestHeadNodeSelector[CHAIN_ID, RPC]) Select() Node[CHAIN_ID, RPC] { + var highestHeadNumber int64 = math.MinInt64 + var highestHeadNodes []Node[CHAIN_ID, RPC] + for _, n := range s { + state, currentChainInfo := n.StateAndLatest() + currentHeadNumber := currentChainInfo.BlockNumber + if state == nodeStateAlive && currentHeadNumber >= highestHeadNumber { + if highestHeadNumber < currentHeadNumber { + highestHeadNumber = currentHeadNumber + highestHeadNodes = nil + } + highestHeadNodes = append(highestHeadNodes, n) + } + } + return firstOrHighestPriority(highestHeadNodes) +} + +func (s highestHeadNodeSelector[CHAIN_ID, RPC]) Name() string { + return NodeSelectionModeHighestHead +} diff --git a/multinode/node_selector_highest_head_test.go b/multinode/node_selector_highest_head_test.go new file mode 100644 index 0000000..bfc2af2 --- /dev/null +++ b/multinode/node_selector_highest_head_test.go @@ -0,0 +1,176 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +func TestHighestHeadNodeSelectorName(t *testing.T) { + selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModeHighestHead, nil) + assert.Equal(t, 
selector.Name(), NodeSelectionModeHighestHead) +} + +func TestHighestHeadNodeSelector(t *testing.T) { + t.Parallel() + + type nodeClient RPCClient[types.ID, Head] + + var nodes []Node[types.ID, nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: int64(-1)}) + } else if i == 1 { + // second node is alive, LatestReceivedBlockNumber = 1 + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(1)}) + } else { + // third node is alive, LatestReceivedBlockNumber = 2 (best node) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(2)}) + } + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + } + + selector := newNodeSelector[types.ID, nodeClient](NodeSelectionModeHighestHead, nodes) + assert.Same(t, nodes[2], selector.Select()) + + t.Run("stick to the same node", func(t *testing.T) { + node := newMockNode[types.ID, nodeClient](t) + // fourth node is alive, LatestReceivedBlockNumber = 2 (same as 3rd) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(2)}) + node.On("Order").Return(int32(1)) + nodes = append(nodes, node) + + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("another best node", func(t *testing.T) { + node := newMockNode[types.ID, nodeClient](t) + // fifth node is alive, LatestReceivedBlockNumber = 3 (better than 3rd and 4th) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) + node.On("Order").Return(int32(1)) + nodes = append(nodes, node) + + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + assert.Same(t, nodes[4], selector.Select()) + }) + + t.Run("nodes never update latest block number", func(t *testing.T) { + node1 := newMockNode[types.ID, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(-1)}) + node1.On("Order").Return(int32(1)) + node2 := newMockNode[types.ID, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(-1)}) + node2.On("Order").Return(int32(1)) + selector := newNodeSelector(NodeSelectionModeHighestHead, []Node[types.ID, nodeClient]{node1, node2}) + assert.Same(t, node1, selector.Select()) + }) +} + +func TestHighestHeadNodeSelector_None(t *testing.T) { + t.Parallel() + + type nodeClient RPCClient[types.ID, Head] + var nodes []Node[types.ID, nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: int64(-1)}) + } else { + // others are unreachable + node.On("StateAndLatest").Return(nodeStateUnreachable, ChainInfo{BlockNumber: int64(-1)}) + } + nodes = append(nodes, node) + } + + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + assert.Nil(t, selector.Select()) +} + +func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { + t.Parallel() + + type nodeClient RPCClient[types.ID, Head] + var nodes []Node[types.ID, nodeClient] + + t.Run("same head and order", func(t *testing.T) { + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, nodeClient](t) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(1)}) + node.On("Order").Return(int32(2)) + nodes = append(nodes, node) + } + selector := 
newNodeSelector(NodeSelectionModeHighestHead, nodes) + // Should select the first node because all things are equal + assert.Same(t, nodes[0], selector.Select()) + }) + + t.Run("same head but different order", func(t *testing.T) { + node1 := newMockNode[types.ID, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) + node1.On("Order").Return(int32(3)) + + node2 := newMockNode[types.ID, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) + node2.On("Order").Return(int32(1)) + + node3 := newMockNode[types.ID, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) + node3.On("Order").Return(int32(2)) + + nodes := []Node[types.ID, nodeClient]{node1, node2, node3} + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + // Should select the second node as it has the highest priority + assert.Same(t, nodes[1], selector.Select()) + }) + + t.Run("different head but same order", func(t *testing.T) { + node1 := newMockNode[types.ID, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(1)}) + node1.On("Order").Maybe().Return(int32(3)) + + node2 := newMockNode[types.ID, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(2)}) + node2.On("Order").Maybe().Return(int32(3)) + + node3 := newMockNode[types.ID, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) + node3.On("Order").Return(int32(3)) + + nodes := []Node[types.ID, nodeClient]{node1, node2, node3} + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + // Should select the third node as it has the highest head + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("different head and different order", func(t *testing.T) { + node1 := newMockNode[types.ID, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(10)}) + node1.On("Order").Maybe().Return(int32(3)) + + node2 := newMockNode[types.ID, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(11)}) + node2.On("Order").Maybe().Return(int32(4)) + + node3 := newMockNode[types.ID, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(11)}) + node3.On("Order").Maybe().Return(int32(3)) + + node4 := newMockNode[types.ID, nodeClient](t) + node4.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(10)}) + node4.On("Order").Maybe().Return(int32(1)) + + nodes := []Node[types.ID, nodeClient]{node1, node2, node3, node4} + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + // Should select the third node as it has the highest head and will win the priority tie-breaker + assert.Same(t, nodes[2], selector.Select()) + }) +} diff --git a/multinode/node_selector_priority_level.go b/multinode/node_selector_priority_level.go new file mode 100644 index 0000000..408302a --- /dev/null +++ b/multinode/node_selector_priority_level.go @@ -0,0 +1,123 @@ +package client + +import ( + "math" + "sort" + "sync/atomic" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +type priorityLevelNodeSelector[ + CHAIN_ID types.ID, + RPC any, +] struct { + nodes []Node[CHAIN_ID, RPC] + roundRobinCount []atomic.Uint32 +} + +type nodeWithPriority[ + CHAIN_ID types.ID, + RPC any, +] struct { + node Node[CHAIN_ID, RPC] + priority int32 +} + +func 
NewPriorityLevelNodeSelector[ + CHAIN_ID types.ID, + RPC any, +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { + return &priorityLevelNodeSelector[CHAIN_ID, RPC]{ + nodes: nodes, + roundRobinCount: make([]atomic.Uint32, nrOfPriorityTiers(nodes)), + } +} + +func (s priorityLevelNodeSelector[CHAIN_ID, RPC]) Select() Node[CHAIN_ID, RPC] { + nodes := s.getHighestPriorityAliveTier() + + if len(nodes) == 0 { + return nil + } + priorityLevel := nodes[len(nodes)-1].priority + + // NOTE: Inc returns the number after addition, so we must -1 to get the "current" counter + count := s.roundRobinCount[priorityLevel].Add(1) - 1 + idx := int(count % uint32(len(nodes))) + + return nodes[idx].node +} + +func (s priorityLevelNodeSelector[CHAIN_ID, RPC]) Name() string { + return NodeSelectionModePriorityLevel +} + +// getHighestPriorityAliveTier filters nodes that are not in state nodeStateAlive and +// returns only the highest tier of alive nodes +func (s priorityLevelNodeSelector[CHAIN_ID, RPC]) getHighestPriorityAliveTier() []nodeWithPriority[CHAIN_ID, RPC] { + var nodes []nodeWithPriority[CHAIN_ID, RPC] + for _, n := range s.nodes { + if n.State() == nodeStateAlive { + nodes = append(nodes, nodeWithPriority[CHAIN_ID, RPC]{n, n.Order()}) + } + } + + if len(nodes) == 0 { + return nil + } + + return removeLowerTiers(nodes) +} + +// removeLowerTiers take a slice of nodeWithPriority[CHAIN_ID, BLOCK_HASH, HEAD, RPC] and keeps only the highest tier +func removeLowerTiers[ + CHAIN_ID types.ID, + RPC any, +](nodes []nodeWithPriority[CHAIN_ID, RPC]) []nodeWithPriority[CHAIN_ID, RPC] { + sort.SliceStable(nodes, func(i, j int) bool { + return nodes[i].priority > nodes[j].priority + }) + + var nodes2 []nodeWithPriority[CHAIN_ID, RPC] + currentPriority := nodes[len(nodes)-1].priority + + for _, n := range nodes { + if n.priority == currentPriority { + nodes2 = append(nodes2, n) + } + } + + return nodes2 +} + +// nrOfPriorityTiers calculates the total number of priority tiers +func nrOfPriorityTiers[ + CHAIN_ID types.ID, + RPC any, +](nodes []Node[CHAIN_ID, RPC]) int32 { + highestPriority := int32(0) + for _, n := range nodes { + priority := n.Order() + if highestPriority < priority { + highestPriority = priority + } + } + return highestPriority + 1 +} + +// firstOrHighestPriority takes a list of nodes and returns the first one with the highest priority +func firstOrHighestPriority[ + CHAIN_ID types.ID, + RPC any, +](nodes []Node[CHAIN_ID, RPC]) Node[CHAIN_ID, RPC] { + hp := int32(math.MaxInt32) + var node Node[CHAIN_ID, RPC] + for _, n := range nodes { + if n.Order() < hp { + hp = n.Order() + node = n + } + } + return node +} diff --git a/multinode/node_selector_priority_level_test.go b/multinode/node_selector_priority_level_test.go new file mode 100644 index 0000000..89e1f37 --- /dev/null +++ b/multinode/node_selector_priority_level_test.go @@ -0,0 +1,91 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +func TestPriorityLevelNodeSelectorName(t *testing.T) { + selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModePriorityLevel, nil) + assert.Equal(t, selector.Name(), NodeSelectionModePriorityLevel) +} + +func TestPriorityLevelNodeSelector(t *testing.T) { + t.Parallel() + + type nodeClient RPCClient[types.ID, Head] + type testNode struct { + order int32 + state nodeState + } + type testCase struct { + name string + nodes []testNode + expect []int // indexes of the nodes expected to 
be returned by Select + } + + testCases := []testCase{ + { + name: "TwoNodesSameOrder: Highest Allowed Order", + nodes: []testNode{ + {order: 1, state: nodeStateAlive}, + {order: 1, state: nodeStateAlive}, + }, + expect: []int{0, 1, 0, 1, 0, 1}, + }, + { + name: "TwoNodesSameOrder: Lowest Allowed Order", + nodes: []testNode{ + {order: 100, state: nodeStateAlive}, + {order: 100, state: nodeStateAlive}, + }, + expect: []int{0, 1, 0, 1, 0, 1}, + }, + { + name: "NoneAvailable", + nodes: []testNode{ + {order: 1, state: nodeStateOutOfSync}, + {order: 1, state: nodeStateUnreachable}, + {order: 1, state: nodeStateUnreachable}, + }, + expect: []int{}, // no nodes should be selected + }, + { + name: "DifferentOrder", + nodes: []testNode{ + {order: 1, state: nodeStateAlive}, + {order: 2, state: nodeStateAlive}, + {order: 3, state: nodeStateAlive}, + }, + expect: []int{0, 0}, // only the highest order node should be selected + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var nodes []Node[types.ID, nodeClient] + for _, tn := range tc.nodes { + node := newMockNode[types.ID, nodeClient](t) + node.On("State").Return(tn.state) + node.On("Order").Return(tn.order) + nodes = append(nodes, node) + } + + selector := newNodeSelector(NodeSelectionModePriorityLevel, nodes) + for _, idx := range tc.expect { + if idx >= len(nodes) { + t.Fatalf("Invalid node index %d in test case '%s'", idx, tc.name) + } + assert.Same(t, nodes[idx], selector.Select()) + } + + // Check for nil selection if expected slice is empty + if len(tc.expect) == 0 { + assert.Nil(t, selector.Select()) + } + }) + } +} diff --git a/multinode/node_selector_round_robin.go b/multinode/node_selector_round_robin.go new file mode 100644 index 0000000..9cc260d --- /dev/null +++ b/multinode/node_selector_round_robin.go @@ -0,0 +1,48 @@ +package client + +import ( + "sync/atomic" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +type roundRobinSelector[ + CHAIN_ID types.ID, + RPC any, +] struct { + nodes []Node[CHAIN_ID, RPC] + roundRobinCount atomic.Uint32 +} + +func NewRoundRobinSelector[ + CHAIN_ID types.ID, + RPC any, +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { + return &roundRobinSelector[CHAIN_ID, RPC]{ + nodes: nodes, + } +} + +func (s *roundRobinSelector[CHAIN_ID, RPC]) Select() Node[CHAIN_ID, RPC] { + var liveNodes []Node[CHAIN_ID, RPC] + for _, n := range s.nodes { + if n.State() == nodeStateAlive { + liveNodes = append(liveNodes, n) + } + } + + nNodes := len(liveNodes) + if nNodes == 0 { + return nil + } + + // NOTE: Inc returns the number after addition, so we must -1 to get the "current" counter + count := s.roundRobinCount.Add(1) - 1 + idx := int(count % uint32(nNodes)) + + return liveNodes[idx] +} + +func (s *roundRobinSelector[CHAIN_ID, RPC]) Name() string { + return NodeSelectionModeRoundRobin +} diff --git a/multinode/node_selector_round_robin_test.go b/multinode/node_selector_round_robin_test.go new file mode 100644 index 0000000..d1db7d3 --- /dev/null +++ b/multinode/node_selector_round_robin_test.go @@ -0,0 +1,61 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +func TestRoundRobinNodeSelectorName(t *testing.T) { + selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModeRoundRobin, nil) + assert.Equal(t, selector.Name(), NodeSelectionModeRoundRobin) +} + +func TestRoundRobinNodeSelector(t *testing.T) { + t.Parallel() + + type nodeClient 
RPCClient[types.ID, Head] + var nodes []Node[types.ID, nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("State").Return(nodeStateOutOfSync) + } else { + // second & third nodes are alive + node.On("State").Return(nodeStateAlive) + } + nodes = append(nodes, node) + } + + selector := newNodeSelector(NodeSelectionModeRoundRobin, nodes) + assert.Same(t, nodes[1], selector.Select()) + assert.Same(t, nodes[2], selector.Select()) + assert.Same(t, nodes[1], selector.Select()) + assert.Same(t, nodes[2], selector.Select()) +} + +func TestRoundRobinNodeSelector_None(t *testing.T) { + t.Parallel() + + type nodeClient RPCClient[types.ID, Head] + var nodes []Node[types.ID, nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("State").Return(nodeStateOutOfSync) + } else { + // others are unreachable + node.On("State").Return(nodeStateUnreachable) + } + nodes = append(nodes, node) + } + + selector := newNodeSelector(NodeSelectionModeRoundRobin, nodes) + assert.Nil(t, selector.Select()) +} diff --git a/multinode/node_selector_test.go b/multinode/node_selector_test.go new file mode 100644 index 0000000..9187b11 --- /dev/null +++ b/multinode/node_selector_test.go @@ -0,0 +1,18 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +func TestNodeSelector(t *testing.T) { + // rest of the tests are located in specific node selectors tests + t.Run("panics on unknown type", func(t *testing.T) { + assert.Panics(t, func() { + _ = newNodeSelector[types.ID, RPCClient[types.ID, Head]]("unknown", nil) + }) + }) +} diff --git a/multinode/node_selector_total_difficulty.go b/multinode/node_selector_total_difficulty.go new file mode 100644 index 0000000..96d14ee --- /dev/null +++ b/multinode/node_selector_total_difficulty.go @@ -0,0 +1,53 @@ +package client + +import ( + "math/big" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +type totalDifficultyNodeSelector[ + CHAIN_ID types.ID, + RPC any, +] []Node[CHAIN_ID, RPC] + +func NewTotalDifficultyNodeSelector[ + CHAIN_ID types.ID, + RPC any, +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { + return totalDifficultyNodeSelector[CHAIN_ID, RPC](nodes) +} + +func (s totalDifficultyNodeSelector[CHAIN_ID, RPC]) Select() Node[CHAIN_ID, RPC] { + // NodeNoNewHeadsThreshold may not be enabled, in this case all nodes have td == nil + var highestTD *big.Int + var nodes []Node[CHAIN_ID, RPC] + var aliveNodes []Node[CHAIN_ID, RPC] + + for _, n := range s { + state, currentChainInfo := n.StateAndLatest() + if state != nodeStateAlive { + continue + } + + currentTD := currentChainInfo.TotalDifficulty + aliveNodes = append(aliveNodes, n) + if currentTD != nil && (highestTD == nil || currentTD.Cmp(highestTD) >= 0) { + if highestTD == nil || currentTD.Cmp(highestTD) > 0 { + highestTD = currentTD + nodes = nil + } + nodes = append(nodes, n) + } + } + + // If all nodes have td == nil pick one from the nodes that are alive + if len(nodes) == 0 { + return firstOrHighestPriority(aliveNodes) + } + return firstOrHighestPriority(nodes) +} + +func (s totalDifficultyNodeSelector[CHAIN_ID, RPC]) Name() string { + return NodeSelectionModeTotalDifficulty +} diff --git a/multinode/node_selector_total_difficulty_test.go b/multinode/node_selector_total_difficulty_test.go new file mode 100644 index 
0000000..f1d9de4 --- /dev/null +++ b/multinode/node_selector_total_difficulty_test.go @@ -0,0 +1,178 @@ +package client + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +func TestTotalDifficultyNodeSelectorName(t *testing.T) { + selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModeTotalDifficulty, nil) + assert.Equal(t, selector.Name(), NodeSelectionModeTotalDifficulty) +} + +func TestTotalDifficultyNodeSelector(t *testing.T) { + t.Parallel() + + type nodeClient RPCClient[types.ID, Head] + var nodes []Node[types.ID, nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1}) + } else if i == 1 { + // second node is alive + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(7)}) + } else { + // third node is alive and best + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 2, TotalDifficulty: big.NewInt(8)}) + } + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + } + + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + assert.Same(t, nodes[2], selector.Select()) + + t.Run("stick to the same node", func(t *testing.T) { + node := newMockNode[types.ID, nodeClient](t) + // fourth node is alive (same as 3rd) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 2, TotalDifficulty: big.NewInt(8)}) + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("another best node", func(t *testing.T) { + node := newMockNode[types.ID, nodeClient](t) + // fifth node is alive (better than 3rd and 4th) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(11)}) + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + assert.Same(t, nodes[4], selector.Select()) + }) + + t.Run("nodes never update latest block number", func(t *testing.T) { + node1 := newMockNode[types.ID, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: -1, TotalDifficulty: nil}) + node1.On("Order").Maybe().Return(int32(1)) + node2 := newMockNode[types.ID, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: -1, TotalDifficulty: nil}) + node2.On("Order").Maybe().Return(int32(1)) + nodes := []Node[types.ID, nodeClient]{node1, node2} + + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + assert.Same(t, node1, selector.Select()) + }) +} + +func TestTotalDifficultyNodeSelector_None(t *testing.T) { + t.Parallel() + + type nodeClient RPCClient[types.ID, Head] + var nodes []Node[types.ID, nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1, TotalDifficulty: nil}) + } else { + // others are unreachable + node.On("StateAndLatest").Return(nodeStateUnreachable, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(7)}) + } + nodes = append(nodes, node) + } + + selector := 
newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + assert.Nil(t, selector.Select()) +} + +func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { + t.Parallel() + + type nodeClient RPCClient[types.ID, Head] + var nodes []Node[types.ID, nodeClient] + + t.Run("same td and order", func(t *testing.T) { + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, nodeClient](t) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(10)}) + node.On("Order").Return(int32(2)) + nodes = append(nodes, node) + } + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + // Should select the first node because all things are equal + assert.Same(t, nodes[0], selector.Select()) + }) + + t.Run("same td but different order", func(t *testing.T) { + node1 := newMockNode[types.ID, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(10)}) + node1.On("Order").Return(int32(3)) + + node2 := newMockNode[types.ID, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(10)}) + node2.On("Order").Return(int32(1)) + + node3 := newMockNode[types.ID, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(10)}) + node3.On("Order").Return(int32(2)) + + nodes := []Node[types.ID, nodeClient]{node1, node2, node3} + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + // Should select the second node as it has the highest priority + assert.Same(t, nodes[1], selector.Select()) + }) + + t.Run("different td but same order", func(t *testing.T) { + node1 := newMockNode[types.ID, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(10)}) + node1.On("Order").Maybe().Return(int32(3)) + + node2 := newMockNode[types.ID, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(11)}) + node2.On("Order").Maybe().Return(int32(3)) + + node3 := newMockNode[types.ID, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(12)}) + node3.On("Order").Return(int32(3)) + + nodes := []Node[types.ID, nodeClient]{node1, node2, node3} + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + // Should select the third node as it has the highest td + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("different head and different order", func(t *testing.T) { + node1 := newMockNode[types.ID, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(100)}) + node1.On("Order").Maybe().Return(int32(4)) + + node2 := newMockNode[types.ID, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(110)}) + node2.On("Order").Maybe().Return(int32(5)) + + node3 := newMockNode[types.ID, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(110)}) + node3.On("Order").Maybe().Return(int32(1)) + + node4 := newMockNode[types.ID, nodeClient](t) + node4.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(105)}) + node4.On("Order").Maybe().Return(int32(2)) + + nodes := []Node[types.ID, nodeClient]{node1, node2, node3, node4} + 
selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + // Should select the third node as it has the highest td and will win the priority tie-breaker + assert.Same(t, nodes[2], selector.Select()) + }) +} diff --git a/multinode/node_test.go b/multinode/node_test.go new file mode 100644 index 0000000..703caf6 --- /dev/null +++ b/multinode/node_test.go @@ -0,0 +1,107 @@ +package client + +import ( + "net/url" + "testing" + "time" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink-framework/multinode/mocks" + "github.com/smartcontractkit/chainlink-framework/types" +) + +type testNodeConfig struct { + pollFailureThreshold uint32 + pollInterval time.Duration + selectionMode string + syncThreshold uint32 + nodeIsSyncingEnabled bool + enforceRepeatableRead bool + finalizedBlockPollInterval time.Duration + deathDeclarationDelay time.Duration + newHeadsPollInterval time.Duration +} + +func (n testNodeConfig) NewHeadsPollInterval() time.Duration { + return n.newHeadsPollInterval +} + +func (n testNodeConfig) PollFailureThreshold() uint32 { + return n.pollFailureThreshold +} + +func (n testNodeConfig) PollInterval() time.Duration { + return n.pollInterval +} + +func (n testNodeConfig) SelectionMode() string { + return n.selectionMode +} + +func (n testNodeConfig) SyncThreshold() uint32 { + return n.syncThreshold +} + +func (n testNodeConfig) NodeIsSyncingEnabled() bool { + return n.nodeIsSyncingEnabled +} + +func (n testNodeConfig) FinalizedBlockPollInterval() time.Duration { + return n.finalizedBlockPollInterval +} + +func (n testNodeConfig) EnforceRepeatableRead() bool { + return n.enforceRepeatableRead +} + +func (n testNodeConfig) DeathDeclarationDelay() time.Duration { + return n.deathDeclarationDelay +} + +type testNode struct { + *node[types.ID, Head, RPCClient[types.ID, Head]] +} + +type testNodeOpts struct { + config testNodeConfig + chainConfig mocks.ChainConfig + lggr logger.Logger + wsuri *url.URL + httpuri *url.URL + name string + id int + chainID types.ID + nodeOrder int32 + rpc *mockRPCClient[types.ID, Head] + chainFamily string +} + +func newTestNode(t *testing.T, opts testNodeOpts) testNode { + if opts.lggr == nil { + opts.lggr = logger.Test(t) + } + + if opts.name == "" { + opts.name = "tes node" + } + + if opts.chainFamily == "" { + opts.chainFamily = "test node chain family" + } + + if opts.chainID == nil { + opts.chainID = types.RandomID() + } + + if opts.id == 0 { + opts.id = 42 + } + + nodeI := NewNode[types.ID, Head, RPCClient[types.ID, Head]](opts.config, opts.chainConfig, opts.lggr, + opts.wsuri, opts.httpuri, opts.name, opts.id, opts.chainID, opts.nodeOrder, opts.rpc, opts.chainFamily) + + return testNode{ + nodeI.(*node[types.ID, Head, RPCClient[types.ID, Head]]), + } +} diff --git a/multinode/poller.go b/multinode/poller.go new file mode 100644 index 0000000..3e250e8 --- /dev/null +++ b/multinode/poller.go @@ -0,0 +1,95 @@ +package client + +import ( + "context" + "time" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +// Poller is a component that polls a function at a given interval +// and delivers the result to a channel. It is used by multinode to poll +// for new heads and implements the Subscription interface. 
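+//
+// A minimal usage sketch (illustrative only; pollLatestHead, processHead, ctx
+// and lggr are hypothetical caller-side names, not part of this change):
+//
+//	poller, heads := NewPoller[Head](time.Second, pollLatestHead, 10*time.Second, lggr)
+//	if err := poller.Start(ctx); err != nil {
+//		return err
+//	}
+//	defer poller.Unsubscribe()
+//	for head := range heads {
+//		processHead(head) // the channel is closed once Unsubscribe/Close runs
+//	}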
+type Poller[T any] struct { + services.Service + eng *services.Engine + + pollingInterval time.Duration + pollingFunc func(ctx context.Context) (T, error) + pollingTimeout time.Duration + channel chan<- T + errCh chan error +} + +// NewPoller creates a new Poller instance and returns a channel to receive the polled data +func NewPoller[ + T any, +](pollingInterval time.Duration, pollingFunc func(ctx context.Context) (T, error), pollingTimeout time.Duration, lggr logger.Logger) (Poller[T], <-chan T) { + channel := make(chan T) + p := Poller[T]{ + pollingInterval: pollingInterval, + pollingFunc: pollingFunc, + pollingTimeout: pollingTimeout, + channel: channel, + errCh: make(chan error), + } + p.Service, p.eng = services.Config{ + Name: "Poller", + Start: p.start, + Close: p.close, + }.NewServiceEngine(lggr) + return p, channel +} + +var _ types.Subscription = &Poller[any]{} + +func (p *Poller[T]) start(ctx context.Context) error { + p.eng.Go(p.pollingLoop) + return nil +} + +// Unsubscribe cancels the sending of events to the data channel +func (p *Poller[T]) Unsubscribe() { + _ = p.Close() +} + +func (p *Poller[T]) close() error { + close(p.errCh) + close(p.channel) + return nil +} + +func (p *Poller[T]) Err() <-chan error { + return p.errCh +} + +func (p *Poller[T]) pollingLoop(ctx context.Context) { + ticker := services.NewTicker(p.pollingInterval) // reduce possibility of sending two exactly the same request at once + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + // Set polling timeout + pollingCtx, cancelPolling := context.WithTimeout(ctx, p.pollingTimeout) + // Execute polling function + result, err := p.pollingFunc(pollingCtx) + cancelPolling() + if err != nil { + p.eng.Warnf("polling error: %v", err) + continue + } + // Send result to channel or block if channel is full + select { + case p.channel <- result: + case <-ctx.Done(): + return + } + } + } +} diff --git a/multinode/poller_test.go b/multinode/poller_test.go new file mode 100644 index 0000000..930b101 --- /dev/null +++ b/multinode/poller_test.go @@ -0,0 +1,194 @@ +package client + +import ( + "context" + "fmt" + "math/big" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" +) + +func Test_Poller(t *testing.T) { + lggr := logger.Test(t) + + t.Run("Test multiple start", func(t *testing.T) { + ctx := tests.Context(t) + pollFunc := func(ctx context.Context) (Head, error) { + return nil, nil + } + + poller, _ := NewPoller[Head](time.Millisecond, pollFunc, time.Second, lggr) + err := poller.Start(ctx) + require.NoError(t, err) + + err = poller.Start(ctx) + require.Error(t, err) + poller.Unsubscribe() + }) + + t.Run("Test polling for heads", func(t *testing.T) { + ctx := tests.Context(t) + // Mock polling function that returns a new value every time it's called + var pollNumber int + pollLock := sync.Mutex{} + pollFunc := func(ctx context.Context) (Head, error) { + pollLock.Lock() + defer pollLock.Unlock() + pollNumber++ + h := head{ + BlockNumber: int64(pollNumber), + BlockDifficulty: big.NewInt(int64(pollNumber)), + } + return h.ToMockHead(t), nil + } + + // Create poller and start to receive data + poller, channel := NewPoller[Head](time.Millisecond, pollFunc, time.Second, lggr) + require.NoError(t, poller.Start(ctx)) + defer poller.Unsubscribe() + + // Receive updates from the 
poller + pollCount := 0 + pollMax := 50 + for ; pollCount < pollMax; pollCount++ { + h := <-channel + assert.Equal(t, int64(pollCount+1), h.BlockNumber()) + } + }) + + t.Run("Test polling errors", func(t *testing.T) { + ctx := tests.Context(t) + // Mock polling function that returns an error + var pollNumber int + pollLock := sync.Mutex{} + pollFunc := func(ctx context.Context) (Head, error) { + pollLock.Lock() + defer pollLock.Unlock() + pollNumber++ + return nil, fmt.Errorf("polling error %d", pollNumber) + } + + olggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + + // Create poller and subscribe to receive data + poller, _ := NewPoller[Head](time.Millisecond, pollFunc, time.Second, olggr) + require.NoError(t, poller.Start(ctx)) + defer poller.Unsubscribe() + + // Ensure that all errors were logged as expected + logsSeen := func() bool { + for pollCount := 0; pollCount < 50; pollCount++ { + numLogs := observedLogs.FilterMessage(fmt.Sprintf("polling error: polling error %d", pollCount+1)).Len() + if numLogs != 1 { + return false + } + } + return true + } + require.Eventually(t, logsSeen, tests.WaitTimeout(t), 100*time.Millisecond) + }) + + t.Run("Test polling timeout", func(t *testing.T) { + ctx := tests.Context(t) + pollFunc := func(ctx context.Context) (Head, error) { + if <-ctx.Done(); true { + return nil, ctx.Err() + } + return nil, nil + } + + // Set instant timeout + pollingTimeout := time.Duration(0) + + olggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + + // Create poller and subscribe to receive data + poller, _ := NewPoller[Head](time.Millisecond, pollFunc, pollingTimeout, olggr) + require.NoError(t, poller.Start(ctx)) + defer poller.Unsubscribe() + + // Ensure that timeout errors were logged as expected + logsSeen := func() bool { + return observedLogs.FilterMessage("polling error: context deadline exceeded").Len() >= 1 + } + require.Eventually(t, logsSeen, tests.WaitTimeout(t), 100*time.Millisecond) + }) + + t.Run("Test unsubscribe during polling", func(t *testing.T) { + ctx := tests.Context(t) + wait := make(chan struct{}) + closeOnce := sync.OnceFunc(func() { close(wait) }) + pollFunc := func(ctx context.Context) (Head, error) { + closeOnce() + // Block in polling function until context is cancelled + if <-ctx.Done(); true { + return nil, ctx.Err() + } + return nil, nil + } + + // Set long timeout + pollingTimeout := time.Minute + + olggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + + // Create poller and subscribe to receive data + poller, _ := NewPoller[Head](time.Millisecond, pollFunc, pollingTimeout, olggr) + require.NoError(t, poller.Start(ctx)) + + // Unsubscribe while blocked in polling function + <-wait + poller.Unsubscribe() + + // Ensure error was logged + logsSeen := func() bool { + return observedLogs.FilterMessage("polling error: context canceled").Len() >= 1 + } + require.Eventually(t, logsSeen, tests.WaitTimeout(t), 100*time.Millisecond) + }) +} + +func Test_Poller_Unsubscribe(t *testing.T) { + lggr := logger.Test(t) + pollFunc := func(ctx context.Context) (Head, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + h := head{ + BlockNumber: 0, + BlockDifficulty: big.NewInt(0), + } + return h.ToMockHead(t), nil + } + } + + t.Run("Test multiple unsubscribe", func(t *testing.T) { + ctx := tests.Context(t) + poller, channel := NewPoller[Head](time.Millisecond, pollFunc, time.Second, lggr) + err := poller.Start(ctx) + require.NoError(t, err) + + <-channel + poller.Unsubscribe() + poller.Unsubscribe() + }) 
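+	// Unsubscribe closes the delivery channel (see the close call in Poller.close),
+	// so a receive after unsubscribing returns the zero value of Head, i.e. nil.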
+ + t.Run("Read channel after unsubscribe", func(t *testing.T) { + ctx := tests.Context(t) + poller, channel := NewPoller[Head](time.Millisecond, pollFunc, time.Second, lggr) + err := poller.Start(ctx) + require.NoError(t, err) + + poller.Unsubscribe() + require.Equal(t, <-channel, nil) + }) +} diff --git a/multinode/send_only_node.go b/multinode/send_only_node.go new file mode 100644 index 0000000..e95a326 --- /dev/null +++ b/multinode/send_only_node.go @@ -0,0 +1,183 @@ +package client + +import ( + "context" + "fmt" + "net/url" + "sync" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +type sendOnlyClient[ + CHAIN_ID types.ID, +] interface { + Close() + ChainID(context.Context) (CHAIN_ID, error) + Dial(ctx context.Context) error +} + +// SendOnlyNode represents one node used as a sendonly +type SendOnlyNode[ + CHAIN_ID types.ID, + RPC any, +] interface { + // Start may attempt to connect to the node, but should only return error for misconfiguration - never for temporary errors. + Start(context.Context) error + Close() error + + ConfiguredChainID() CHAIN_ID + RPC() RPC + + String() string + // State returns nodeState + State() nodeState + // Name is a unique identifier for this node. + Name() string +} + +// It only supports sending transactions +// It must use an http(s) url +type sendOnlyNode[ + CHAIN_ID types.ID, + RPC sendOnlyClient[CHAIN_ID], +] struct { + services.StateMachine + + stateMu sync.RWMutex // protects state* fields + state nodeState + + rpc RPC + uri url.URL + log logger.Logger + name string + chainID CHAIN_ID + chStop services.StopChan + wg sync.WaitGroup +} + +// NewSendOnlyNode returns a new sendonly node +func NewSendOnlyNode[ + CHAIN_ID types.ID, + RPC sendOnlyClient[CHAIN_ID], +]( + lggr logger.Logger, + httpuri url.URL, + name string, + chainID CHAIN_ID, + rpc RPC, +) SendOnlyNode[CHAIN_ID, RPC] { + s := new(sendOnlyNode[CHAIN_ID, RPC]) + s.name = name + s.log = logger.Named(logger.Named(lggr, "SendOnlyNode"), name) + s.log = logger.With(s.log, + "nodeTier", "sendonly", + ) + s.rpc = rpc + s.uri = httpuri + s.chainID = chainID + s.chStop = make(chan struct{}) + return s +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) Start(ctx context.Context) error { + return s.StartOnce(s.name, func() error { + s.start(ctx) + return nil + }) +} + +// Start setups up and verifies the sendonly node +// Should only be called once in a node's lifecycle +func (s *sendOnlyNode[CHAIN_ID, RPC]) start(startCtx context.Context) { + if s.State() != nodeStateUndialed { + panic(fmt.Sprintf("cannot dial node with state %v", s.state)) + } + + err := s.rpc.Dial(startCtx) + if err != nil { + promPoolRPCNodeTransitionsToUnusable.WithLabelValues(s.chainID.String(), s.name).Inc() + s.log.Errorw("Dial failed: SendOnly Node is unusable", "err", err) + s.setState(nodeStateUnusable) + return + } + s.setState(nodeStateDialed) + + if s.chainID.String() == "0" { + // Skip verification if chainID is zero + s.log.Warn("sendonly rpc ChainID verification skipped") + } else { + chainID, err := s.rpc.ChainID(startCtx) + if err != nil || chainID.String() != s.chainID.String() { + promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc() + if err != nil { + promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc() + s.log.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err) + s.setState(nodeStateUnreachable) + 
} else { + promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(s.chainID.String(), s.name).Inc() + s.log.Errorf( + "sendonly rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s", + chainID.String(), + s.chainID.String(), + s.name, + ) + s.setState(nodeStateInvalidChainID) + } + // Since it has failed, spin up the verifyLoop that will keep + // retrying until success + s.wg.Add(1) + go s.verifyLoop() + return + } + } + + promPoolRPCNodeTransitionsToAlive.WithLabelValues(s.chainID.String(), s.name).Inc() + s.setState(nodeStateAlive) + s.log.Infow("Sendonly RPC Node is online", "nodeState", s.state) +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) Close() error { + return s.StopOnce(s.name, func() error { + s.rpc.Close() + close(s.chStop) + s.wg.Wait() + s.setState(nodeStateClosed) + return nil + }) +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) ConfiguredChainID() CHAIN_ID { + return s.chainID +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) RPC() RPC { + return s.rpc +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) String() string { + return fmt.Sprintf("(%s)%s:%s", Secondary.String(), s.name, s.uri.Redacted()) +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) setState(state nodeState) (changed bool) { + s.stateMu.Lock() + defer s.stateMu.Unlock() + if s.state == state { + return false + } + s.state = state + return true +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) State() nodeState { + s.stateMu.RLock() + defer s.stateMu.RUnlock() + return s.state +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) Name() string { + return s.name +} diff --git a/multinode/send_only_node_lifecycle.go b/multinode/send_only_node_lifecycle.go new file mode 100644 index 0000000..eded9d2 --- /dev/null +++ b/multinode/send_only_node_lifecycle.go @@ -0,0 +1,67 @@ +package client + +import ( + "fmt" + "time" + + "github.com/smartcontractkit/chainlink-framework/utils" +) + +// verifyLoop may only be triggered once, on Start, if initial chain ID check +// fails. +// +// It will continue checking until success and then exit permanently. 
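+//
+// Retries are spaced by utils.NewRedialBackoff(), so a sendonly node that never
+// verifies keeps re-checking its chain ID at a bounded rate rather than hot-looping.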
+func (s *sendOnlyNode[CHAIN_ID, RPC]) verifyLoop() { + defer s.wg.Done() + ctx, cancel := s.chStop.NewCtx() + defer cancel() + + backoff := utils.NewRedialBackoff() + for { + select { + case <-ctx.Done(): + return + case <-time.After(backoff.Duration()): + } + chainID, err := s.rpc.ChainID(ctx) + if err != nil { + ok := s.IfStarted(func() { + if changed := s.setState(nodeStateUnreachable); changed { + promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc() + } + }) + if !ok { + return + } + s.log.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err) + continue + } else if chainID.String() != s.chainID.String() { + ok := s.IfStarted(func() { + if changed := s.setState(nodeStateInvalidChainID); changed { + promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(s.chainID.String(), s.name).Inc() + } + }) + if !ok { + return + } + s.log.Errorf( + "sendonly rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s", + chainID.String(), + s.chainID.String(), + s.name, + ) + + continue + } + ok := s.IfStarted(func() { + if changed := s.setState(nodeStateAlive); changed { + promPoolRPCNodeTransitionsToAlive.WithLabelValues(s.chainID.String(), s.name).Inc() + } + }) + if !ok { + return + } + s.log.Infow("Sendonly RPC Node is online", "nodeState", s.state) + return + } +} diff --git a/multinode/send_only_node_test.go b/multinode/send_only_node_test.go new file mode 100644 index 0000000..50ad0af --- /dev/null +++ b/multinode/send_only_node_test.go @@ -0,0 +1,139 @@ +package client + +import ( + "errors" + "fmt" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +func TestNewSendOnlyNode(t *testing.T) { + t.Parallel() + + urlFormat := "http://user:%s@testurl.com" + password := "pass" + u, err := url.Parse(fmt.Sprintf(urlFormat, password)) + require.NoError(t, err) + redacted := fmt.Sprintf(urlFormat, "xxxxx") + lggr := logger.Test(t) + name := "TestNewSendOnlyNode" + chainID := types.RandomID() + client := newMockSendOnlyClient[types.ID](t) + + node := NewSendOnlyNode(lggr, *u, name, chainID, client) + assert.NotNil(t, node) + + // Must contain name & url with redacted password + assert.Contains(t, node.String(), fmt.Sprintf("%s:%s", name, redacted)) + assert.Equal(t, node.ConfiguredChainID(), chainID) +} + +func TestStartSendOnlyNode(t *testing.T) { + t.Parallel() + t.Run("becomes unusable if initial dial fails", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + client := newMockSendOnlyClient[types.ID](t) + client.On("Close").Once() + expectedError := errors.New("some http error") + client.On("Dial", mock.Anything).Return(expectedError).Once() + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), types.RandomID(), client) + + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(tests.Context(t)) + require.NoError(t, err) + + assert.Equal(t, nodeStateUnusable, s.State()) + tests.RequireLogMessage(t, observedLogs, "Dial failed: SendOnly Node is unusable") + }) + t.Run("Default ChainID(0) produces warn and skips checks", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + client := newMockSendOnlyClient[types.ID](t) + 
client.On("Close").Once() + client.On("Dial", mock.Anything).Return(nil).Once() + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), types.NewIDFromInt(0), client) + + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(tests.Context(t)) + require.NoError(t, err) + + assert.Equal(t, nodeStateAlive, s.State()) + tests.RequireLogMessage(t, observedLogs, "sendonly rpc ChainID verification skipped") + }) + t.Run("Can recover from chainID verification failure", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + client := newMockSendOnlyClient[types.ID](t) + client.On("Close").Once() + client.On("Dial", mock.Anything).Return(nil) + expectedError := errors.New("failed to get chain ID") + chainID := types.RandomID() + const failuresCount = 2 + client.On("ChainID", mock.Anything).Return(types.RandomID(), expectedError).Times(failuresCount) + client.On("ChainID", mock.Anything).Return(chainID, nil) + + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), chainID, client) + + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(tests.Context(t)) + require.NoError(t, err) + + assert.Equal(t, nodeStateUnreachable, s.State()) + tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Verify failed: %v", expectedError), failuresCount) + tests.AssertEventually(t, func() bool { + return s.State() == nodeStateAlive + }) + }) + t.Run("Can recover from chainID mismatch", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + client := newMockSendOnlyClient[types.ID](t) + client.On("Close").Once() + client.On("Dial", mock.Anything).Return(nil).Once() + configuredChainID := types.NewIDFromInt(11) + rpcChainID := types.NewIDFromInt(20) + const failuresCount = 2 + client.On("ChainID", mock.Anything).Return(rpcChainID, nil).Times(failuresCount) + client.On("ChainID", mock.Anything).Return(configuredChainID, nil) + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), configuredChainID, client) + + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(tests.Context(t)) + require.NoError(t, err) + + assert.Equal(t, nodeStateInvalidChainID, s.State()) + tests.AssertLogCountEventually(t, observedLogs, "sendonly rpc ChainID doesn't match local chain ID", failuresCount) + tests.AssertEventually(t, func() bool { + return s.State() == nodeStateAlive + }) + }) + t.Run("Start with Random ChainID", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + client := newMockSendOnlyClient[types.ID](t) + client.On("Close").Once() + client.On("Dial", mock.Anything).Return(nil).Once() + configuredChainID := types.RandomID() + client.On("ChainID", mock.Anything).Return(configuredChainID, nil) + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), configuredChainID, client) + + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertEventually(t, func() bool { + return s.State() == nodeStateAlive + }) + assert.Equal(t, 0, observedLogs.Len()) // No warnings expected + }) +} diff --git a/multinode/transaction_sender.go b/multinode/transaction_sender.go new file mode 100644 index 0000000..bd59351 --- /dev/null +++ b/multinode/transaction_sender.go @@ -0,0 +1,284 @@ +package client + +import ( + "context" + "errors" + "math" + "slices" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + 
"github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +var ( + // PromMultiNodeInvariantViolations reports violation of our assumptions + PromMultiNodeInvariantViolations = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "multi_node_invariant_violations", + Help: "The number of invariant violations", + }, []string{"network", "chainId", "invariant"}) +) + +type SendTxResult interface { + Code() SendTxReturnCode + Error() error +} + +const sendTxQuorum = 0.7 + +// SendTxRPCClient - defines interface of an RPC used by TransactionSender to broadcast transaction +type SendTxRPCClient[TX any, RESULT SendTxResult] interface { + // SendTransaction errors returned should include name or other unique identifier of the RPC + SendTransaction(ctx context.Context, tx TX) RESULT +} + +func NewTransactionSender[TX any, RESULT SendTxResult, CHAIN_ID types.ID, RPC SendTxRPCClient[TX, RESULT]]( + lggr logger.Logger, + chainID CHAIN_ID, + chainFamily string, + multiNode *MultiNode[CHAIN_ID, RPC], + newResult func(err error) RESULT, + sendTxSoftTimeout time.Duration, +) *TransactionSender[TX, RESULT, CHAIN_ID, RPC] { + if sendTxSoftTimeout == 0 { + sendTxSoftTimeout = QueryTimeout / 2 + } + return &TransactionSender[TX, RESULT, CHAIN_ID, RPC]{ + chainID: chainID, + chainFamily: chainFamily, + lggr: logger.Sugared(lggr).Named("TransactionSender").With("chainID", chainID.String()), + multiNode: multiNode, + newResult: newResult, + sendTxSoftTimeout: sendTxSoftTimeout, + chStop: make(services.StopChan), + } +} + +type TransactionSender[TX any, RESULT SendTxResult, CHAIN_ID types.ID, RPC SendTxRPCClient[TX, RESULT]] struct { + services.StateMachine + chainID CHAIN_ID + chainFamily string + lggr logger.SugaredLogger + multiNode *MultiNode[CHAIN_ID, RPC] + newResult func(err error) RESULT + sendTxSoftTimeout time.Duration // defines max waiting time from first response til responses evaluation + + wg sync.WaitGroup // waits for all reporting goroutines to finish + chStop services.StopChan +} + +// SendTransaction - broadcasts transaction to all the send-only and primary nodes in MultiNode. +// A returned nil or error does not guarantee that the transaction will or won't be included. Additional checks must be +// performed to determine the final state. +// +// Send-only nodes' results are ignored as they tend to return false-positive responses. Broadcast to them is necessary +// to speed up the propagation of TX in the network. +// +// Handling of primary nodes' results consists of collection and aggregation. +// In the collection step, we gather as many results as possible while minimizing waiting time. This operation succeeds +// on one of the following conditions: +// * Received at least one success +// * Received at least one result and `sendTxSoftTimeout` expired +// * Received results from the sufficient number of nodes defined by sendTxQuorum. +// The aggregation is based on the following conditions: +// * If there is at least one success - returns success +// * If there is at least one terminal error - returns terminal error +// * If there is both success and terminal error - returns success and reports invariant violation +// * Otherwise, returns any (effectively random) of the errors. 
+func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) SendTransaction(ctx context.Context, tx TX) RESULT { + var result RESULT + if !txSender.IfStarted(func() { + txResults := make(chan RESULT) + txResultsToReport := make(chan RESULT) + primaryNodeWg := sync.WaitGroup{} + + healthyNodesNum := 0 + err := txSender.multiNode.DoAll(ctx, func(ctx context.Context, rpc RPC, isSendOnly bool) { + if isSendOnly { + txSender.wg.Add(1) + go func(ctx context.Context) { + ctx, cancel := txSender.chStop.Ctx(context.WithoutCancel(ctx)) + defer cancel() + defer txSender.wg.Done() + // Send-only nodes' results are ignored as they tend to return false-positive responses. + // Broadcast to them is necessary to speed up the propagation of TX in the network. + _ = txSender.broadcastTxAsync(ctx, rpc, tx) + }(ctx) + return + } + + // Primary Nodes + healthyNodesNum++ + primaryNodeWg.Add(1) + go func(ctx context.Context) { + ctx, cancel := txSender.chStop.Ctx(context.WithoutCancel(ctx)) + defer cancel() + defer primaryNodeWg.Done() + r := txSender.broadcastTxAsync(ctx, rpc, tx) + select { + case <-ctx.Done(): + txSender.lggr.Debugw("Failed to send tx results", "err", ctx.Err()) + return + case txResults <- r: + } + + select { + case <-ctx.Done(): + txSender.lggr.Debugw("Failed to send tx results to report", "err", ctx.Err()) + return + case txResultsToReport <- r: + } + }(ctx) + }) + + // This needs to be done in parallel so the reporting knows when it's done (when the channel is closed) + txSender.wg.Add(1) + go func() { + defer txSender.wg.Done() + primaryNodeWg.Wait() + close(txResultsToReport) + close(txResults) + }() + + if err != nil { + result = txSender.newResult(err) + return + } + + txSender.wg.Add(1) + go txSender.reportSendTxAnomalies(ctx, tx, txResultsToReport) + + result = txSender.collectTxResults(ctx, tx, healthyNodesNum, txResults) + }) { + result = txSender.newResult(errors.New("TransactionSender not started")) + } + + return result +} + +func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) broadcastTxAsync(ctx context.Context, rpc RPC, tx TX) RESULT { + result := rpc.SendTransaction(ctx, tx) + txSender.lggr.Debugw("Node sent transaction", "tx", tx, "err", result.Error()) + if !slices.Contains(sendTxSuccessfulCodes, result.Code()) && ctx.Err() == nil { + txSender.lggr.Warnw("RPC returned error", "tx", tx, "err", result.Error()) + } + return result +} + +func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) reportSendTxAnomalies(ctx context.Context, tx TX, txResults <-chan RESULT) { + defer txSender.wg.Done() + resultsByCode := sendTxResults[RESULT]{} + // txResults eventually will be closed + for txResult := range txResults { + resultsByCode[txResult.Code()] = append(resultsByCode[txResult.Code()], txResult) + } + + _, criticalErr := aggregateTxResults[RESULT](resultsByCode) + if criticalErr != nil && ctx.Err() == nil { + txSender.lggr.Criticalw("observed invariant violation on SendTransaction", "tx", tx, "resultsByCode", resultsByCode, "err", criticalErr) + PromMultiNodeInvariantViolations.WithLabelValues(txSender.chainFamily, txSender.chainID.String(), criticalErr.Error()).Inc() + } +} + +type sendTxResults[RESULT any] map[SendTxReturnCode][]RESULT + +func aggregateTxResults[RESULT any](resultsByCode sendTxResults[RESULT]) (result RESULT, criticalErr error) { + severeErrors, hasSevereErrors := findFirstIn(resultsByCode, sendTxSevereErrors) + successResults, hasSuccess := findFirstIn(resultsByCode, sendTxSuccessfulCodes) + if hasSuccess { + // We assume that primary 
node would never report false positive txResult for a transaction. + // Thus, if such case occurs it's probably due to misconfiguration or a bug and requires manual intervention. + if hasSevereErrors { + const errMsg = "found contradictions in nodes replies on SendTransaction: got success and severe error" + // return success, since at least 1 node has accepted our broadcasted Tx, and thus it can now be included onchain + return successResults[0], errors.New(errMsg) + } + + // other errors are temporary - we are safe to return success + return successResults[0], nil + } + + if hasSevereErrors { + return severeErrors[0], nil + } + + // return temporary error + for _, r := range resultsByCode { + return r[0], nil + } + + criticalErr = errors.New("expected at least one response on SendTransaction") + return result, criticalErr +} + +func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) collectTxResults(ctx context.Context, tx TX, healthyNodesNum int, txResults <-chan RESULT) RESULT { + if healthyNodesNum == 0 { + return txSender.newResult(ErroringNodeError) + } + requiredResults := int(math.Ceil(float64(healthyNodesNum) * sendTxQuorum)) + errorsByCode := sendTxResults[RESULT]{} + var softTimeoutChan <-chan time.Time + var resultsCount int +loop: + for { + select { + case <-ctx.Done(): + txSender.lggr.Debugw("Failed to collect of the results before context was done", "tx", tx, "errorsByCode", errorsByCode) + return txSender.newResult(ctx.Err()) + case r := <-txResults: + errorsByCode[r.Code()] = append(errorsByCode[r.Code()], r) + resultsCount++ + if slices.Contains(sendTxSuccessfulCodes, r.Code()) || resultsCount >= requiredResults { + break loop + } + case <-softTimeoutChan: + txSender.lggr.Debugw("Send Tx soft timeout expired - returning responses we've collected so far", "tx", tx, "resultsCount", resultsCount, "requiredResults", requiredResults) + break loop + } + + if softTimeoutChan == nil { + tm := time.NewTimer(txSender.sendTxSoftTimeout) + softTimeoutChan = tm.C + // we are fine with stopping timer at the end of function + //nolint + defer tm.Stop() + } + } + + // ignore critical error as it's reported in reportSendTxAnomalies + result, _ := aggregateTxResults(errorsByCode) + txSender.lggr.Debugw("Collected results", "errorsByCode", errorsByCode, "result", result) + return result +} + +func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) Start(ctx context.Context) error { + return txSender.StartOnce("TransactionSender", func() error { + return nil + }) +} + +func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) Close() error { + return txSender.StopOnce("TransactionSender", func() error { + txSender.lggr.Debug("Closing TransactionSender") + close(txSender.chStop) + txSender.wg.Wait() + return nil + }) +} + +// findFirstIn - returns the first existing key and value for the slice of keys +func findFirstIn[K comparable, V any](set map[K]V, keys []K) (V, bool) { + for _, k := range keys { + if v, ok := set[k]; ok { + return v, true + } + } + var zeroV V + return zeroV, false +} diff --git a/multinode/transaction_sender_test.go b/multinode/transaction_sender_test.go new file mode 100644 index 0000000..f79f3df --- /dev/null +++ b/multinode/transaction_sender_test.go @@ -0,0 +1,398 @@ +package client + +import ( + "context" + "fmt" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + 
"github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + "github.com/smartcontractkit/chainlink-framework/types" +) + +type TestSendTxRPCClient SendTxRPCClient[any, *sendTxResult] + +type sendTxMultiNode struct { + *MultiNode[types.ID, TestSendTxRPCClient] +} + +type sendTxRPC struct { + sendTxRun func(args mock.Arguments) + sendTxErr error +} + +type sendTxResult struct { + err error + code SendTxReturnCode +} + +var _ SendTxResult = (*sendTxResult)(nil) + +func NewSendTxResult(err error) *sendTxResult { + result := &sendTxResult{ + err: err, + } + return result +} + +func (r *sendTxResult) Error() error { + return r.err +} + +func (r *sendTxResult) Code() SendTxReturnCode { + return r.code +} + +var _ TestSendTxRPCClient = (*sendTxRPC)(nil) + +func newSendTxRPC(sendTxErr error, sendTxRun func(args mock.Arguments)) *sendTxRPC { + return &sendTxRPC{sendTxErr: sendTxErr, sendTxRun: sendTxRun} +} + +func (rpc *sendTxRPC) SendTransaction(ctx context.Context, _ any) *sendTxResult { + if rpc.sendTxRun != nil { + rpc.sendTxRun(mock.Arguments{ctx}) + } + return &sendTxResult{err: rpc.sendTxErr, code: classifySendTxError(nil, rpc.sendTxErr)} +} + +// newTestTransactionSender returns a sendTxMultiNode and TransactionSender. +// Only the TransactionSender is run via Start/Close. +func newTestTransactionSender(t *testing.T, chainID types.ID, lggr logger.Logger, + nodes []Node[types.ID, TestSendTxRPCClient], + sendOnlyNodes []SendOnlyNode[types.ID, TestSendTxRPCClient], +) (*sendTxMultiNode, *TransactionSender[any, *sendTxResult, types.ID, TestSendTxRPCClient]) { + mn := sendTxMultiNode{NewMultiNode[types.ID, TestSendTxRPCClient]( + lggr, NodeSelectionModeRoundRobin, 0, nodes, sendOnlyNodes, chainID, "chainFamily", 0)} + + txSender := NewTransactionSender[any, *sendTxResult, types.ID, TestSendTxRPCClient](lggr, chainID, mn.chainFamily, mn.MultiNode, NewSendTxResult, tests.TestInterval) + servicetest.Run(t, txSender) + return &mn, txSender +} + +func classifySendTxError(_ any, err error) SendTxReturnCode { + if err != nil { + return Fatal + } + return Successful +} + +func TestTransactionSender_SendTransaction(t *testing.T) { + t.Parallel() + + newNodeWithState := func(t *testing.T, state nodeState, txErr error, sendTxRun func(args mock.Arguments)) *mockNode[types.ID, TestSendTxRPCClient] { + rpc := newSendTxRPC(txErr, sendTxRun) + node := newMockNode[types.ID, TestSendTxRPCClient](t) + node.On("String").Return("node name").Maybe() + node.On("RPC").Return(rpc).Maybe() + node.On("State").Return(state).Maybe() + node.On("Start", mock.Anything).Return(nil).Maybe() + node.On("Close").Return(nil).Maybe() + node.On("SetPoolChainInfoProvider", mock.Anything).Return(nil).Maybe() + return node + } + + newNode := func(t *testing.T, txErr error, sendTxRun func(args mock.Arguments)) *mockNode[types.ID, TestSendTxRPCClient] { + return newNodeWithState(t, nodeStateAlive, txErr, sendTxRun) + } + + t.Run("Fails if there is no nodes available", func(t *testing.T) { + lggr := logger.Test(t) + _, txSender := newTestTransactionSender(t, types.RandomID(), lggr, nil, nil) + result := txSender.SendTransaction(tests.Context(t), nil) + assert.EqualError(t, result.Error(), ErroringNodeError.Error()) + }) + + t.Run("Transaction failure happy path", func(t *testing.T) { + expectedError := errors.New("transaction failed") + mainNode := newNode(t, expectedError, nil) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + + _, txSender := 
newTestTransactionSender(t, types.RandomID(), lggr, + []Node[types.ID, TestSendTxRPCClient]{mainNode}, + []SendOnlyNode[types.ID, TestSendTxRPCClient]{newNode(t, errors.New("unexpected error"), nil)}) + + result := txSender.SendTransaction(tests.Context(t), nil) + require.ErrorIs(t, result.Error(), expectedError) + require.Equal(t, Fatal, result.Code()) + tests.AssertLogCountEventually(t, observedLogs, "Node sent transaction", 2) + tests.AssertLogCountEventually(t, observedLogs, "RPC returned error", 2) + }) + + t.Run("Transaction success happy path", func(t *testing.T) { + mainNode := newNode(t, nil, nil) + + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + _, txSender := newTestTransactionSender(t, types.RandomID(), lggr, + []Node[types.ID, TestSendTxRPCClient]{mainNode}, + []SendOnlyNode[types.ID, TestSendTxRPCClient]{newNode(t, errors.New("unexpected error"), nil)}) + + result := txSender.SendTransaction(tests.Context(t), nil) + require.NoError(t, result.Error()) + require.Equal(t, Successful, result.Code()) + tests.AssertLogCountEventually(t, observedLogs, "Node sent transaction", 2) + tests.AssertLogCountEventually(t, observedLogs, "RPC returned error", 1) + }) + + t.Run("Context expired before collecting sufficient results", func(t *testing.T) { + testContext, testCancel := context.WithCancel(tests.Context(t)) + defer testCancel() + + mainNode := newNode(t, nil, func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + + lggr := logger.Test(t) + + _, txSender := newTestTransactionSender(t, types.RandomID(), lggr, + []Node[types.ID, TestSendTxRPCClient]{mainNode}, nil) + + requestContext, cancel := context.WithCancel(tests.Context(t)) + cancel() + result := txSender.SendTransaction(requestContext, nil) + require.EqualError(t, result.Error(), "context canceled") + }) + + t.Run("Soft timeout stops results collection", func(t *testing.T) { + chainID := types.RandomID() + expectedError := errors.New("transaction failed") + fastNode := newNode(t, expectedError, nil) + + // hold reply from the node till end of the test + testContext, testCancel := context.WithCancel(tests.Context(t)) + defer testCancel() + slowNode := newNode(t, errors.New("transaction failed"), func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + + lggr := logger.Test(t) + + _, txSender := newTestTransactionSender(t, chainID, lggr, []Node[types.ID, TestSendTxRPCClient]{fastNode, slowNode}, nil) + result := txSender.SendTransaction(tests.Context(t), nil) + require.EqualError(t, result.Error(), expectedError.Error()) + }) + t.Run("Returns success without waiting for the rest of the nodes", func(t *testing.T) { + chainID := types.RandomID() + fastNode := newNode(t, nil, nil) + // hold reply from the node till end of the test + testContext, testCancel := context.WithCancel(tests.Context(t)) + defer testCancel() + slowNode := newNode(t, errors.New("transaction failed"), func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + slowSendOnly := newNode(t, errors.New("send only failed"), func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + lggr, _ := logger.TestObserved(t, zap.WarnLevel) + _, txSender := newTestTransactionSender(t, chainID, lggr, + []Node[types.ID, TestSendTxRPCClient]{fastNode, slowNode}, + []SendOnlyNode[types.ID, TestSendTxRPCClient]{slowSendOnly}) + + result := txSender.SendTransaction(tests.Context(t), nil) + require.NoError(t, result.Error()) + 
require.Equal(t, Successful, result.Code()) + }) + t.Run("Fails when multinode is closed", func(t *testing.T) { + chainID := types.RandomID() + fastNode := newNode(t, nil, nil) + fastNode.On("ConfiguredChainID").Return(chainID).Maybe() + // hold reply from the node till end of the test + testContext, testCancel := context.WithCancel(tests.Context(t)) + defer testCancel() + slowNode := newNode(t, errors.New("transaction failed"), func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + slowNode.On("ConfiguredChainID").Return(chainID).Maybe() + slowSendOnly := newNode(t, errors.New("send only failed"), func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + slowSendOnly.On("ConfiguredChainID").Return(chainID).Maybe() + + lggr, _ := logger.TestObserved(t, zap.DebugLevel) + + mn, txSender := newTestTransactionSender(t, chainID, lggr, + []Node[types.ID, TestSendTxRPCClient]{fastNode, slowNode}, + []SendOnlyNode[types.ID, TestSendTxRPCClient]{slowSendOnly}) + + require.NoError(t, mn.Start(tests.Context(t))) + require.NoError(t, mn.Close()) + result := txSender.SendTransaction(tests.Context(t), nil) + require.EqualError(t, result.Error(), "service is stopped") + }) + t.Run("Fails when closed", func(t *testing.T) { + chainID := types.RandomID() + fastNode := newNode(t, nil, nil) + // hold reply from the node till end of the test + testContext, testCancel := context.WithCancel(tests.Context(t)) + defer testCancel() + slowNode := newNode(t, errors.New("transaction failed"), func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + slowSendOnly := newNode(t, errors.New("send only failed"), func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + + var txSender *TransactionSender[any, *sendTxResult, types.ID, TestSendTxRPCClient] + + t.Cleanup(func() { // after txSender.Close() + result := txSender.SendTransaction(tests.Context(t), nil) + assert.EqualError(t, result.err, "TransactionSender not started") + }) + + _, txSender = newTestTransactionSender(t, chainID, logger.Test(t), + []Node[types.ID, TestSendTxRPCClient]{fastNode, slowNode}, + []SendOnlyNode[types.ID, TestSendTxRPCClient]{slowSendOnly}) + + }) + t.Run("Returns error if there is no healthy primary nodes", func(t *testing.T) { + chainID := types.RandomID() + primary := newNodeWithState(t, nodeStateUnreachable, nil, nil) + sendOnly := newNodeWithState(t, nodeStateUnreachable, nil, nil) + + lggr := logger.Test(t) + + _, txSender := newTestTransactionSender(t, chainID, lggr, + []Node[types.ID, TestSendTxRPCClient]{primary}, + []SendOnlyNode[types.ID, TestSendTxRPCClient]{sendOnly}) + + result := txSender.SendTransaction(tests.Context(t), nil) + assert.EqualError(t, result.Error(), ErroringNodeError.Error()) + }) + + t.Run("Transaction success even if one of the nodes is unhealthy", func(t *testing.T) { + chainID := types.RandomID() + mainNode := newNode(t, nil, nil) + unexpectedCall := func(args mock.Arguments) { + panic("SendTx must not be called for unhealthy node") + } + unhealthyNode := newNodeWithState(t, nodeStateUnreachable, nil, unexpectedCall) + unhealthySendOnlyNode := newNodeWithState(t, nodeStateUnreachable, nil, unexpectedCall) + + lggr := logger.Test(t) + + _, txSender := newTestTransactionSender(t, chainID, lggr, + []Node[types.ID, TestSendTxRPCClient]{mainNode, unhealthyNode}, + []SendOnlyNode[types.ID, TestSendTxRPCClient]{unhealthySendOnlyNode}) + + result := 
txSender.SendTransaction(tests.Context(t), nil) + require.NoError(t, result.Error()) + require.Equal(t, Successful, result.Code()) + }) +} + +func TestTransactionSender_SendTransaction_aggregateTxResults(t *testing.T) { + t.Parallel() + // ensure failure on new SendTxReturnCode + codesToCover := map[SendTxReturnCode]struct{}{} + for code := Successful; code < sendTxReturnCodeLen; code++ { + codesToCover[code] = struct{}{} + } + + testCases := []struct { + Name string + ExpectedTxResult string + ExpectedCriticalErr string + ResultsByCode sendTxResults[*sendTxResult] + }{ + { + Name: "Returns success and logs critical error on success and Fatal", + ExpectedTxResult: "success", + ExpectedCriticalErr: "found contradictions in nodes replies on SendTransaction: got success and severe error", + ResultsByCode: sendTxResults[*sendTxResult]{ + Successful: {NewSendTxResult(errors.New("success"))}, + Fatal: {NewSendTxResult(errors.New("fatal"))}, + }, + }, + { + Name: "Returns TransactionAlreadyKnown and logs critical error on TransactionAlreadyKnown and Fatal", + ExpectedTxResult: "tx_already_known", + ExpectedCriticalErr: "found contradictions in nodes replies on SendTransaction: got success and severe error", + ResultsByCode: sendTxResults[*sendTxResult]{ + TransactionAlreadyKnown: {NewSendTxResult(errors.New("tx_already_known"))}, + Unsupported: {NewSendTxResult(errors.New("unsupported"))}, + }, + }, + { + Name: "Prefers sever error to temporary", + ExpectedTxResult: "underpriced", + ExpectedCriticalErr: "", + ResultsByCode: sendTxResults[*sendTxResult]{ + Retryable: {NewSendTxResult(errors.New("retryable"))}, + Underpriced: {NewSendTxResult(errors.New("underpriced"))}, + }, + }, + { + Name: "Returns temporary error", + ExpectedTxResult: "retryable", + ExpectedCriticalErr: "", + ResultsByCode: sendTxResults[*sendTxResult]{ + Retryable: {NewSendTxResult(errors.New("retryable"))}, + }, + }, + { + Name: "Insufficient funds is treated as error", + ExpectedTxResult: "insufficientFunds", + ExpectedCriticalErr: "", + ResultsByCode: sendTxResults[*sendTxResult]{ + InsufficientFunds: {NewSendTxResult(errors.New("insufficientFunds"))}, + }, + }, + { + Name: "Logs critical error on empty ResultsByCode", + ExpectedCriticalErr: "expected at least one response on SendTransaction", + ResultsByCode: sendTxResults[*sendTxResult]{}, + }, + { + Name: "Zk terminally stuck", + ExpectedTxResult: "not enough keccak counters to continue the execution", + ExpectedCriticalErr: "", + ResultsByCode: sendTxResults[*sendTxResult]{ + TerminallyStuck: {NewSendTxResult(errors.New("not enough keccak counters to continue the execution"))}, + }, + }, + } + + for _, testCase := range testCases { + for code := range testCase.ResultsByCode { + delete(codesToCover, code) + } + + t.Run(testCase.Name, func(t *testing.T) { + txResult, err := aggregateTxResults(testCase.ResultsByCode) + if testCase.ExpectedTxResult != "" { + require.EqualError(t, txResult.Error(), testCase.ExpectedTxResult) + } + + logger.Sugared(logger.Test(t)).Info("Map: " + fmt.Sprint(testCase.ResultsByCode)) + logger.Sugared(logger.Test(t)).Criticalw("observed invariant violation on SendTransaction", "resultsByCode", testCase.ResultsByCode, "err", err) + + if testCase.ExpectedCriticalErr == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, testCase.ExpectedCriticalErr) + } + }) + } + + // explicitly signal that following codes are properly handled in aggregateTxResults, + // but dedicated test cases won't be beneficial + for _, codeToIgnore := range 
[]SendTxReturnCode{Unknown, ExceedsMaxFee, FeeOutOfValidRange} { + delete(codesToCover, codeToIgnore) + } + assert.Empty(t, codesToCover, "all of the SendTxReturnCode must be covered by this test") +} diff --git a/multinode/types.go b/multinode/types.go new file mode 100644 index 0000000..bfe73fa --- /dev/null +++ b/multinode/types.go @@ -0,0 +1,83 @@ +package client + +import ( + "context" + "math/big" + + "github.com/smartcontractkit/chainlink-framework/types" +) + +// RPCClient includes all the necessary generalized RPC methods used by Node to perform health checks +type RPCClient[ + CHAIN_ID types.ID, + HEAD Head, +] interface { + // ChainID - fetches ChainID from the RPC to verify that it matches config + ChainID(ctx context.Context) (CHAIN_ID, error) + // Dial - prepares the RPC for usage. Can be called on fresh or closed RPC + Dial(ctx context.Context) error + // SubscribeToHeads - returns channel and subscription for new heads. + SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) + // SubscribeToFinalizedHeads - returns channel and subscription for finalized heads. + SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) + // Ping - returns error if RPC is not reachable + Ping(context.Context) error + // IsSyncing - returns true if the RPC is in Syncing state and can not process calls + IsSyncing(ctx context.Context) (bool, error) + // UnsubscribeAllExcept - close all subscriptions except `subs` + UnsubscribeAllExcept(subs ...types.Subscription) + // Close - closes all subscriptions and aborts all RPC calls + Close() + // GetInterceptedChainInfo - returns latest and highest observed by application layer ChainInfo. + // latest ChainInfo is the most recent value received within a NodeClient's current lifecycle between Dial and DisconnectAll. + // highestUserObservations ChainInfo is the highest ChainInfo observed excluding health checks calls. + // Its values must not be reset. + // The results of corresponding calls, to get the most recent head and the latest finalized head, must be + // intercepted and reflected in ChainInfo before being returned to a caller. Otherwise, MultiNode is not able to + // provide repeatable read guarantee. + // DisconnectAll must reset latest ChainInfo to default value. + // Ensure implementation does not have a race condition when values are reset before request completion and as + // a result latest ChainInfo contains information from the previous cycle. + GetInterceptedChainInfo() (latest, highestUserObservations ChainInfo) +} + +// Head is the interface required by the NodeClient +type Head interface { + BlockNumber() int64 + BlockDifficulty() *big.Int + IsValid() bool +} + +// PoolChainInfoProvider - provides aggregation of nodes pool ChainInfo +type PoolChainInfoProvider interface { + // LatestChainInfo - returns number of live nodes available in the pool, so we can prevent the last alive node in a pool from being + // moved to out-of-sync state. It is better to have one out-of-sync node than no nodes at all. + // Returns highest latest ChainInfo within the alive nodes. E.g. most recent block number and highest block number + // observed by Node A are 10 and 15; Node B - 12 and 14. This method will return 12. + LatestChainInfo() (int, ChainInfo) + // HighestUserObservations - returns highest ChainInfo ever observed by any user of MultiNode. 
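+	// Unlike LatestChainInfo, observations made by health-check calls are excluded
+	// and the value is never reset, so it only grows over the pool's lifetime
+	// (illustrative: if user-facing calls have seen block 120 via one node and 118
+	// via another, 120 is reported even after the first node goes out of sync).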
+ HighestUserObservations() ChainInfo +} + +// ChainInfo - defines RPC's or MultiNode's view on the chain +type ChainInfo struct { + BlockNumber int64 + FinalizedBlockNumber int64 + TotalDifficulty *big.Int +} + +func MaxTotalDifficulty(a, b *big.Int) *big.Int { + if a == nil { + if b == nil { + return nil + } + + return big.NewInt(0).Set(b) + } + + if b == nil || a.Cmp(b) >= 0 { + return big.NewInt(0).Set(a) + } + + return big.NewInt(0).Set(b) +} diff --git a/multinode/types_test.go b/multinode/types_test.go new file mode 100644 index 0000000..68d7a3f --- /dev/null +++ b/multinode/types_test.go @@ -0,0 +1,34 @@ +package client + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMaxDifficulty(t *testing.T) { + cases := []struct { + A, B, Result *big.Int + }{ + { + A: nil, B: nil, Result: nil, + }, + { + A: nil, B: big.NewInt(1), Result: big.NewInt(1), + }, + { + A: big.NewInt(1), B: big.NewInt(1), Result: big.NewInt(1), + }, + { + A: big.NewInt(1), B: big.NewInt(2), Result: big.NewInt(2), + }, + } + + for _, test := range cases { + actualResult := MaxTotalDifficulty(test.A, test.B) + assert.Equal(t, test.Result, actualResult, "expected max(%v, %v) to produce %v", test.A, test.B, test.Result) + inverted := MaxTotalDifficulty(test.B, test.A) + assert.Equal(t, actualResult, inverted, "expected max(%v, %v) == max(%v, %v)", test.A, test.B, test.B, test.A) + } +} diff --git a/types/chain.go b/types/chain.go new file mode 100644 index 0000000..e928cf1 --- /dev/null +++ b/types/chain.go @@ -0,0 +1,32 @@ +package types + +import ( + "fmt" + + "github.com/smartcontractkit/chainlink-common/pkg/types" +) + +// Sequence represents the base type, for any chain's sequence object. +// It should be convertible to a string +type Sequence interface { + fmt.Stringer + Int64() int64 // needed for numeric sequence confirmation - to be removed with confirmation logic generalization: https://smartcontract-it.atlassian.net/browse/BCI-860 +} + +// ID represents the base type, for any chain's ID. +// It should be convertible to a string, that can uniquely identify this chain +type ID fmt.Stringer + +// StringID enables using string directly as a ChainID +type StringID string + +func (s StringID) String() string { + return string(s) +} + +// ChainStatusWithID compose of ChainStatus and RelayID. This is useful for +// storing the Network associated with the ChainStatus. +type ChainStatusWithID struct { + types.ChainStatus + types.RelayID +} diff --git a/types/hashable.go b/types/hashable.go new file mode 100644 index 0000000..2d16650 --- /dev/null +++ b/types/hashable.go @@ -0,0 +1,12 @@ +package types + +import "fmt" + +// A chain-agnostic generic interface to represent the following native types on various chains: +// PublicKey, Address, Account, BlockHash, TxHash +type Hashable interface { + fmt.Stringer + comparable + + Bytes() []byte +} diff --git a/types/head.go b/types/head.go new file mode 100644 index 0000000..5252c8f --- /dev/null +++ b/types/head.go @@ -0,0 +1,45 @@ +package types + +import ( + "math/big" + "time" +) + +// Head provides access to a chain's head, as needed by the TxManager. +// This is a generic interface which ALL chains will implement. 
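+// Note that this is a richer interface than the minimal Head used by multinode's
+// RPCClient (BlockNumber, BlockDifficulty, IsValid): it additionally exposes the
+// parent chain, block timestamps and finality information.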
+type Head[BLOCK_HASH Hashable] interface { + // BlockNumber is the head's block number + BlockNumber() int64 + + // Timestamp the time of mining of the block + GetTimestamp() time.Time + + // ChainLength returns the length of the chain followed by recursively looking up parents + ChainLength() uint32 + + // EarliestHeadInChain traverses through parents until it finds the earliest one + EarliestHeadInChain() Head[BLOCK_HASH] + + // Parent is the head's parent block + GetParent() Head[BLOCK_HASH] + + // Hash is the head's block hash + BlockHash() BLOCK_HASH + GetParentHash() BLOCK_HASH + + // HashAtHeight returns the hash of the block at the given height, if it is in the chain. + // If not in chain, returns the zero hash + HashAtHeight(blockNum int64) BLOCK_HASH + + // HeadAtHeight returns head at specified height or an error, if one does not exist in provided chain. + HeadAtHeight(blockNum int64) (Head[BLOCK_HASH], error) + + // Returns the total difficulty of the block. For chains who do not have a concept of block + // difficulty, return 0. + BlockDifficulty() *big.Int + // IsValid returns true if the head is valid. + IsValid() bool + + // Returns the latest finalized based on finality tag or depth + LatestFinalizedHead() Head[BLOCK_HASH] +} diff --git a/types/mocks/head.go b/types/mocks/head.go new file mode 100644 index 0000000..bc59669 --- /dev/null +++ b/types/mocks/head.go @@ -0,0 +1,601 @@ +// Code generated by mockery v2.46.3. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + time "time" + + mock "github.com/stretchr/testify/mock" + + types "github.com/smartcontractkit/chainlink-framework/types" +) + +// Head is an autogenerated mock type for the Head type +type Head[BLOCK_HASH types.Hashable] struct { + mock.Mock +} + +type Head_Expecter[BLOCK_HASH types.Hashable] struct { + mock *mock.Mock +} + +func (_m *Head[BLOCK_HASH]) EXPECT() *Head_Expecter[BLOCK_HASH] { + return &Head_Expecter[BLOCK_HASH]{mock: &_m.Mock} +} + +// BlockDifficulty provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) BlockDifficulty() *big.Int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockDifficulty") + } + + var r0 *big.Int + if rf, ok := ret.Get(0).(func() *big.Int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + return r0 +} + +// Head_BlockDifficulty_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockDifficulty' +type Head_BlockDifficulty_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// BlockDifficulty is a helper method to define mock.On call +func (_e *Head_Expecter[BLOCK_HASH]) BlockDifficulty() *Head_BlockDifficulty_Call[BLOCK_HASH] { + return &Head_BlockDifficulty_Call[BLOCK_HASH]{Call: _e.mock.On("BlockDifficulty")} +} + +func (_c *Head_BlockDifficulty_Call[BLOCK_HASH]) Run(run func()) *Head_BlockDifficulty_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Head_BlockDifficulty_Call[BLOCK_HASH]) Return(_a0 *big.Int) *Head_BlockDifficulty_Call[BLOCK_HASH] { + _c.Call.Return(_a0) + return _c +} + +func (_c *Head_BlockDifficulty_Call[BLOCK_HASH]) RunAndReturn(run func() *big.Int) *Head_BlockDifficulty_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// BlockHash provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) BlockHash() BLOCK_HASH { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockHash") + } + + var 
r0 BLOCK_HASH + if rf, ok := ret.Get(0).(func() BLOCK_HASH); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(BLOCK_HASH) + } + + return r0 +} + +// Head_BlockHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockHash' +type Head_BlockHash_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// BlockHash is a helper method to define mock.On call +func (_e *Head_Expecter[BLOCK_HASH]) BlockHash() *Head_BlockHash_Call[BLOCK_HASH] { + return &Head_BlockHash_Call[BLOCK_HASH]{Call: _e.mock.On("BlockHash")} +} + +func (_c *Head_BlockHash_Call[BLOCK_HASH]) Run(run func()) *Head_BlockHash_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Head_BlockHash_Call[BLOCK_HASH]) Return(_a0 BLOCK_HASH) *Head_BlockHash_Call[BLOCK_HASH] { + _c.Call.Return(_a0) + return _c +} + +func (_c *Head_BlockHash_Call[BLOCK_HASH]) RunAndReturn(run func() BLOCK_HASH) *Head_BlockHash_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// BlockNumber provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) BlockNumber() int64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// Head_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type Head_BlockNumber_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// BlockNumber is a helper method to define mock.On call +func (_e *Head_Expecter[BLOCK_HASH]) BlockNumber() *Head_BlockNumber_Call[BLOCK_HASH] { + return &Head_BlockNumber_Call[BLOCK_HASH]{Call: _e.mock.On("BlockNumber")} +} + +func (_c *Head_BlockNumber_Call[BLOCK_HASH]) Run(run func()) *Head_BlockNumber_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Head_BlockNumber_Call[BLOCK_HASH]) Return(_a0 int64) *Head_BlockNumber_Call[BLOCK_HASH] { + _c.Call.Return(_a0) + return _c +} + +func (_c *Head_BlockNumber_Call[BLOCK_HASH]) RunAndReturn(run func() int64) *Head_BlockNumber_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// ChainLength provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) ChainLength() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ChainLength") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// Head_ChainLength_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainLength' +type Head_ChainLength_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// ChainLength is a helper method to define mock.On call +func (_e *Head_Expecter[BLOCK_HASH]) ChainLength() *Head_ChainLength_Call[BLOCK_HASH] { + return &Head_ChainLength_Call[BLOCK_HASH]{Call: _e.mock.On("ChainLength")} +} + +func (_c *Head_ChainLength_Call[BLOCK_HASH]) Run(run func()) *Head_ChainLength_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Head_ChainLength_Call[BLOCK_HASH]) Return(_a0 uint32) *Head_ChainLength_Call[BLOCK_HASH] { + _c.Call.Return(_a0) + return _c +} + +func (_c *Head_ChainLength_Call[BLOCK_HASH]) RunAndReturn(run func() uint32) *Head_ChainLength_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// EarliestHeadInChain 
provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) EarliestHeadInChain() types.Head[BLOCK_HASH] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EarliestHeadInChain") + } + + var r0 types.Head[BLOCK_HASH] + if rf, ok := ret.Get(0).(func() types.Head[BLOCK_HASH]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Head[BLOCK_HASH]) + } + } + + return r0 +} + +// Head_EarliestHeadInChain_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EarliestHeadInChain' +type Head_EarliestHeadInChain_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// EarliestHeadInChain is a helper method to define mock.On call +func (_e *Head_Expecter[BLOCK_HASH]) EarliestHeadInChain() *Head_EarliestHeadInChain_Call[BLOCK_HASH] { + return &Head_EarliestHeadInChain_Call[BLOCK_HASH]{Call: _e.mock.On("EarliestHeadInChain")} +} + +func (_c *Head_EarliestHeadInChain_Call[BLOCK_HASH]) Run(run func()) *Head_EarliestHeadInChain_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Head_EarliestHeadInChain_Call[BLOCK_HASH]) Return(_a0 types.Head[BLOCK_HASH]) *Head_EarliestHeadInChain_Call[BLOCK_HASH] { + _c.Call.Return(_a0) + return _c +} + +func (_c *Head_EarliestHeadInChain_Call[BLOCK_HASH]) RunAndReturn(run func() types.Head[BLOCK_HASH]) *Head_EarliestHeadInChain_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// GetParent provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) GetParent() types.Head[BLOCK_HASH] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetParent") + } + + var r0 types.Head[BLOCK_HASH] + if rf, ok := ret.Get(0).(func() types.Head[BLOCK_HASH]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Head[BLOCK_HASH]) + } + } + + return r0 +} + +// Head_GetParent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetParent' +type Head_GetParent_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// GetParent is a helper method to define mock.On call +func (_e *Head_Expecter[BLOCK_HASH]) GetParent() *Head_GetParent_Call[BLOCK_HASH] { + return &Head_GetParent_Call[BLOCK_HASH]{Call: _e.mock.On("GetParent")} +} + +func (_c *Head_GetParent_Call[BLOCK_HASH]) Run(run func()) *Head_GetParent_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Head_GetParent_Call[BLOCK_HASH]) Return(_a0 types.Head[BLOCK_HASH]) *Head_GetParent_Call[BLOCK_HASH] { + _c.Call.Return(_a0) + return _c +} + +func (_c *Head_GetParent_Call[BLOCK_HASH]) RunAndReturn(run func() types.Head[BLOCK_HASH]) *Head_GetParent_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// GetParentHash provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) GetParentHash() BLOCK_HASH { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetParentHash") + } + + var r0 BLOCK_HASH + if rf, ok := ret.Get(0).(func() BLOCK_HASH); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(BLOCK_HASH) + } + + return r0 +} + +// Head_GetParentHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetParentHash' +type Head_GetParentHash_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// GetParentHash is a helper method to define mock.On call +func (_e *Head_Expecter[BLOCK_HASH]) GetParentHash() 
*Head_GetParentHash_Call[BLOCK_HASH] { + return &Head_GetParentHash_Call[BLOCK_HASH]{Call: _e.mock.On("GetParentHash")} +} + +func (_c *Head_GetParentHash_Call[BLOCK_HASH]) Run(run func()) *Head_GetParentHash_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Head_GetParentHash_Call[BLOCK_HASH]) Return(_a0 BLOCK_HASH) *Head_GetParentHash_Call[BLOCK_HASH] { + _c.Call.Return(_a0) + return _c +} + +func (_c *Head_GetParentHash_Call[BLOCK_HASH]) RunAndReturn(run func() BLOCK_HASH) *Head_GetParentHash_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// GetTimestamp provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) GetTimestamp() time.Time { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetTimestamp") + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// Head_GetTimestamp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTimestamp' +type Head_GetTimestamp_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// GetTimestamp is a helper method to define mock.On call +func (_e *Head_Expecter[BLOCK_HASH]) GetTimestamp() *Head_GetTimestamp_Call[BLOCK_HASH] { + return &Head_GetTimestamp_Call[BLOCK_HASH]{Call: _e.mock.On("GetTimestamp")} +} + +func (_c *Head_GetTimestamp_Call[BLOCK_HASH]) Run(run func()) *Head_GetTimestamp_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Head_GetTimestamp_Call[BLOCK_HASH]) Return(_a0 time.Time) *Head_GetTimestamp_Call[BLOCK_HASH] { + _c.Call.Return(_a0) + return _c +} + +func (_c *Head_GetTimestamp_Call[BLOCK_HASH]) RunAndReturn(run func() time.Time) *Head_GetTimestamp_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// HashAtHeight provides a mock function with given fields: blockNum +func (_m *Head[BLOCK_HASH]) HashAtHeight(blockNum int64) BLOCK_HASH { + ret := _m.Called(blockNum) + + if len(ret) == 0 { + panic("no return value specified for HashAtHeight") + } + + var r0 BLOCK_HASH + if rf, ok := ret.Get(0).(func(int64) BLOCK_HASH); ok { + r0 = rf(blockNum) + } else { + r0 = ret.Get(0).(BLOCK_HASH) + } + + return r0 +} + +// Head_HashAtHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HashAtHeight' +type Head_HashAtHeight_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// HashAtHeight is a helper method to define mock.On call +// - blockNum int64 +func (_e *Head_Expecter[BLOCK_HASH]) HashAtHeight(blockNum interface{}) *Head_HashAtHeight_Call[BLOCK_HASH] { + return &Head_HashAtHeight_Call[BLOCK_HASH]{Call: _e.mock.On("HashAtHeight", blockNum)} +} + +func (_c *Head_HashAtHeight_Call[BLOCK_HASH]) Run(run func(blockNum int64)) *Head_HashAtHeight_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(int64)) + }) + return _c +} + +func (_c *Head_HashAtHeight_Call[BLOCK_HASH]) Return(_a0 BLOCK_HASH) *Head_HashAtHeight_Call[BLOCK_HASH] { + _c.Call.Return(_a0) + return _c +} + +func (_c *Head_HashAtHeight_Call[BLOCK_HASH]) RunAndReturn(run func(int64) BLOCK_HASH) *Head_HashAtHeight_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// HeadAtHeight provides a mock function with given fields: blockNum +func (_m *Head[BLOCK_HASH]) HeadAtHeight(blockNum int64) (types.Head[BLOCK_HASH], error) { + ret := _m.Called(blockNum) + + if len(ret) == 0 { + panic("no 
return value specified for HeadAtHeight") + } + + var r0 types.Head[BLOCK_HASH] + var r1 error + if rf, ok := ret.Get(0).(func(int64) (types.Head[BLOCK_HASH], error)); ok { + return rf(blockNum) + } + if rf, ok := ret.Get(0).(func(int64) types.Head[BLOCK_HASH]); ok { + r0 = rf(blockNum) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Head[BLOCK_HASH]) + } + } + + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(blockNum) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Head_HeadAtHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeadAtHeight' +type Head_HeadAtHeight_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// HeadAtHeight is a helper method to define mock.On call +// - blockNum int64 +func (_e *Head_Expecter[BLOCK_HASH]) HeadAtHeight(blockNum interface{}) *Head_HeadAtHeight_Call[BLOCK_HASH] { + return &Head_HeadAtHeight_Call[BLOCK_HASH]{Call: _e.mock.On("HeadAtHeight", blockNum)} +} + +func (_c *Head_HeadAtHeight_Call[BLOCK_HASH]) Run(run func(blockNum int64)) *Head_HeadAtHeight_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(int64)) + }) + return _c +} + +func (_c *Head_HeadAtHeight_Call[BLOCK_HASH]) Return(_a0 types.Head[BLOCK_HASH], _a1 error) *Head_HeadAtHeight_Call[BLOCK_HASH] { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Head_HeadAtHeight_Call[BLOCK_HASH]) RunAndReturn(run func(int64) (types.Head[BLOCK_HASH], error)) *Head_HeadAtHeight_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// IsValid provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) IsValid() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsValid") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Head_IsValid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsValid' +type Head_IsValid_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// IsValid is a helper method to define mock.On call +func (_e *Head_Expecter[BLOCK_HASH]) IsValid() *Head_IsValid_Call[BLOCK_HASH] { + return &Head_IsValid_Call[BLOCK_HASH]{Call: _e.mock.On("IsValid")} +} + +func (_c *Head_IsValid_Call[BLOCK_HASH]) Run(run func()) *Head_IsValid_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Head_IsValid_Call[BLOCK_HASH]) Return(_a0 bool) *Head_IsValid_Call[BLOCK_HASH] { + _c.Call.Return(_a0) + return _c +} + +func (_c *Head_IsValid_Call[BLOCK_HASH]) RunAndReturn(run func() bool) *Head_IsValid_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// LatestFinalizedHead provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) LatestFinalizedHead() types.Head[BLOCK_HASH] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestFinalizedHead") + } + + var r0 types.Head[BLOCK_HASH] + if rf, ok := ret.Get(0).(func() types.Head[BLOCK_HASH]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Head[BLOCK_HASH]) + } + } + + return r0 +} + +// Head_LatestFinalizedHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestFinalizedHead' +type Head_LatestFinalizedHead_Call[BLOCK_HASH types.Hashable] struct { + *mock.Call +} + +// LatestFinalizedHead is a helper method to define mock.On call +func (_e *Head_Expecter[BLOCK_HASH]) 
LatestFinalizedHead() *Head_LatestFinalizedHead_Call[BLOCK_HASH] { + return &Head_LatestFinalizedHead_Call[BLOCK_HASH]{Call: _e.mock.On("LatestFinalizedHead")} +} + +func (_c *Head_LatestFinalizedHead_Call[BLOCK_HASH]) Run(run func()) *Head_LatestFinalizedHead_Call[BLOCK_HASH] { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Head_LatestFinalizedHead_Call[BLOCK_HASH]) Return(_a0 types.Head[BLOCK_HASH]) *Head_LatestFinalizedHead_Call[BLOCK_HASH] { + _c.Call.Return(_a0) + return _c +} + +func (_c *Head_LatestFinalizedHead_Call[BLOCK_HASH]) RunAndReturn(run func() types.Head[BLOCK_HASH]) *Head_LatestFinalizedHead_Call[BLOCK_HASH] { + _c.Call.Return(run) + return _c +} + +// NewHead creates a new instance of Head. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHead[BLOCK_HASH types.Hashable](t interface { + mock.TestingT + Cleanup(func()) +}) *Head[BLOCK_HASH] { + mock := &Head[BLOCK_HASH]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/types/mocks/subscription.go b/types/mocks/subscription.go new file mode 100644 index 0000000..b0b87c7 --- /dev/null +++ b/types/mocks/subscription.go @@ -0,0 +1,111 @@ +// Code generated by mockery v2.46.3. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Subscription is an autogenerated mock type for the Subscription type +type Subscription struct { + mock.Mock +} + +type Subscription_Expecter struct { + mock *mock.Mock +} + +func (_m *Subscription) EXPECT() *Subscription_Expecter { + return &Subscription_Expecter{mock: &_m.Mock} +} + +// Err provides a mock function with given fields: +func (_m *Subscription) Err() <-chan error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Err") + } + + var r0 <-chan error + if rf, ok := ret.Get(0).(func() <-chan error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan error) + } + } + + return r0 +} + +// Subscription_Err_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Err' +type Subscription_Err_Call struct { + *mock.Call +} + +// Err is a helper method to define mock.On call +func (_e *Subscription_Expecter) Err() *Subscription_Err_Call { + return &Subscription_Err_Call{Call: _e.mock.On("Err")} +} + +func (_c *Subscription_Err_Call) Run(run func()) *Subscription_Err_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Subscription_Err_Call) Return(_a0 <-chan error) *Subscription_Err_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Subscription_Err_Call) RunAndReturn(run func() <-chan error) *Subscription_Err_Call { + _c.Call.Return(run) + return _c +} + +// Unsubscribe provides a mock function with given fields: +func (_m *Subscription) Unsubscribe() { + _m.Called() +} + +// Subscription_Unsubscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Unsubscribe' +type Subscription_Unsubscribe_Call struct { + *mock.Call +} + +// Unsubscribe is a helper method to define mock.On call +func (_e *Subscription_Expecter) Unsubscribe() *Subscription_Unsubscribe_Call { + return &Subscription_Unsubscribe_Call{Call: _e.mock.On("Unsubscribe")} +} + +func (_c *Subscription_Unsubscribe_Call) Run(run func()) *Subscription_Unsubscribe_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + 
}) + return _c +} + +func (_c *Subscription_Unsubscribe_Call) Return() *Subscription_Unsubscribe_Call { + _c.Call.Return() + return _c +} + +func (_c *Subscription_Unsubscribe_Call) RunAndReturn(run func()) *Subscription_Unsubscribe_Call { + _c.Call.Return(run) + return _c +} + +// NewSubscription creates a new instance of Subscription. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSubscription(t interface { + mock.TestingT + Cleanup(func()) +}) *Subscription { + mock := &Subscription{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/types/receipt.go b/types/receipt.go new file mode 100644 index 0000000..01d5a72 --- /dev/null +++ b/types/receipt.go @@ -0,0 +1,14 @@ +package types + +import "math/big" + +type Receipt[TX_HASH Hashable, BLOCK_HASH Hashable] interface { + GetStatus() uint64 + GetTxHash() TX_HASH + GetBlockNumber() *big.Int + IsZero() bool + IsUnmined() bool + GetFeeUsed() uint64 + GetTransactionIndex() uint + GetBlockHash() BLOCK_HASH +} diff --git a/types/subscription.go b/types/subscription.go new file mode 100644 index 0000000..3c4fd4c --- /dev/null +++ b/types/subscription.go @@ -0,0 +1,16 @@ +package types + +// Subscription represents an event subscription where events are +// delivered on a data channel. +// This is a generic interface representing a Subscription, used by clients. +type Subscription interface { + // Unsubscribe cancels the sending of events to the data channel + // and closes the error channel. Unsubscribe should be callable multiple + // times without causing an error. + Unsubscribe() + // Err returns the subscription error channel. The error channel receives + // a value if there is an issue with the subscription (e.g. the network connection + // delivering the events has been closed). Only one value will ever be sent. + // The error channel is closed by Unsubscribe. + Err() <-chan error +} diff --git a/types/test_utils.go b/types/test_utils.go new file mode 100644 index 0000000..40560f7 --- /dev/null +++ b/types/test_utils.go @@ -0,0 +1,16 @@ +package types + +import ( + "math" + "math/big" + "math/rand" +) + +func RandomID() ID { + id := rand.Int63n(math.MaxInt32) + 10000 + return big.NewInt(id) +} + +func NewIDFromInt(id int64) ID { + return big.NewInt(id) +} diff --git a/utils/utils.go b/utils/utils.go new file mode 100644 index 0000000..aeaad34 --- /dev/null +++ b/utils/utils.go @@ -0,0 +1,35 @@ +package utils + +import ( + "cmp" + "slices" + "time" + + "github.com/jpillora/backoff" + "golang.org/x/exp/constraints" +) + +// NewRedialBackoff is a standard backoff to use for redialling or reconnecting to +// unreachable network endpoints +func NewRedialBackoff() backoff.Backoff { + return backoff.Backoff{ + Min: 1 * time.Second, + Max: 15 * time.Second, + Jitter: true, + } +} + +// MinFunc returns the minimum value of the given element array with respect +// to the given key function. In the event U is not a compound type (e.g. a +// struct), an identity function can be provided.
+func MinFunc[U any, T constraints.Ordered](elems []U, f func(U) T) T { + var min T + if len(elems) == 0 { + return min + } + + e := slices.MinFunc(elems, func(a, b U) int { + return cmp.Compare(f(a), f(b)) + }) + return f(e) +} From cfa9d9bb5507cef0d1cb70920096257213d15819 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 27 Nov 2024 12:14:20 -0500 Subject: [PATCH 02/11] Update .gitignore --- .gitignore | 54 ------------------------------------------------------ 1 file changed, 54 deletions(-) diff --git a/.gitignore b/.gitignore index e3577b8..d4e0b15 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,3 @@ -# dependencies -node_modules/ - -# Tooling caches -*.tsbuildinfo -.eslintcache - # Log files *.log @@ -21,8 +14,6 @@ generated/ .envrc .env* .dbenv -!crib/.env.example -!.github/actions/setup-postgres/.env .direnv .idea .vscode/ @@ -40,57 +31,12 @@ env credentials.env gcr_creds.env -# DB backups - -cl_backup_*.tar.gz - -# Test artifacts -core/cmd/TestClient_ImportExportP2PKeyBundle_test_key.json -output.txt -race.* -golangci-lint-output.txt -/golangci-lint/ -.covdata -core/services/job/testdata/wasm/testmodule.wasm -core/services/job/testdata/wasm/testmodule.br - -# DB state -./db/ -.s.PGSQL.5432.lock - -# can be left behind by tests -core/cmd/vrfkey1 - -# Integration Tests -integration-tests/**/logs/ -tests-*.xml -*.test -tmp-manifest-*.yaml -ztarrepo.tar.gz -**/test-ledger/* -__debug_bin* -.test_summary/ -db_dumps/ -.run.id -integration-tests/**/traces/ -integration-tests/**/integration-tests -benchmark_report.csv -benchmark_summary.json -secrets.toml -tmp_laneconfig/ - -# goreleaser builds -cosign.* -dist/ -MacOSX* - # Test & linter reports *report.xml *report.json *.out dot_graphs/ -contracts/yarn.lock # Ignore DevSpace cache and log folder .devspace/ From e943e46ba8397cc1a81f80f007efef61cd66abed Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 27 Nov 2024 12:44:24 -0500 Subject: [PATCH 03/11] Update package name --- multinode/config/config.go | 210 ++++++++++++++++++ multinode/ctx.go | 2 +- multinode/ctx_test.go | 2 +- multinode/mock_hashable_test.go | 2 +- multinode/mock_head_test.go | 2 +- multinode/mock_node_selector_test.go | 2 +- multinode/mock_node_test.go | 2 +- .../mock_pool_chain_info_provider_test.go | 2 +- multinode/mock_rpc_client_test.go | 2 +- multinode/mock_send_only_client_test.go | 2 +- multinode/mock_send_only_node_test.go | 2 +- multinode/models.go | 2 +- multinode/models_test.go | 2 +- multinode/multi_node.go | 2 +- multinode/multi_node_test.go | 2 +- multinode/node.go | 18 +- multinode/node_fsm.go | 2 +- multinode/node_fsm_test.go | 2 +- multinode/node_lifecycle.go | 2 +- multinode/node_lifecycle_test.go | 24 +- multinode/node_selector.go | 2 +- multinode/node_selector_highest_head.go | 2 +- multinode/node_selector_highest_head_test.go | 2 +- multinode/node_selector_priority_level.go | 2 +- .../node_selector_priority_level_test.go | 2 +- multinode/node_selector_round_robin.go | 2 +- multinode/node_selector_round_robin_test.go | 2 +- multinode/node_selector_test.go | 2 +- multinode/node_selector_total_difficulty.go | 2 +- .../node_selector_total_difficulty_test.go | 2 +- multinode/node_test.go | 2 +- multinode/poller.go | 2 +- multinode/poller_test.go | 2 +- multinode/send_only_node.go | 2 +- multinode/send_only_node_lifecycle.go | 2 +- multinode/send_only_node_test.go | 2 +- multinode/transaction_sender.go | 2 +- multinode/transaction_sender_test.go | 2 +- multinode/types.go | 2 +- multinode/types_test.go | 2 +- 40 files changed, 268 
insertions(+), 58 deletions(-) create mode 100644 multinode/config/config.go diff --git a/multinode/config/config.go b/multinode/config/config.go new file mode 100644 index 0000000..b56eb57 --- /dev/null +++ b/multinode/config/config.go @@ -0,0 +1,210 @@ +package config + +import ( + "time" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + "github.com/smartcontractkit/chainlink-framework/multinode" +) + +// MultiNodeConfig is a wrapper to provide required functions while keeping configs Public +type MultiNodeConfig struct { + MultiNode +} + +type MultiNode struct { + // Feature flag + Enabled *bool + + // Node Configs + PollFailureThreshold *uint32 + PollInterval *config.Duration + SelectionMode *string + SyncThreshold *uint32 + NodeIsSyncingEnabled *bool + LeaseDuration *config.Duration + FinalizedBlockPollInterval *config.Duration + EnforceRepeatableRead *bool + DeathDeclarationDelay *config.Duration + + // Chain Configs + NodeNoNewHeadsThreshold *config.Duration + NoNewFinalizedHeadsThreshold *config.Duration + FinalityDepth *uint32 + FinalityTagEnabled *bool + FinalizedBlockOffset *uint32 +} + +func (c *MultiNodeConfig) Enabled() bool { + return c.MultiNode.Enabled != nil && *c.MultiNode.Enabled +} + +func (c *MultiNodeConfig) PollFailureThreshold() uint32 { + return *c.MultiNode.PollFailureThreshold +} + +func (c *MultiNodeConfig) PollInterval() time.Duration { + return c.MultiNode.PollInterval.Duration() +} + +func (c *MultiNodeConfig) SelectionMode() string { + return *c.MultiNode.SelectionMode +} + +func (c *MultiNodeConfig) SyncThreshold() uint32 { + return *c.MultiNode.SyncThreshold +} + +func (c *MultiNodeConfig) NodeIsSyncingEnabled() bool { + return *c.MultiNode.NodeIsSyncingEnabled +} + +func (c *MultiNodeConfig) LeaseDuration() time.Duration { return c.MultiNode.LeaseDuration.Duration() } + +func (c *MultiNodeConfig) FinalizedBlockPollInterval() time.Duration { + return c.MultiNode.FinalizedBlockPollInterval.Duration() +} + +func (c *MultiNodeConfig) EnforceRepeatableRead() bool { return *c.MultiNode.EnforceRepeatableRead } + +func (c *MultiNodeConfig) DeathDeclarationDelay() time.Duration { + return c.MultiNode.DeathDeclarationDelay.Duration() +} + +func (c *MultiNodeConfig) NodeNoNewHeadsThreshold() time.Duration { + return c.MultiNode.NodeNoNewHeadsThreshold.Duration() +} + +func (c *MultiNodeConfig) NoNewFinalizedHeadsThreshold() time.Duration { + return c.MultiNode.NoNewFinalizedHeadsThreshold.Duration() +} + +func (c *MultiNodeConfig) FinalityDepth() uint32 { return *c.MultiNode.FinalityDepth } + +func (c *MultiNodeConfig) FinalityTagEnabled() bool { return *c.MultiNode.FinalityTagEnabled } + +func (c *MultiNodeConfig) FinalizedBlockOffset() uint32 { return *c.MultiNode.FinalizedBlockOffset } + +func (c *MultiNodeConfig) SetDefaults() { + // MultiNode is disabled as it's not fully implemented yet: BCFR-122 + if c.MultiNode.Enabled == nil { + c.MultiNode.Enabled = ptr(false) + } + + /* Node Configs */ + // Failure threshold for polling set to 5 to tolerate some polling failures before taking action. + if c.MultiNode.PollFailureThreshold == nil { + c.MultiNode.PollFailureThreshold = ptr(uint32(5)) + } + // Poll interval is set to 15 seconds to ensure timely updates while minimizing resource usage. 
+ if c.MultiNode.PollInterval == nil { + c.MultiNode.PollInterval = config.MustNewDuration(15 * time.Second) + } + // Selection mode defaults to priority level to enable using node priorities + if c.MultiNode.SelectionMode == nil { + c.MultiNode.SelectionMode = ptr(multinode.NodeSelectionModePriorityLevel) + } + // The sync threshold is set to 10 to allow for some flexibility in node synchronization before considering it out of sync. + if c.MultiNode.SyncThreshold == nil { + c.MultiNode.SyncThreshold = ptr(uint32(10)) + } + // Lease duration is set to 1 minute by default to allow node locks for a reasonable amount of time. + if c.MultiNode.LeaseDuration == nil { + c.MultiNode.LeaseDuration = config.MustNewDuration(time.Minute) + } + // Node syncing is not relevant for Solana and is disabled by default. + if c.MultiNode.NodeIsSyncingEnabled == nil { + c.MultiNode.NodeIsSyncingEnabled = ptr(false) + } + // The finalized block polling interval is set to 5 seconds to ensure timely updates while minimizing resource usage. + if c.MultiNode.FinalizedBlockPollInterval == nil { + c.MultiNode.FinalizedBlockPollInterval = config.MustNewDuration(5 * time.Second) + } + // Repeatable read guarantee should be enforced by default. + if c.MultiNode.EnforceRepeatableRead == nil { + c.MultiNode.EnforceRepeatableRead = ptr(true) + } + // The delay before declaring a node dead is set to 20 seconds to give nodes time to recover from temporary issues. + if c.MultiNode.DeathDeclarationDelay == nil { + c.MultiNode.DeathDeclarationDelay = config.MustNewDuration(20 * time.Second) + } + + /* Chain Configs */ + // Threshold for no new heads is set to 20 seconds, assuming that heads should update at a reasonable pace. + if c.MultiNode.NodeNoNewHeadsThreshold == nil { + c.MultiNode.NodeNoNewHeadsThreshold = config.MustNewDuration(20 * time.Second) + } + // Similar to heads, finalized heads should be updated within 20 seconds. + if c.MultiNode.NoNewFinalizedHeadsThreshold == nil { + c.MultiNode.NoNewFinalizedHeadsThreshold = config.MustNewDuration(20 * time.Second) + } + // Finality tags are used in Solana and enabled by default. + if c.MultiNode.FinalityTagEnabled == nil { + c.MultiNode.FinalityTagEnabled = ptr(true) + } + // Finality depth will not be used since finality tags are enabled. + if c.MultiNode.FinalityDepth == nil { + c.MultiNode.FinalityDepth = ptr(uint32(0)) + } + // Finalized block offset allows for RPCs to be slightly behind the finalized block. 
+ if c.MultiNode.FinalizedBlockOffset == nil { + c.MultiNode.FinalizedBlockOffset = ptr(uint32(50)) + } +} + +func (c *MultiNodeConfig) SetFrom(f *MultiNodeConfig) { + if f.MultiNode.Enabled != nil { + c.MultiNode.Enabled = f.MultiNode.Enabled + } + + // Node Configs + if f.MultiNode.PollFailureThreshold != nil { + c.MultiNode.PollFailureThreshold = f.MultiNode.PollFailureThreshold + } + if f.MultiNode.PollInterval != nil { + c.MultiNode.PollInterval = f.MultiNode.PollInterval + } + if f.MultiNode.SelectionMode != nil { + c.MultiNode.SelectionMode = f.MultiNode.SelectionMode + } + if f.MultiNode.SyncThreshold != nil { + c.MultiNode.SyncThreshold = f.MultiNode.SyncThreshold + } + if f.MultiNode.NodeIsSyncingEnabled != nil { + c.MultiNode.NodeIsSyncingEnabled = f.MultiNode.NodeIsSyncingEnabled + } + if f.MultiNode.LeaseDuration != nil { + c.MultiNode.LeaseDuration = f.MultiNode.LeaseDuration + } + if f.MultiNode.FinalizedBlockPollInterval != nil { + c.MultiNode.FinalizedBlockPollInterval = f.MultiNode.FinalizedBlockPollInterval + } + if f.MultiNode.EnforceRepeatableRead != nil { + c.MultiNode.EnforceRepeatableRead = f.MultiNode.EnforceRepeatableRead + } + if f.MultiNode.DeathDeclarationDelay != nil { + c.MultiNode.DeathDeclarationDelay = f.MultiNode.DeathDeclarationDelay + } + + // Chain Configs + if f.MultiNode.NodeNoNewHeadsThreshold != nil { + c.MultiNode.NodeNoNewHeadsThreshold = f.MultiNode.NodeNoNewHeadsThreshold + } + if f.MultiNode.NoNewFinalizedHeadsThreshold != nil { + c.MultiNode.NoNewFinalizedHeadsThreshold = f.MultiNode.NoNewFinalizedHeadsThreshold + } + if f.MultiNode.FinalityDepth != nil { + c.MultiNode.FinalityDepth = f.MultiNode.FinalityDepth + } + if f.MultiNode.FinalityTagEnabled != nil { + c.MultiNode.FinalityTagEnabled = f.MultiNode.FinalityTagEnabled + } + if f.MultiNode.FinalizedBlockOffset != nil { + c.MultiNode.FinalizedBlockOffset = f.MultiNode.FinalizedBlockOffset + } +} + +func ptr[T any](t T) *T { + return &t +} diff --git a/multinode/ctx.go b/multinode/ctx.go index 57b2fc8..ed9bc32 100644 --- a/multinode/ctx.go +++ b/multinode/ctx.go @@ -1,4 +1,4 @@ -package client +package multinode import "context" diff --git a/multinode/ctx_test.go b/multinode/ctx_test.go index 822b36c..c8d46e9 100644 --- a/multinode/ctx_test.go +++ b/multinode/ctx_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "testing" diff --git a/multinode/mock_hashable_test.go b/multinode/mock_hashable_test.go index d9f1670..7a42d77 100644 --- a/multinode/mock_hashable_test.go +++ b/multinode/mock_hashable_test.go @@ -1,4 +1,4 @@ -package client +package multinode import "cmp" diff --git a/multinode/mock_head_test.go b/multinode/mock_head_test.go index f75bb34..cf83998 100644 --- a/multinode/mock_head_test.go +++ b/multinode/mock_head_test.go @@ -1,6 +1,6 @@ // Code generated by mockery v2.46.3. DO NOT EDIT. -package client +package multinode import ( big "math/big" diff --git a/multinode/mock_node_selector_test.go b/multinode/mock_node_selector_test.go index 71b3b53..92c5364 100644 --- a/multinode/mock_node_selector_test.go +++ b/multinode/mock_node_selector_test.go @@ -1,6 +1,6 @@ // Code generated by mockery v2.46.3. DO NOT EDIT. -package client +package multinode import ( types "github.com/smartcontractkit/chainlink-framework/types" diff --git a/multinode/mock_node_test.go b/multinode/mock_node_test.go index 87a0194..d9a1ca3 100644 --- a/multinode/mock_node_test.go +++ b/multinode/mock_node_test.go @@ -1,6 +1,6 @@ // Code generated by mockery v2.46.3. DO NOT EDIT. 
-package client +package multinode import ( context "context" diff --git a/multinode/mock_pool_chain_info_provider_test.go b/multinode/mock_pool_chain_info_provider_test.go index c44f10b..c857ef8 100644 --- a/multinode/mock_pool_chain_info_provider_test.go +++ b/multinode/mock_pool_chain_info_provider_test.go @@ -1,6 +1,6 @@ // Code generated by mockery v2.46.3. DO NOT EDIT. -package client +package multinode import mock "github.com/stretchr/testify/mock" diff --git a/multinode/mock_rpc_client_test.go b/multinode/mock_rpc_client_test.go index 6168480..c1b927d 100644 --- a/multinode/mock_rpc_client_test.go +++ b/multinode/mock_rpc_client_test.go @@ -1,6 +1,6 @@ // Code generated by mockery v2.46.3. DO NOT EDIT. -package client +package multinode import ( context "context" diff --git a/multinode/mock_send_only_client_test.go b/multinode/mock_send_only_client_test.go index 46c0de3..7eb6e9f 100644 --- a/multinode/mock_send_only_client_test.go +++ b/multinode/mock_send_only_client_test.go @@ -1,6 +1,6 @@ // Code generated by mockery v2.46.3. DO NOT EDIT. -package client +package multinode import ( context "context" diff --git a/multinode/mock_send_only_node_test.go b/multinode/mock_send_only_node_test.go index 83a8c2d..f277a12 100644 --- a/multinode/mock_send_only_node_test.go +++ b/multinode/mock_send_only_node_test.go @@ -1,6 +1,6 @@ // Code generated by mockery v2.46.3. DO NOT EDIT. -package client +package multinode import ( context "context" diff --git a/multinode/models.go b/multinode/models.go index 526bb25..5339ac0 100644 --- a/multinode/models.go +++ b/multinode/models.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "bytes" diff --git a/multinode/models_test.go b/multinode/models_test.go index a10592c..89210c8 100644 --- a/multinode/models_test.go +++ b/multinode/models_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "strings" diff --git a/multinode/multi_node.go b/multinode/multi_node.go index 9851289..93d8acb 100644 --- a/multinode/multi_node.go +++ b/multinode/multi_node.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "context" diff --git a/multinode/multi_node_test.go b/multinode/multi_node_test.go index 935483b..f40e8a8 100644 --- a/multinode/multi_node_test.go +++ b/multinode/multi_node_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "fmt" diff --git a/multinode/node.go b/multinode/node.go index 850edc0..9bb1b82 100644 --- a/multinode/node.go +++ b/multinode/node.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "context" @@ -24,15 +24,15 @@ var errInvalidChainID = errors.New("invalid chain id") var ( promPoolRPCNodeVerifies = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "pool_rpc_node_verifies", - Help: "The total number of chain types.ID verifications for the given RPC node", + Help: "The total number of chain ID verifications for the given RPC node", }, []string{"network", "chainID", "nodeName"}) promPoolRPCNodeVerifiesFailed = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "pool_rpc_node_verifies_failed", - Help: "The total number of failed chain types.ID verifications for the given RPC node", + Help: "The total number of failed chain ID verifications for the given RPC node", }, []string{"network", "chainID", "nodeName"}) promPoolRPCNodeVerifiesSuccess = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "pool_rpc_node_verifies_success", - Help: "The total number of successful chain types.ID verifications for the given RPC node", + Help: "The total number of successful chain ID 
verifications for the given RPC node", }, []string{"network", "chainID", "nodeName"}) ) @@ -225,7 +225,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) Start(startCtx context.Context) error { }) } -// start initially dials the node and verifies chain types.ID +// start initially dials the node and verifies chain ID // This spins off lifecycle goroutines. // Not thread-safe. // Node lifecycle is synchronous: only one goroutine should be running at a @@ -246,7 +246,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) start(startCtx context.Context) { n.declareState(state) } -// verifyChainID checks that connection to the node matches the given chain types.ID +// verifyChainID checks that connection to the node matches the given chain ID // Not thread-safe // Pure verifyChainID: does not mutate node "state" field. func (n *node[CHAIN_ID, HEAD, RPC]) verifyChainID(callerCtx context.Context, lggr logger.Logger) nodeState { @@ -270,18 +270,18 @@ func (n *node[CHAIN_ID, HEAD, RPC]) verifyChainID(callerCtx context.Context, lgg var err error if chainID, err = n.rpc.ChainID(callerCtx); err != nil { promFailed() - lggr.Errorw("Failed to verify chain types.ID for node", "err", err, "nodeState", n.getCachedState()) + lggr.Errorw("Failed to verify chain ID for node", "err", err, "nodeState", n.getCachedState()) return nodeStateUnreachable } else if chainID.String() != n.chainID.String() { promFailed() err = fmt.Errorf( - "rpc ChainID doesn't match local chain types.ID: RPC types.ID=%s, local types.ID=%s, node name=%s: %w", + "rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s: %w", chainID.String(), n.chainID.String(), n.name, errInvalidChainID, ) - lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain types.ID", "err", err, "nodeState", n.getCachedState()) + lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain ID", "err", err, "nodeState", n.getCachedState()) return nodeStateInvalidChainID } diff --git a/multinode/node_fsm.go b/multinode/node_fsm.go index b707e9f..c10a5f8 100644 --- a/multinode/node_fsm.go +++ b/multinode/node_fsm.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "fmt" diff --git a/multinode/node_fsm_test.go b/multinode/node_fsm_test.go index af317a6..ad09a1a 100644 --- a/multinode/node_fsm_test.go +++ b/multinode/node_fsm_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "slices" diff --git a/multinode/node_lifecycle.go b/multinode/node_lifecycle.go index aaf62f1..775d204 100644 --- a/multinode/node_lifecycle.go +++ b/multinode/node_lifecycle.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "context" diff --git a/multinode/node_lifecycle_test.go b/multinode/node_lifecycle_test.go index d357ef8..abdce22 100644 --- a/multinode/node_lifecycle_test.go +++ b/multinode/node_lifecycle_test.go @@ -1,9 +1,8 @@ -package client +package multinode import ( "errors" "fmt" - "github.com/smartcontractkit/chainlink-framework/types" "math/big" "sync" "sync/atomic" @@ -20,6 +19,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" clientMocks "github.com/smartcontractkit/chainlink-framework/multinode/mocks" + "github.com/smartcontractkit/chainlink-framework/types" "github.com/smartcontractkit/chainlink-framework/types/mocks" ) @@ -395,7 +395,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() expectedError := errors.New("failed to subscribe to finalized heads") 
rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(nil, sub, expectedError).Once() - lggr, _ := logger.TestObserved(t, zap.DebugLevel) + lggr := logger.Test(t) node := newDialedNode(t, testNodeOpts{ config: testNodeConfig{ finalizedBlockPollInterval: tests.TestInterval, @@ -713,7 +713,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) - expectedError := errors.New("failed to get chain types.ID") + expectedError := errors.New("failed to get chain ID") // might be called multiple times rpc.On("ChainID", mock.Anything).Return(types.NewIDFromInt(0), expectedError) node.declareOutOfSync(syncStatusNoNewHead) @@ -1196,9 +1196,9 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { assert.Equal(t, nodeStateDialed, node.State()) }).Return(nodeChainID, errors.New("failed to get chain id")) node.declareUnreachable() - tests.AssertLogCountEventually(t, observedLogs, "Failed to verify chain types.ID for node", 2) + tests.AssertLogCountEventually(t, observedLogs, "Failed to verify chain ID for node", 2) }) - t.Run("on chain types.ID mismatch transitions to invalidChainID", func(t *testing.T) { + t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) { t.Parallel() rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) @@ -1354,7 +1354,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareInvalidChainID() - tests.AssertLogEventually(t, observedLogs, "Failed to verify chain types.ID for node") + tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) @@ -1376,7 +1376,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) node.declareInvalidChainID() - tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain types.ID", 2) + tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2) tests.AssertEventually(t, func() bool { return node.State() == nodeStateInvalidChainID }) @@ -1473,12 +1473,12 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }).Return(nodeChainID, errors.New("failed to get chain id")) err := node.Start(tests.Context(t)) assert.NoError(t, err) - tests.AssertLogEventually(t, observedLogs, "Failed to verify chain types.ID for node") + tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) }) - t.Run("on chain types.ID mismatch transitions to invalidChainID", func(t *testing.T) { + t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) { t.Parallel() rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) @@ -1788,7 +1788,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareSyncing() - tests.AssertLogEventually(t, observedLogs, "Failed to verify chain types.ID for node") + tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") 
tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) @@ -1810,7 +1810,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) node.declareSyncing() - tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain types.ID", 2) + tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2) tests.AssertEventually(t, func() bool { return node.State() == nodeStateInvalidChainID }) diff --git a/multinode/node_selector.go b/multinode/node_selector.go index 74f6d44..eaf6a22 100644 --- a/multinode/node_selector.go +++ b/multinode/node_selector.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "fmt" diff --git a/multinode/node_selector_highest_head.go b/multinode/node_selector_highest_head.go index d3f8e11..f00373a 100644 --- a/multinode/node_selector_highest_head.go +++ b/multinode/node_selector_highest_head.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "math" diff --git a/multinode/node_selector_highest_head_test.go b/multinode/node_selector_highest_head_test.go index bfc2af2..17eb7d5 100644 --- a/multinode/node_selector_highest_head_test.go +++ b/multinode/node_selector_highest_head_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "testing" diff --git a/multinode/node_selector_priority_level.go b/multinode/node_selector_priority_level.go index 408302a..bedbf36 100644 --- a/multinode/node_selector_priority_level.go +++ b/multinode/node_selector_priority_level.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "math" diff --git a/multinode/node_selector_priority_level_test.go b/multinode/node_selector_priority_level_test.go index 89e1f37..a89a9e9 100644 --- a/multinode/node_selector_priority_level_test.go +++ b/multinode/node_selector_priority_level_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "testing" diff --git a/multinode/node_selector_round_robin.go b/multinode/node_selector_round_robin.go index 9cc260d..68c819f 100644 --- a/multinode/node_selector_round_robin.go +++ b/multinode/node_selector_round_robin.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "sync/atomic" diff --git a/multinode/node_selector_round_robin_test.go b/multinode/node_selector_round_robin_test.go index d1db7d3..6f7af4d 100644 --- a/multinode/node_selector_round_robin_test.go +++ b/multinode/node_selector_round_robin_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "testing" diff --git a/multinode/node_selector_test.go b/multinode/node_selector_test.go index 9187b11..3b889d9 100644 --- a/multinode/node_selector_test.go +++ b/multinode/node_selector_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "testing" diff --git a/multinode/node_selector_total_difficulty.go b/multinode/node_selector_total_difficulty.go index 96d14ee..718a207 100644 --- a/multinode/node_selector_total_difficulty.go +++ b/multinode/node_selector_total_difficulty.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "math/big" diff --git a/multinode/node_selector_total_difficulty_test.go b/multinode/node_selector_total_difficulty_test.go index f1d9de4..1e399da 100644 --- a/multinode/node_selector_total_difficulty_test.go +++ b/multinode/node_selector_total_difficulty_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "math/big" diff --git a/multinode/node_test.go 
b/multinode/node_test.go index 703caf6..9ffa2d8 100644 --- a/multinode/node_test.go +++ b/multinode/node_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "net/url" diff --git a/multinode/poller.go b/multinode/poller.go index 3e250e8..8726bb6 100644 --- a/multinode/poller.go +++ b/multinode/poller.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "context" diff --git a/multinode/poller_test.go b/multinode/poller_test.go index 930b101..ee5b00a 100644 --- a/multinode/poller_test.go +++ b/multinode/poller_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "context" diff --git a/multinode/send_only_node.go b/multinode/send_only_node.go index e95a326..1913af4 100644 --- a/multinode/send_only_node.go +++ b/multinode/send_only_node.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "context" diff --git a/multinode/send_only_node_lifecycle.go b/multinode/send_only_node_lifecycle.go index eded9d2..6266e14 100644 --- a/multinode/send_only_node_lifecycle.go +++ b/multinode/send_only_node_lifecycle.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "fmt" diff --git a/multinode/send_only_node_test.go b/multinode/send_only_node_test.go index 50ad0af..285d083 100644 --- a/multinode/send_only_node_test.go +++ b/multinode/send_only_node_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "errors" diff --git a/multinode/transaction_sender.go b/multinode/transaction_sender.go index bd59351..c516579 100644 --- a/multinode/transaction_sender.go +++ b/multinode/transaction_sender.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "context" diff --git a/multinode/transaction_sender_test.go b/multinode/transaction_sender_test.go index f79f3df..b601417 100644 --- a/multinode/transaction_sender_test.go +++ b/multinode/transaction_sender_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "context" diff --git a/multinode/types.go b/multinode/types.go index bfe73fa..ab5513e 100644 --- a/multinode/types.go +++ b/multinode/types.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "context" diff --git a/multinode/types_test.go b/multinode/types_test.go index 68d7a3f..dbf0ac6 100644 --- a/multinode/types_test.go +++ b/multinode/types_test.go @@ -1,4 +1,4 @@ -package client +package multinode import ( "math/big" From 720cc05e7317ec435b0c0e132c06164d466d4dd8 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 27 Nov 2024 13:41:38 -0500 Subject: [PATCH 04/11] Add CI --- .github/workflows/golangci_lint.yml | 16 ++++ .github/workflows/test.yml | 61 ++++++++++++++ .golangci.yml | 124 ++++++++++++++++++++++++++++ Makefile | 17 ++++ 4 files changed, 218 insertions(+) create mode 100644 .github/workflows/golangci_lint.yml create mode 100644 .github/workflows/test.yml create mode 100644 .golangci.yml diff --git a/.github/workflows/golangci_lint.yml b/.github/workflows/golangci_lint.yml new file mode 100644 index 0000000..4a7a76b --- /dev/null +++ b/.github/workflows/golangci_lint.yml @@ -0,0 +1,16 @@ +name: Golangci-lint + +on: [pull_request] + +jobs: + golangci-lint: + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + actions: read + steps: + - name: golangci-lint + uses: smartcontractkit/.github/actions/ci-lint-go@2ac9d97a83a5edded09af7fcf4ea5bce7a4473a4 # v0.2.6 + with: + golangci-lint-version: v1.62.2 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..ed4b299 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,61 
@@ +name: PKG Build and Test + +on: [push] + +jobs: + build-test: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + + - name: Set up Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version-file: "go.mod" + + - name: Build + run: go build -v ./... + + - name: Unit Tests + run: GORACE="log_path=$PWD/race" go test -race ./... -coverpkg=./... -coverprofile=coverage.txt + + - name: Print Races + if: failure() + id: print-races + run: | + find race.* | xargs cat > race.txt + if [[ -s race.txt ]]; then + cat race.txt + fi + + - name: Upload Go test results + if: always() + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + name: go-test-results + path: | + ./coverage.txt + ./race.* + + check-tidy: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + - name: Set up Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version-file: "go.mod" + - name: Ensure "make gomodtidy" has been run + run: | + make gomodtidy + git add --all + git diff --minimal --cached --exit-code + - name: Ensure "make generate" has been run + run: | + make rm-mocked + make rm-builders + make generate + git add --all + git diff --stat --cached --exit-code \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..81cec4c --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,124 @@ +run: + timeout: 15m0s +linters: + enable: + - containedctx + - depguard + - errname + - errorlint + - exhaustive + - exportloopref + - fatcontext + - ginkgolinter + - gocritic + - goimports + - gosec + - loggercheck + - mirror + - misspell + - noctx + - nolintlint + - perfsprint + - prealloc + - revive + - rowserrcheck + - spancheck + - sqlclosecheck + - testifylint + - unconvert + - whitespace +linters-settings: + exhaustive: + default-signifies-exhaustive: true + goimports: + local-prefixes: github.com/smartcontractkit/chainlink + golint: + min-confidence: 1.0 + gosec: + excludes: + - G101 + - G104 + # - G204 + # - G304 + # - G404 + govet: + enable: + - shadow + revive: + confidence: 0.8 + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + # - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + # - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + - name: waitgroup-by-value + - name: unconditional-recursion + - name: struct-tag + # - name: string-format + - name: string-of-int + - name: range-val-address + - name: range-val-in-closure + - name: modifies-value-receiver + - name: modifies-parameter + - name: identical-branches + - name: get-return + # - name: flag-parameter + - name: early-return + - name: defer + - name: constant-logical-expr + # - name: confusing-naming + # - name: confusing-results + - name: bool-literal-in-expr + - name: atomic + depguard: + rules: + main: + list-mode: lax + deny: + - pkg: cosmossdk.io/errors + desc: Use the standard library instead + - pkg: github.com/gofrs/uuid + 
desc: Use github.com/google/uuid instead + - pkg: github.com/jackc/pgx3 + desc: Use github.com/jackc/pgx4 instead + - pkg: github.com/jackc/pgx5 + desc: Use github.com/jackc/pgx4 instead + - pkg: github.com/satori/go.uuid + desc: Use github.com/google/uuid instead + - pkg: github.com/test-go/testify/assert + desc: Use github.com/stretchr/testify/assert instead + - pkg: github.com/test-go/testify/mock + desc: Use github.com/stretchr/testify/mock instead + - pkg: github.com/test-go/testify/require + desc: Use github.com/stretchr/testify/require instead + # TODO https://smartcontract-it.atlassian.net/browse/BCI-2589 + # - pkg: go.uber.org/multierr + # desc: Use the standard library instead, for example https://pkg.go.dev/errors#Join + - pkg: gopkg.in/guregu/null.v1 + desc: Use gopkg.in/guregu/null.v4 instead + - pkg: gopkg.in/guregu/null.v2 + desc: Use gopkg.in/guregu/null.v4 instead + - pkg: gopkg.in/guregu/null.v3 + desc: Use gopkg.in/guregu/null.v4 instead + - pkg: github.com/go-gorm/gorm + desc: Use github.com/jmoiron/sqlx directly instead \ No newline at end of file diff --git a/Makefile b/Makefile index 217accd..5676625 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,10 @@ +.PHONY: gomods +gomods: ## Install gomods + go install github.com/jmank88/gomods@v0.1.3 + +.PHONY: gomodtidy +gomodtidy: gomods + gomods tidy .PHONY: mockery mockery: $(mockery) ## Install mockery. @@ -7,3 +14,13 @@ mockery: $(mockery) ## Install mockery. rm-mocked: grep -rl "^// Code generated by mockery" | grep .go$ | xargs -r rm +.PHONY: lint-workspace lint +GOLANGCI_LINT_VERSION := 1.62.2 +GOLANGCI_LINT_COMMON_OPTS := --max-issues-per-linter 0 --max-same-issues 0 +GOLANGCI_LINT_DIRECTORY := ./golangci-lint + +lint-workspace: + @./script/lint.sh $(GOLANGCI_LINT_VERSION) "$(GOLANGCI_LINT_COMMON_OPTS)" $(GOLANGCI_LINT_DIRECTORY) + +lint: + @./script/lint.sh $(GOLANGCI_LINT_VERSION) "$(GOLANGCI_LINT_COMMON_OPTS)" $(GOLANGCI_LINT_DIRECTORY) "--new-from-rev=origin/main" \ No newline at end of file From 9a74dfc8d58918eb5c70e2a6f8fd2a7b3ac38580 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 27 Nov 2024 14:21:50 -0500 Subject: [PATCH 05/11] tidy --- .../evm-chain-bindings/examples/basic/go.mod | 110 -- .../evm-chain-bindings/examples/basic/go.sum | 995 ------------------ 2 files changed, 1105 deletions(-) diff --git a/tools/evm-chain-bindings/examples/basic/go.mod b/tools/evm-chain-bindings/examples/basic/go.mod index 6c08850..86e7c9f 100644 --- a/tools/evm-chain-bindings/examples/basic/go.mod +++ b/tools/evm-chain-bindings/examples/basic/go.mod @@ -4,114 +4,4 @@ go 1.22.7 toolchain go1.22.8 -require github.com/smartcontractkit/chainlink/v2 v2.14.0-mercury-20240807.0.20241003164714-620c5129ca80 - -require ( - github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect - github.com/ethereum/go-ethereum v1.13.8 - github.com/smartcontractkit/chainlink-common v0.2.3-0.20241001140426-35be2fad06ec -) - -require ( - github.com/DataDog/zstd v1.5.2 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/VictoriaMetrics/fastcache v1.12.1 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.10.0 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cockroachdb/errors v1.10.0 // indirect - github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect - github.com/cockroachdb/redact v1.1.5 // 
indirect - github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/consensys/bavard v0.1.13 // indirect - github.com/consensys/gnark-crypto v0.12.1 // indirect - github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect - github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/deckarep/golang-set/v2 v2.6.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect - github.com/ethereum/c-kzg-4844 v0.4.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect - github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect - github.com/getsentry/sentry-go v0.23.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-viper/mapstructure/v2 v2.1.0 // indirect - github.com/gofrs/flock v0.8.1 // indirect - github.com/gogo/protobuf v1.3.3 // indirect - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect - github.com/holiman/bloomfilter/v2 v2.0.3 // indirect - github.com/holiman/uint256 v1.2.4 // indirect - github.com/huin/goupnp v1.3.0 // indirect - github.com/jackc/pgio v1.0.0 // indirect - github.com/jackc/pgtype v1.14.0 // indirect - github.com/jackpal/go-nat-pmp v1.0.2 // indirect - github.com/jmoiron/sqlx v1.4.0 // indirect - github.com/jpillora/backoff v1.0.0 // indirect - github.com/klauspost/compress v1.17.9 // indirect - github.com/kr/pretty v0.3.1 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/lib/pq v1.10.9 // indirect - github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect - github.com/mmcloughlin/addchain v0.4.0 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.20.0 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.59.1 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/rivo/uniseg v0.4.4 // indirect - github.com/robfig/cron/v3 v3.0.1 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/rs/cors v1.8.2 // indirect - github.com/shirou/gopsutil v3.21.11+incompatible // indirect - github.com/shopspring/decimal v1.4.0 // indirect - github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7 // indirect - github.com/status-im/keycard-go v0.2.0 // indirect - github.com/stretchr/testify v1.9.0 // indirect - github.com/supranational/blst v0.3.11 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect - github.com/tidwall/gjson v1.17.0 // indirect - github.com/tidwall/match v1.1.1 // indirect - github.com/tidwall/pretty v1.2.0 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect - 
github.com/tyler-smith/go-bip39 v1.1.0 // indirect - github.com/ugorji/go/codec v1.2.12 // indirect - github.com/x448/float16 v0.8.4 // indirect - github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.27.0 // indirect - golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect - golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.29.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect - golang.org/x/tools v0.25.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/guregu/null.v4 v4.0.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - rsc.io/tmplfunc v0.0.3 // indirect -) - replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 diff --git a/tools/evm-chain-bindings/examples/basic/go.sum b/tools/evm-chain-bindings/examples/basic/go.sum index f971d80..e69de29 100644 --- a/tools/evm-chain-bindings/examples/basic/go.sum +++ b/tools/evm-chain-bindings/examples/basic/go.sum @@ -1,995 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -contrib.go.opencensus.io/exporter/stackdriver v0.13.5 h1:TNaexHK16gPUoc7uzELKOU7JULqccn1NDuqUxmxSqfo= -contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -cosmossdk.io/api v0.3.1 h1:NNiOclKRR0AOlO4KIqeaG6PS6kswOMhHD0ir0SscNXE= -cosmossdk.io/api v0.3.1/go.mod h1:DfHfMkiNA2Uhy8fj0JJlOCYOBp4eWUUJ1te5zBGNyIw= -cosmossdk.io/core v0.5.1 h1:vQVtFrIYOQJDV3f7rw4pjjVqc1id4+mE0L9hHP66pyI= -cosmossdk.io/core v0.5.1/go.mod h1:KZtwHCLjcFuo0nmDc24Xy6CRNEL9Vl/MeimQ2aC7NLE= -cosmossdk.io/depinject v1.0.0-alpha.4 h1:PLNp8ZYAMPTUKyG9IK2hsbciDWqna2z1Wsl98okJopc= -cosmossdk.io/depinject v1.0.0-alpha.4/go.mod h1:HeDk7IkR5ckZ3lMGs/o91AVUc7E596vMaOmslGFM3yU= -cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= -cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= -cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= -cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= -github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= -github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= -github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= -github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= -github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= 
-github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= -github.com/CosmWasm/wasmd v0.40.1 h1:LxbO78t/6S8TkeQlUrJ0m5O87HtAwLx4RGHq3rdrOEU= -github.com/CosmWasm/wasmd v0.40.1/go.mod h1:6EOwnv7MpuFaEqxcUOdFV9i4yvrdOciaY6VQ1o7A3yg= -github.com/CosmWasm/wasmvm v1.2.4 h1:6OfeZuEcEH/9iqwrg2pkeVtDCkMoj9U6PpKtcrCyVrQ= -github.com/CosmWasm/wasmvm v1.2.4/go.mod h1:vW/E3h8j9xBQs9bCoijDuawKo9kCtxOaS8N8J7KFtkc= -github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= -github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/Depado/ginprom v1.8.0 h1:zaaibRLNI1dMiiuj1MKzatm8qrcHzikMlCc1anqOdyo= -github.com/Depado/ginprom v1.8.0/go.mod h1:XBaKzeNBqPF4vxJpNLincSQZeMDnZp1tIbU0FU0UKgg= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/NethermindEth/juno v0.3.1 h1:AW72LiAm9gqUeCVJWvepnZcTnpU4Vkl0KzPMxS+42FA= -github.com/NethermindEth/juno v0.3.1/go.mod h1:SGbTpgGaCsxhFsKOid7Ylnz//WZ8swtILk+NbHGsk/Q= -github.com/NethermindEth/starknet.go v0.7.1-0.20240401080518-34a506f3cfdb h1:Mv8SscePPyw2ju4igIJAjFgcq5zCQfjgbz53DwYu5mc= -github.com/NethermindEth/starknet.go v0.7.1-0.20240401080518-34a506f3cfdb/go.mod h1:gQkhWpAs9/QR6reZU2xoi1UIYlMS64FLTlh9CrgHH/Y= -github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= -github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= -github.com/XSAM/otelsql v0.27.0 h1:i9xtxtdcqXV768a5C6SoT/RkG+ue3JTOgkYInzlTOqs= -github.com/XSAM/otelsql v0.27.0/go.mod h1:0mFB3TvLa7NCuhm/2nU7/b2wEtsczkj8Rey8ygO7V+A= -github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= -github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= -github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= -github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= -github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= -github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= -github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= -github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= 
-github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE= -github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= -github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= -github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc= -github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= -github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= -github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= -github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo= -github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.10.0 h1:lfxS8zZz1+OjtV4MtNWgboi/W5tyLEB6VQZBXN+0VUU= -github.com/cockroachdb/errors v1.10.0/go.mod 
h1:lknhIsEVQ9Ss/qKDBQS/UqFSvPQjOwNq2qyKAxtHRqE= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= -github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= -github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= -github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= -github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/cometbft/cometbft v0.37.5 h1:/U/TlgMh4NdnXNo+YU9T2NMCWyhXNDF34Mx582jlvq0= -github.com/cometbft/cometbft v0.37.5/go.mod h1:QC+mU0lBhKn8r9qvmnq53Dmf3DWBt4VtkcKw2C81wxY= -github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= -github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= -github.com/confio/ics23/go v0.9.0 h1:cWs+wdbS2KRPZezoaaj+qBleXgUk5WOQFMP3CQFGTr4= -github.com/confio/ics23/go v0.9.0/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= -github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= -github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= -github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= -github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= -github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= -github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/cosmos-sdk v0.47.11 h1:0Qx7eORw0RJqPv+mvDuU8NQ1LV3nJJKJnPoYblWHolc= -github.com/cosmos/cosmos-sdk v0.47.11/go.mod h1:ADjORYzUQqQv/FxDi0H0K5gW/rAk1CiDR3ZKsExfJV0= -github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= -github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= -github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= -github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= -github.com/cosmos/iavl v0.20.1 h1:rM1kqeG3/HBT85vsZdoSNsehciqUQPWrR4BYmqE2+zg= -github.com/cosmos/iavl v0.20.1/go.mod h1:WO7FyvaZJoH65+HFOsDir7xU9FWk2w9cHXNW1XHcl7A= -github.com/cosmos/ibc-go/v7 v7.0.1 h1:NIBNRWjlOoFvFQu1ZlgwkaSeHO5avf4C1YQiWegt8jw= -github.com/cosmos/ibc-go/v7 v7.0.1/go.mod h1:vEaapV6nuLPQlS+g8IKmxMo6auPi0i7HMv1PhViht/E= -github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab h1:I9ialKTQo7248V827Bba4OuKPmk+FPzmTVHsLXaIJWw= -github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab/go.mod 
h1:2CwqasX5dSD7Hbp/9b6lhK6BwoBDCBldx7gPKRukR60= -github.com/cosmos/ledger-cosmos-go v0.12.4 h1:drvWt+GJP7Aiw550yeb3ON/zsrgW0jgh5saFCr7pDnw= -github.com/cosmos/ledger-cosmos-go v0.12.4/go.mod h1:fjfVWRf++Xkygt9wzCsjEBdjcf7wiiY35fv3ctT+k4M= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= -github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= -github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= -github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= -github.com/danielkov/gin-helmet v0.0.0-20171108135313-1387e224435e h1:5jVSh2l/ho6ajWhSPNN84eHEdq3dp0T7+f6r3Tc6hsk= -github.com/danielkov/gin-helmet v0.0.0-20171108135313-1387e224435e/go.mod h1:IJgIiGUARc4aOr4bOQ85klmjsShkEEfiRc6q/yBSfo8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= -github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= -github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= -github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70 h1:CuJS05R9jmNlUK8GOxrEELPbfXm0EuGh/30LjkjN5vo= -github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70/go.mod h1:EoK/8RFbMEteaCaz89uessDTnCWjbbcr+DXcBh4el5o= -github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= -github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= -github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/dominikbraun/graph v0.23.0 
h1:TdZB4pPqCLFxYhdyMFb1TBdFxp8XLcJfTTBQucVPgCo= -github.com/dominikbraun/graph v0.23.0/go.mod h1:yOjYyogZLY1LSG9E33JWZJiq5k83Qy2C6POAuiViluc= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dvsekhvalnov/jose2go v1.7.0 h1:bnQc8+GMnidJZA8zc6lLEAb4xNrIqHwO+9TzqvtQZPo= -github.com/dvsekhvalnov/jose2go v1.7.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esote/minmaxheap v1.0.0 h1:rgA7StnXXpZG6qlM0S7pUmEv1KpWe32rYT4x8J8ntaA= -github.com/esote/minmaxheap v1.0.0/go.mod h1:Ln8+i7fS1k3PLgZI2JAo0iA1as95QnIYiGCrqSJ5FZk= -github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= -github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.13.8 h1:1od+thJel3tM52ZUNQwvpYOeRHlbkVFZ5S8fhi0Lgsg= -github.com/ethereum/go-ethereum v1.13.8/go.mod h1:sc48XYQxCzH3fG9BcrXCOOgQk2JfZzNAmIKnceogzsA= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= -github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= -github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= -github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= -github.com/gagliardetto/binary v0.7.7 h1:QZpT38+sgoPg+TIQjH94sLbl/vX+nlIRA37pEyOsjfY= -github.com/gagliardetto/binary v0.7.7/go.mod h1:mUuay5LL8wFVnIlecHakSZMvcdqfs+CsotR5n77kyjM= -github.com/gagliardetto/solana-go v1.8.4 h1:vmD/JmTlonyXGy39bAo0inMhmbdAwV7rXZtLDMZeodE= -github.com/gagliardetto/solana-go v1.8.4/go.mod h1:i+7aAyNDTHG0jK8GZIBSI4OVvDqkt2Qx+LklYclRNG8= -github.com/gagliardetto/treeout v0.1.4 h1:ozeYerrLCmCubo1TcIjFiOWTTGteOOHND1twdFpgwaw= -github.com/gagliardetto/treeout v0.1.4/go.mod h1:loUefvXTrlRG5rYmJmExNryyBRh8f89VZhmMOyCyqok= -github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= -github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 
h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= -github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= -github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 h1:Uc+IZ7gYqAf/rSGFplbWBSHaGolEQlNLgMgSE3ccnIQ= -github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813/go.mod h1:P+oSoE9yhSRvsmYyZsshflcR6ePWYLql6UU1amW13IM= -github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE= -github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= -github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk= -github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI= -github.com/gin-contrib/expvar v0.0.1 h1:IuU5ArEgihz50vG8Onrwz22kJr7Mcvgv9xSSpfU5g+w= -github.com/gin-contrib/expvar v0.0.1/go.mod h1:8o2CznfQi1JjktORdHr2/abg3wSV6OCnXh0yGypvvVw= -github.com/gin-contrib/sessions v0.0.5 h1:CATtfHmLMQrMNpJRgzjWXD7worTh7g7ritsQfmF+0jE= -github.com/gin-contrib/sessions v0.0.5/go.mod h1:vYAuaUPqie3WUSsft6HUlCjlwwoJQs97miaG2+7neKY= -github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4 h1:Z9J0PVIt1PuibOShaOw1jH8hUYz+Ak8NLsR/GI0Hv5I= -github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4/go.mod h1:CEPcgZiz8998l9E8fDm16h8UfHRL7b+5oG0j/0koeVw= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= -github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= -github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= -github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= -github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= -github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A= -github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= -github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod 
h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= -github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= -github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= -github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao= -github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= -github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= -github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= -github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/go-webauthn/webauthn v0.9.4 h1:YxvHSqgUyc5AK2pZbqkWWR55qKeDPhP8zLDr6lpIc2g= -github.com/go-webauthn/webauthn v0.9.4/go.mod h1:LqupCtzSef38FcxzaklmOn7AykGKhAhr9xlRbdbgnTw= -github.com/go-webauthn/x v0.1.5 h1:V2TCzDU2TGLd0kSZOXdrqDVV5JB9ILnKxA9S53CSBw0= -github.com/go-webauthn/x v0.1.5/go.mod h1:qbzWwcFcv4rTwtCLOZd+icnr6B7oSsAGZJqlt8cukqY= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= -github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= -github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk= -github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= 
-github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= -github.com/gorilla/sessions v1.2.2 h1:lqzMYz6bOfvn2WriPUjNByzeXIlVzURcPmgMczkmTjY= -github.com/gorilla/sessions v1.2.2/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/grafana/pyroscope-go v1.1.1 h1:PQoUU9oWtO3ve/fgIiklYuGilvsm8qaGhlY4Vw6MAcQ= -github.com/grafana/pyroscope-go v1.1.1/go.mod h1:Mw26jU7jsL/KStNSGGuuVYdUq7Qghem5P8aXYXSXG88= -github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= -github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= -github.com/graph-gophers/dataloader v5.0.0+incompatible h1:R+yjsbrNq1Mo3aPG+Z/EKYrXrXXUNJHOgbRt+U6jOug= -github.com/graph-gophers/dataloader v5.0.0+incompatible/go.mod h1:jk4jk0c5ZISbKaMe8WsVopGB5/15GvGHMdMdPtwlRp4= -github.com/graph-gophers/graphql-go v1.5.0 h1:fDqblo50TEpD0LY7RXk/LFVYEVqo3+tXMNMPSVXA1yc= -github.com/graph-gophers/graphql-go v1.5.0/go.mod h1:YtmJZDLbF1YYNrlNAuiO5zAStUWc3XZT07iGsVqe1Os= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA= -github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= -github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= -github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= -github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= -github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= -github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= -github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= -github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= -github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= -github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= -github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-envparse v0.1.0 h1:bE++6bhIsNCPLvgDZkYqo3nA+/PFI51pkrHdmPSDFPY= 
-github.com/hashicorp/go-envparse v0.1.0/go.mod h1:OHheN1GoygLlAkTlXLXvAdnXdZxy8JUweQ1rAXx1xnc= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-plugin v1.6.2-0.20240829161738-06afb6d7ae99 h1:OSQYEsRT3tRttZkk6zyC3aAaliwd7Loi/KgXgXxGtwA= -github.com/hashicorp/go-plugin v1.6.2-0.20240829161738-06afb6d7ae99/go.mod h1:CkgLQ5CZqNmdL9U9JzM532t8ZiYQ35+pj3b1FD37R0Q= -github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= -github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= -github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= -github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= -github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= -github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= -github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= -github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= -github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= -github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= -github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= -github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= -github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= -github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod 
h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= -github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= -github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= -github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= -github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= -github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= 
-github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU= -github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= -github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= -github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= -github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= -github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= -github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= -github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod 
h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a h1:dHCfT5W7gghzPtfsW488uPmEOm85wewI+ypUwibyTdU= -github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= -github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= -github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= -github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= -github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= -github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f h1:tVvGiZQFjOXP+9YyGqSA6jE55x1XVxmoPYudncxrZ8U= -github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f/go.mod h1:Z60vy0EZVSu0bOugCHdcN5ZxFMKSpjRgsnh0XKPFqqk= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= -github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= -github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= -github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= -github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 
h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= -github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= -github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= -github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= -github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= -github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1 h1:mPMvm6X6tf4w8y7j9YIt6V9jfWhL6QlbEc7CCmeQlWk= -github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1/go.mod h1:ye2e/VUEtE2BHE+G/QcKkcLQVAEJoYRFj5VUOQatCRE= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= -github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod 
h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= -github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= -github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= -github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= -github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= -github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/pressly/goose/v3 v3.21.1 h1:5SSAKKWej8LVVzNLuT6KIvP1eFDuPvxa+B6H0w78buQ= -github.com/pressly/goose/v3 v3.21.1/go.mod h1:sqthmzV8PitchEkjecFJII//l43dLOCzfWh8pHEe+vE= -github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= -github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 
-github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= -github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.48.1 h1:CTszphSNTXkuCG6O0IfpKdHcJkvvnAAE1GbELKS+NFk= -github.com/prometheus/prometheus v0.48.1/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= -github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= -github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= -github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= -github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= -github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= -github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/scylladb/go-reflectx v1.0.1 h1:b917wZM7189pZdlND9PbIJ6NQxfDPfBvUaQ7cjj1iZQ= -github.com/scylladb/go-reflectx v1.0.1/go.mod h1:rWnOfDIRWBGN0miMLIcoPt/Dhi2doCMZqwMCJ3KupFc= -github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= -github.com/sethvargo/go-retry v0.2.4/go.mod 
h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= -github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= -github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil/v3 v3.24.3 h1:eoUGJSmdfLzJ3mxIhmOAhgKEKgQkeOwKpz1NbhVnuPE= -github.com/shirou/gopsutil/v3 v3.24.3/go.mod h1:JpND7O217xa72ewWz9zN2eIIkPWsDN/3pl0H8Qt0uwg= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= -github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartcontractkit/chain-selectors v1.0.23 h1:D2Eaex4Cw/O7Lg3tX6WklOqnjjIQAEBnutCtksPzVDY= -github.com/smartcontractkit/chain-selectors v1.0.23/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= -github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8= -github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM= -github.com/smartcontractkit/chainlink-ccip v0.0.0-20241002064705-34d7f9b7e26a h1:zHMknn5+VAOMzMk3LXT+GfCD17h252jTKreFyQwRaB0= -github.com/smartcontractkit/chainlink-ccip v0.0.0-20241002064705-34d7f9b7e26a/go.mod h1:/nGkIe25kgtr+l6y30VH+aTVaxu0NjIEEEhtV1TDlaE= -github.com/smartcontractkit/chainlink-common v0.2.3-0.20241001140426-35be2fad06ec h1:zmLmKLCpoV73AaGU8YBc86YPLJBzoZFv9lPC677Eqcs= -github.com/smartcontractkit/chainlink-common v0.2.3-0.20241001140426-35be2fad06ec/go.mod h1:F6WUS6N4mP5ScwpwyTyAJc9/vjR+GXbMCRUOVekQi1g= -github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240911175228-daf2600bb7b7 h1:lTGIOQYLk1Ufn++X/AvZnt6VOcuhste5yp+C157No/Q= -github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240911175228-daf2600bb7b7/go.mod h1:BMYE1vC/pGmdFSsOJdPrAA0/4gZ0Xo0SxTMdGspBtRo= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240916152957-433914114bd2 h1:yRk4ektpx/UxwarqAfgxUXLrsYXlaNeP1NOwzHGrK2Q= -github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240916152957-433914114bd2/go.mod h1:rNhNSrrRMvkgAm5SA6bNTdh2340bTQQZdUVNtZ2o2bk= -github.com/smartcontractkit/chainlink-feeds v0.0.0-20240910155501-42f20443189f h1:p4p3jBT91EQyLuAMvHD+zNJsuAYI/QjJbzuGUJ7wIgg= -github.com/smartcontractkit/chainlink-feeds v0.0.0-20240910155501-42f20443189f/go.mod h1:FLlWBt2hwiMVgt9AcSo6wBJYIRd/nsc8ENbV1Wir1bw= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240927143737-7e527aa85bff h1:piMugtrRlbVdcC6xZF37me686eS1YwpLQ0kN2v2b9YE= -github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240927143737-7e527aa85bff/go.mod h1:5jD47oCERRQ4eGi0iNdk9ZV5HMEdolfQwHpUX1+Ix4s= -github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240911194142-506bc469d8ae h1:d+B8y2Nd/PrnPMNoaSPn3eDgUgxcVcIqAxGrvYu/gGw= -github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240911194142-506bc469d8ae/go.mod h1:ec/a20UZ7YRK4oxJcnTBFzp1+DBcJcwqEaerUMsktMs= -github.com/smartcontractkit/chainlink/v2 v2.14.0-mercury-20240807.0.20241003164714-620c5129ca80 h1:xC+uXL2o/N6n4C46Iq5/SN2Q1Cxay/HTgDAY8pLgJ7g= -github.com/smartcontractkit/chainlink/v2 
v2.14.0-mercury-20240807.0.20241003164714-620c5129ca80/go.mod h1:9IIWlhuIrHXCcHaRTcYVOZ5fDiQR9xrM9GzgydQqx8U= -github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 h1:12ijqMM9tvYVEm+nR826WsrNi6zCKpwBhuApq127wHs= -github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7/go.mod h1:FX7/bVdoep147QQhsOPkYsPEXhGZjeYx6lBSaSXtZOA= -github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7 h1:e38V5FYE7DA1JfKXeD5Buo/7lczALuVXlJ8YNTAUxcw= -github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7/go.mod h1:fb1ZDVXACvu4frX3APHZaEBp0xi1DIm34DcA0CwTsZM= -github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE= -github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg= -github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ= -github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:G5Sd/yzHWf26rQ+X0nG9E0buKPqRGPMJAfk2gwCzOOw= -github.com/smartcontractkit/wsrpc v0.8.2 h1:XB/xcn/MMseHW+8JE8+a/rceA86ck7Ur6cEa9LiUC8M= -github.com/smartcontractkit/wsrpc v0.8.2/go.mod h1:2u/wfnhl5R4RlSXseN4n6HHIWk8w1Am3AT6gWftQbNg= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= -github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= -github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= -github.com/streamingfast/logging v0.0.0-20220405224725-2755dab2ce75 h1:ZqpS7rAhhKD7S7DnrpEdrnW1/gZcv82ytpMviovkli4= -github.com/streamingfast/logging v0.0.0-20220405224725-2755dab2ce75/go.mod h1:VlduQ80JcGJSargkRU4Sg9Xo63wZD/l8A5NC/Uo1/uU= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= -github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= -github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= -github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= -github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= -github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 h1:3SNcvBmEPE1YlB1JpVZouslJpI3GBNoiqW7+wb0Rz7w= -github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0= -github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE= -github.com/test-go/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU= -github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a h1:YuO+afVc3eqrjiCUizNCxI53bl/BnPiVwXqLzqYTqgU= -github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a/go.mod h1:/sfW47zCZp9FrtGcWyo1VjbgDaodxX9ovZvgLb/MxaA= -github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= -github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= -github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= -github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= -github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= -github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= 
-github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= -github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= -github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= -github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= -github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA= -github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI= -github.com/unrolled/secure v1.13.0 h1:sdr3Phw2+f8Px8HE5sd1EHdj1aV3yUwed/uZXChLFsk= -github.com/unrolled/secure v1.13.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= -github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= -github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= -github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= -github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= -github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= -github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= -github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= -github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= -github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= -github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= -go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= -go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= -go.dedis.ch/kyber/v3 v3.1.0 h1:ghu+kiRgM5JyD9TJ0hTIxTLQlJBR/ehjWvWwYW3XsC0= -go.dedis.ch/kyber/v3 v3.1.0/go.mod h1:kXy7p3STAurkADD+/aZcsznZGKVHEqbtmdIzvPfrs1U= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc= -go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.49.0 h1:1f31+6grJmV3X4lxcEvUy13i5/kfDw1nJZwhd8mA4tg= 
-go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.49.0/go.mod h1:1P/02zM3OwkX9uki+Wmxw3a5GVb6KUXRsa7m7bOC9Fg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.0.0-20240823153156-2a54df7bffb9 h1:UiRNKd1OgqsLbFwE+wkAWTdiAxXtCBqKIHeBIse4FUA= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.0.0-20240823153156-2a54df7bffb9/go.mod h1:eqZlW3pJWhjyexnDPrdQxix1pn0wwhI4AO4GKpP/bMI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0 h1:0MH3f8lZrflbUWXVxyBg/zviDFdGE062uKh5+fu8Vv0= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0/go.mod h1:Vh68vYiHY5mPdekTr0ox0sALsqjoVy0w3Os278yX5SQ= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 h1:BJee2iLkfRfl9lc7aFmBwkWxY/RI1RDdXepSF6y8TPE= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0/go.mod h1:DIzlHs3DRscCIBU3Y9YSzPfScwnYnzfnCd4g8zA7bZc= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= -go.opentelemetry.io/otel/log v0.4.0 h1:/vZ+3Utqh18e8TPjuc3ecg284078KWrR8BRz+PQAj3o= -go.opentelemetry.io/otel/log v0.4.0/go.mod h1:DhGnQvky7pHy82MIRV43iXh3FlKN8UUKftn0KbLOq6I= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/sdk/log v0.4.0 h1:1mMI22L82zLqf6KtkjrRy5BbagOTWdJsqMY/HSqILAA= -go.opentelemetry.io/otel/sdk/log v0.4.0/go.mod h1:AYJ9FVF0hNOgAVzUG/ybg/QttnXhUePWAupmCqtdESo= -go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= -go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod 
h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/ratelimit v0.3.0 h1:IdZd9wqvFXnvLvSEBo0KPcGfkoBGNkpTHlrE3Rcjkjw= -go.uber.org/ratelimit v0.3.0/go.mod h1:So5LG7CV1zWpY1sHe+DXTJqQvOx+FFPFaAs2SnoyBaI= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= -golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= 
-golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= -golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= 
-gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240711142825-46eb208f015d h1:/hmn0Ku5kWij/kjGsrcJeC1T/MrJi2iNWwgAqrihFwc= -google.golang.org/genproto v0.0.0-20240711142825-46eb208f015d/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY= -google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= -google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/guregu/null.v4 v4.0.0 h1:1Wm3S1WEA2I26Kq+6vcW+w0gcDo44YKYD7YIEJNHDjg= -gopkg.in/guregu/null.v4 v4.0.0/go.mod h1:YoQhUrADuG3i9WqesrCmpNRwm1ypAgSHYqoOcTu/JrI= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= -gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= -pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= -rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= -rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= From 714c95e3138d17558586cd3a255193a38a557a15 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 27 Nov 2024 14:33:47 -0500 Subject: [PATCH 06/11] Update test.yml --- .github/workflows/test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index ed4b299..9b39eba 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -55,7 +55,6 @@ jobs: - name: Ensure "make generate" has been run run: | make rm-mocked - make rm-builders make generate git add --all git diff --stat --cached --exit-code \ No newline at end of file From f386d4418eb612126feece666b887d91c7253481 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 27 Nov 2024 14:43:01 -0500 Subject: [PATCH 07/11] Add generate --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index 5676625..282ed98 100644 --- a/Makefile +++ b/Makefile @@ -10,6 +10,10 @@ gomodtidy: gomods mockery: $(mockery) ## Install mockery. 
go install github.com/vektra/mockery/v2@v2.46.3 +.PHONY: generate +generate: mockery + mockery + .PHONY: rm-mocked rm-mocked: grep -rl "^// Code generated by mockery" | grep .go$ | xargs -r rm From 76d9596e39c42c3c0b950a3c7155733fe9bc17fa Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 5 Dec 2024 08:59:44 -0500 Subject: [PATCH 08/11] Remove /types and /utils --- .mockery.yaml | 3 - multinode/mock_hashable_test.go | 18 - multinode/mock_node_selector_test.go | 15 +- multinode/mock_node_test.go | 31 +- multinode/mock_rpc_client_test.go | 65 +- multinode/mock_send_only_client_test.go | 13 +- multinode/mock_send_only_node_test.go | 21 +- multinode/mock_subscription_test.go | 111 ++++ multinode/multi_node.go | 6 +- multinode/multi_node_test.go | 118 ++-- multinode/node.go | 10 +- multinode/node_fsm_test.go | 18 +- multinode/node_lifecycle.go | 17 +- multinode/node_lifecycle_test.go | 254 ++++---- multinode/node_selector.go | 6 +- multinode/node_selector_highest_head.go | 6 +- multinode/node_selector_highest_head_test.go | 60 +- multinode/node_selector_priority_level.go | 14 +- .../node_selector_priority_level_test.go | 10 +- multinode/node_selector_round_robin.go | 6 +- multinode/node_selector_round_robin_test.go | 16 +- multinode/node_selector_test.go | 4 +- multinode/node_selector_total_difficulty.go | 6 +- .../node_selector_total_difficulty_test.go | 58 +- multinode/node_test.go | 13 +- multinode/poller.go | 4 +- multinode/send_only_node.go | 10 +- multinode/send_only_node_lifecycle.go | 4 +- multinode/send_only_node_test.go | 30 +- multinode/transaction_sender.go | 6 +- multinode/transaction_sender_test.go | 75 ++- multinode/types.go | 37 +- multinode/utils.go | 29 + types/chain.go | 32 - types/hashable.go | 12 - types/head.go | 45 -- types/mocks/head.go | 601 ------------------ types/mocks/subscription.go | 111 ---- types/receipt.go | 14 - types/subscription.go | 16 - types/test_utils.go | 16 - utils/utils.go | 35 - 42 files changed, 593 insertions(+), 1383 deletions(-) delete mode 100644 multinode/mock_hashable_test.go create mode 100644 multinode/mock_subscription_test.go create mode 100644 multinode/utils.go delete mode 100644 types/chain.go delete mode 100644 types/hashable.go delete mode 100644 types/head.go delete mode 100644 types/mocks/head.go delete mode 100644 types/mocks/subscription.go delete mode 100644 types/receipt.go delete mode 100644 types/subscription.go delete mode 100644 types/test_utils.go delete mode 100644 utils/utils.go diff --git a/.mockery.yaml b/.mockery.yaml index c71f222..84e8332 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -17,7 +17,4 @@ packages: RPCClient: Head: PoolChainInfoProvider: - github.com/smartcontractkit/chainlink-framework/types: - interfaces: - Head: Subscription: diff --git a/multinode/mock_hashable_test.go b/multinode/mock_hashable_test.go deleted file mode 100644 index 7a42d77..0000000 --- a/multinode/mock_hashable_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package multinode - -import "cmp" - -// Hashable - simple implementation of types.Hashable interface to be used as concrete type in tests -type Hashable string - -func (h Hashable) Cmp(c Hashable) int { - return cmp.Compare(h, c) -} - -func (h Hashable) String() string { - return string(h) -} - -func (h Hashable) Bytes() []byte { - return []byte(h) -} diff --git a/multinode/mock_node_selector_test.go b/multinode/mock_node_selector_test.go index 92c5364..6613b51 100644 --- a/multinode/mock_node_selector_test.go +++ b/multinode/mock_node_selector_test.go @@ -2,17 +2,14 @@ package 
multinode -import ( - types "github.com/smartcontractkit/chainlink-framework/types" - mock "github.com/stretchr/testify/mock" -) +import mock "github.com/stretchr/testify/mock" // mockNodeSelector is an autogenerated mock type for the NodeSelector type -type mockNodeSelector[CHAIN_ID types.ID, RPC any] struct { +type mockNodeSelector[CHAIN_ID ID, RPC any] struct { mock.Mock } -type mockNodeSelector_Expecter[CHAIN_ID types.ID, RPC any] struct { +type mockNodeSelector_Expecter[CHAIN_ID ID, RPC any] struct { mock *mock.Mock } @@ -39,7 +36,7 @@ func (_m *mockNodeSelector[CHAIN_ID, RPC]) Name() string { } // mockNodeSelector_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' -type mockNodeSelector_Name_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNodeSelector_Name_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -86,7 +83,7 @@ func (_m *mockNodeSelector[CHAIN_ID, RPC]) Select() Node[CHAIN_ID, RPC] { } // mockNodeSelector_Select_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Select' -type mockNodeSelector_Select_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNodeSelector_Select_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -114,7 +111,7 @@ func (_c *mockNodeSelector_Select_Call[CHAIN_ID, RPC]) RunAndReturn(run func() N // newMockNodeSelector creates a new instance of mockNodeSelector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func newMockNodeSelector[CHAIN_ID types.ID, RPC any](t interface { +func newMockNodeSelector[CHAIN_ID ID, RPC any](t interface { mock.TestingT Cleanup(func()) }) *mockNodeSelector[CHAIN_ID, RPC] { diff --git a/multinode/mock_node_test.go b/multinode/mock_node_test.go index d9a1ca3..3924591 100644 --- a/multinode/mock_node_test.go +++ b/multinode/mock_node_test.go @@ -5,16 +5,15 @@ package multinode import ( context "context" - types "github.com/smartcontractkit/chainlink-framework/types" mock "github.com/stretchr/testify/mock" ) // mockNode is an autogenerated mock type for the Node type -type mockNode[CHAIN_ID types.ID, RPC any] struct { +type mockNode[CHAIN_ID ID, RPC any] struct { mock.Mock } -type mockNode_Expecter[CHAIN_ID types.ID, RPC any] struct { +type mockNode_Expecter[CHAIN_ID ID, RPC any] struct { mock *mock.Mock } @@ -41,7 +40,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) Close() error { } // mockNode_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' -type mockNode_Close_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_Close_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -86,7 +85,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) ConfiguredChainID() CHAIN_ID { } // mockNode_ConfiguredChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ConfiguredChainID' -type mockNode_ConfiguredChainID_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_ConfiguredChainID_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -131,7 +130,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) HighestUserObservations() ChainInfo { } // mockNode_HighestUserObservations_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HighestUserObservations' -type mockNode_HighestUserObservations_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_HighestUserObservations_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -176,7 
+175,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) Name() string { } // mockNode_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' -type mockNode_Name_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_Name_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -221,7 +220,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) Order() int32 { } // mockNode_Order_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Order' -type mockNode_Order_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_Order_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -266,7 +265,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) RPC() RPC { } // mockNode_RPC_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RPC' -type mockNode_RPC_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_RPC_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -298,7 +297,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) SetPoolChainInfoProvider(_a0 PoolChainInfoPro } // mockNode_SetPoolChainInfoProvider_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetPoolChainInfoProvider' -type mockNode_SetPoolChainInfoProvider_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_SetPoolChainInfoProvider_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -344,7 +343,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) Start(_a0 context.Context) error { } // mockNode_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' -type mockNode_Start_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_Start_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -390,7 +389,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) State() nodeState { } // mockNode_State_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'State' -type mockNode_State_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_State_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -445,7 +444,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) StateAndLatest() (nodeState, ChainInfo) { } // mockNode_StateAndLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StateAndLatest' -type mockNode_StateAndLatest_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_StateAndLatest_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -490,7 +489,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) String() string { } // mockNode_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' -type mockNode_String_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_String_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -522,7 +521,7 @@ func (_m *mockNode[CHAIN_ID, RPC]) UnsubscribeAllExceptAliveLoop() { } // mockNode_UnsubscribeAllExceptAliveLoop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnsubscribeAllExceptAliveLoop' -type mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID types.ID, RPC any] struct { +type mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -550,7 +549,7 @@ func (_c *mockNode_UnsubscribeAllExceptAliveLoop_Call[CHAIN_ID, RPC]) RunAndRetu // newMockNode creates a new instance of mockNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
-func newMockNode[CHAIN_ID types.ID, RPC any](t interface { +func newMockNode[CHAIN_ID ID, RPC any](t interface { mock.TestingT Cleanup(func()) }) *mockNode[CHAIN_ID, RPC] { diff --git a/multinode/mock_rpc_client_test.go b/multinode/mock_rpc_client_test.go index c1b927d..0dbdc1a 100644 --- a/multinode/mock_rpc_client_test.go +++ b/multinode/mock_rpc_client_test.go @@ -5,16 +5,15 @@ package multinode import ( context "context" - types "github.com/smartcontractkit/chainlink-framework/types" mock "github.com/stretchr/testify/mock" ) // mockRPCClient is an autogenerated mock type for the RPCClient type -type mockRPCClient[CHAIN_ID types.ID, HEAD Head] struct { +type mockRPCClient[CHAIN_ID ID, HEAD Head] struct { mock.Mock } -type mockRPCClient_Expecter[CHAIN_ID types.ID, HEAD Head] struct { +type mockRPCClient_Expecter[CHAIN_ID ID, HEAD Head] struct { mock *mock.Mock } @@ -51,7 +50,7 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, } // mockRPCClient_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' -type mockRPCClient_ChainID_Call[CHAIN_ID types.ID, HEAD Head] struct { +type mockRPCClient_ChainID_Call[CHAIN_ID ID, HEAD Head] struct { *mock.Call } @@ -84,7 +83,7 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) Close() { } // mockRPCClient_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' -type mockRPCClient_Close_Call[CHAIN_ID types.ID, HEAD Head] struct { +type mockRPCClient_Close_Call[CHAIN_ID ID, HEAD Head] struct { *mock.Call } @@ -129,7 +128,7 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { } // mockRPCClient_Dial_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Dial' -type mockRPCClient_Dial_Call[CHAIN_ID types.ID, HEAD Head] struct { +type mockRPCClient_Dial_Call[CHAIN_ID ID, HEAD Head] struct { *mock.Call } @@ -185,7 +184,7 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) GetInterceptedChainInfo() (ChainInfo, C } // mockRPCClient_GetInterceptedChainInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInterceptedChainInfo' -type mockRPCClient_GetInterceptedChainInfo_Call[CHAIN_ID types.ID, HEAD Head] struct { +type mockRPCClient_GetInterceptedChainInfo_Call[CHAIN_ID ID, HEAD Head] struct { *mock.Call } @@ -240,7 +239,7 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, e } // mockRPCClient_IsSyncing_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsSyncing' -type mockRPCClient_IsSyncing_Call[CHAIN_ID types.ID, HEAD Head] struct { +type mockRPCClient_IsSyncing_Call[CHAIN_ID ID, HEAD Head] struct { *mock.Call } @@ -286,7 +285,7 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) Ping(_a0 context.Context) error { } // mockRPCClient_Ping_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Ping' -type mockRPCClient_Ping_Call[CHAIN_ID types.ID, HEAD Head] struct { +type mockRPCClient_Ping_Call[CHAIN_ID ID, HEAD Head] struct { *mock.Call } @@ -314,7 +313,7 @@ func (_c *mockRPCClient_Ping_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context } // SubscribeToFinalizedHeads provides a mock function with given fields: ctx -func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.Context) 
(<-chan HEAD, Subscription, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -322,9 +321,9 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.C } var r0 <-chan HEAD - var r1 types.Subscription + var r1 Subscription var r2 error - if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, Subscription, error)); ok { return rf(ctx) } if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { @@ -335,11 +334,11 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.C } } - if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + if rf, ok := ret.Get(1).(func(context.Context) Subscription); ok { r1 = rf(ctx) } else { if ret.Get(1) != nil { - r1 = ret.Get(1).(types.Subscription) + r1 = ret.Get(1).(Subscription) } } @@ -353,7 +352,7 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.C } // mockRPCClient_SubscribeToFinalizedHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToFinalizedHeads' -type mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID types.ID, HEAD Head] struct { +type mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID ID, HEAD Head] struct { *mock.Call } @@ -370,18 +369,18 @@ func (_c *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) Run(run return _c } -func (_c *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) Return(_a0 <-chan HEAD, _a1 types.Subscription, _a2 error) *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { +func (_c *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) Return(_a0 <-chan HEAD, _a1 Subscription, _a2 error) *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { +func (_c *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (<-chan HEAD, Subscription, error)) *mockRPCClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { _c.Call.Return(run) return _c } // SubscribeToHeads provides a mock function with given fields: ctx -func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) (<-chan HEAD, Subscription, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -389,9 +388,9 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) ( } var r0 <-chan HEAD - var r1 types.Subscription + var r1 Subscription var r2 error - if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, Subscription, error)); ok { return rf(ctx) } if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { @@ -402,11 +401,11 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) ( } } - if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + if rf, ok := ret.Get(1).(func(context.Context) Subscription); ok { r1 = rf(ctx) } else { if ret.Get(1) != nil { - r1 = ret.Get(1).(types.Subscription) + r1 = ret.Get(1).(Subscription) } } @@ -420,7 
+419,7 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) ( } // mockRPCClient_SubscribeToHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToHeads' -type mockRPCClient_SubscribeToHeads_Call[CHAIN_ID types.ID, HEAD Head] struct { +type mockRPCClient_SubscribeToHeads_Call[CHAIN_ID ID, HEAD Head] struct { *mock.Call } @@ -437,18 +436,18 @@ func (_c *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) Run(run func(ctx return _c } -func (_c *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) Return(_a0 <-chan HEAD, _a1 types.Subscription, _a2 error) *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { +func (_c *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) Return(_a0 <-chan HEAD, _a1 Subscription, _a2 error) *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { +func (_c *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (<-chan HEAD, Subscription, error)) *mockRPCClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { _c.Call.Return(run) return _c } // UnsubscribeAllExcept provides a mock function with given fields: subs -func (_m *mockRPCClient[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...types.Subscription) { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...Subscription) { _va := make([]interface{}, len(subs)) for _i := range subs { _va[_i] = subs[_i] @@ -459,23 +458,23 @@ func (_m *mockRPCClient[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...types.Subs } // mockRPCClient_UnsubscribeAllExcept_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnsubscribeAllExcept' -type mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID types.ID, HEAD Head] struct { +type mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID ID, HEAD Head] struct { *mock.Call } // UnsubscribeAllExcept is a helper method to define mock.On call -// - subs ...types.Subscription +// - subs ...Subscription func (_e *mockRPCClient_Expecter[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...interface{}) *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD] { return &mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("UnsubscribeAllExcept", append([]interface{}{}, subs...)...)} } -func (_c *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD]) Run(run func(subs ...types.Subscription)) *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD] { +func (_c *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD]) Run(run func(subs ...Subscription)) *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD] { _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]types.Subscription, len(args)-0) + variadicArgs := make([]Subscription, len(args)-0) for i, a := range args[0:] { if a != nil { - variadicArgs[i] = a.(types.Subscription) + variadicArgs[i] = a.(Subscription) } } run(variadicArgs...) 
@@ -488,14 +487,14 @@ func (_c *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD]) Return() *moc return _c } -func (_c *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(...types.Subscription)) *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD] { +func (_c *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(...Subscription)) *mockRPCClient_UnsubscribeAllExcept_Call[CHAIN_ID, HEAD] { _c.Call.Return(run) return _c } // newMockRPCClient creates a new instance of mockRPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func newMockRPCClient[CHAIN_ID types.ID, HEAD Head](t interface { +func newMockRPCClient[CHAIN_ID ID, HEAD Head](t interface { mock.TestingT Cleanup(func()) }) *mockRPCClient[CHAIN_ID, HEAD] { diff --git a/multinode/mock_send_only_client_test.go b/multinode/mock_send_only_client_test.go index 7eb6e9f..5c08506 100644 --- a/multinode/mock_send_only_client_test.go +++ b/multinode/mock_send_only_client_test.go @@ -5,16 +5,15 @@ package multinode import ( context "context" - types "github.com/smartcontractkit/chainlink-framework/types" mock "github.com/stretchr/testify/mock" ) // mockSendOnlyClient is an autogenerated mock type for the sendOnlyClient type -type mockSendOnlyClient[CHAIN_ID types.ID] struct { +type mockSendOnlyClient[CHAIN_ID ID] struct { mock.Mock } -type mockSendOnlyClient_Expecter[CHAIN_ID types.ID] struct { +type mockSendOnlyClient_Expecter[CHAIN_ID ID] struct { mock *mock.Mock } @@ -51,7 +50,7 @@ func (_m *mockSendOnlyClient[CHAIN_ID]) ChainID(_a0 context.Context) (CHAIN_ID, } // mockSendOnlyClient_ChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainID' -type mockSendOnlyClient_ChainID_Call[CHAIN_ID types.ID] struct { +type mockSendOnlyClient_ChainID_Call[CHAIN_ID ID] struct { *mock.Call } @@ -84,7 +83,7 @@ func (_m *mockSendOnlyClient[CHAIN_ID]) Close() { } // mockSendOnlyClient_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' -type mockSendOnlyClient_Close_Call[CHAIN_ID types.ID] struct { +type mockSendOnlyClient_Close_Call[CHAIN_ID ID] struct { *mock.Call } @@ -129,7 +128,7 @@ func (_m *mockSendOnlyClient[CHAIN_ID]) Dial(ctx context.Context) error { } // mockSendOnlyClient_Dial_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Dial' -type mockSendOnlyClient_Dial_Call[CHAIN_ID types.ID] struct { +type mockSendOnlyClient_Dial_Call[CHAIN_ID ID] struct { *mock.Call } @@ -158,7 +157,7 @@ func (_c *mockSendOnlyClient_Dial_Call[CHAIN_ID]) RunAndReturn(run func(context. // newMockSendOnlyClient creates a new instance of mockSendOnlyClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
-func newMockSendOnlyClient[CHAIN_ID types.ID](t interface { +func newMockSendOnlyClient[CHAIN_ID ID](t interface { mock.TestingT Cleanup(func()) }) *mockSendOnlyClient[CHAIN_ID] { diff --git a/multinode/mock_send_only_node_test.go b/multinode/mock_send_only_node_test.go index f277a12..e76b053 100644 --- a/multinode/mock_send_only_node_test.go +++ b/multinode/mock_send_only_node_test.go @@ -5,16 +5,15 @@ package multinode import ( context "context" - types "github.com/smartcontractkit/chainlink-framework/types" mock "github.com/stretchr/testify/mock" ) // mockSendOnlyNode is an autogenerated mock type for the SendOnlyNode type -type mockSendOnlyNode[CHAIN_ID types.ID, RPC any] struct { +type mockSendOnlyNode[CHAIN_ID ID, RPC any] struct { mock.Mock } -type mockSendOnlyNode_Expecter[CHAIN_ID types.ID, RPC any] struct { +type mockSendOnlyNode_Expecter[CHAIN_ID ID, RPC any] struct { mock *mock.Mock } @@ -41,7 +40,7 @@ func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Close() error { } // mockSendOnlyNode_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' -type mockSendOnlyNode_Close_Call[CHAIN_ID types.ID, RPC any] struct { +type mockSendOnlyNode_Close_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -86,7 +85,7 @@ func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) ConfiguredChainID() CHAIN_ID { } // mockSendOnlyNode_ConfiguredChainID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ConfiguredChainID' -type mockSendOnlyNode_ConfiguredChainID_Call[CHAIN_ID types.ID, RPC any] struct { +type mockSendOnlyNode_ConfiguredChainID_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -131,7 +130,7 @@ func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Name() string { } // mockSendOnlyNode_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' -type mockSendOnlyNode_Name_Call[CHAIN_ID types.ID, RPC any] struct { +type mockSendOnlyNode_Name_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -176,7 +175,7 @@ func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) RPC() RPC { } // mockSendOnlyNode_RPC_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RPC' -type mockSendOnlyNode_RPC_Call[CHAIN_ID types.ID, RPC any] struct { +type mockSendOnlyNode_RPC_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -221,7 +220,7 @@ func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Start(_a0 context.Context) error { } // mockSendOnlyNode_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' -type mockSendOnlyNode_Start_Call[CHAIN_ID types.ID, RPC any] struct { +type mockSendOnlyNode_Start_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -267,7 +266,7 @@ func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) State() nodeState { } // mockSendOnlyNode_State_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'State' -type mockSendOnlyNode_State_Call[CHAIN_ID types.ID, RPC any] struct { +type mockSendOnlyNode_State_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -312,7 +311,7 @@ func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) String() string { } // mockSendOnlyNode_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' -type mockSendOnlyNode_String_Call[CHAIN_ID types.ID, RPC any] struct { +type mockSendOnlyNode_String_Call[CHAIN_ID ID, RPC any] struct { *mock.Call } @@ -340,7 +339,7 @@ func (_c *mockSendOnlyNode_String_Call[CHAIN_ID, RPC]) 
RunAndReturn(run func() s // newMockSendOnlyNode creates a new instance of mockSendOnlyNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func newMockSendOnlyNode[CHAIN_ID types.ID, RPC any](t interface { +func newMockSendOnlyNode[CHAIN_ID ID, RPC any](t interface { mock.TestingT Cleanup(func()) }) *mockSendOnlyNode[CHAIN_ID, RPC] { diff --git a/multinode/mock_subscription_test.go b/multinode/mock_subscription_test.go new file mode 100644 index 0000000..ccb9017 --- /dev/null +++ b/multinode/mock_subscription_test.go @@ -0,0 +1,111 @@ +// Code generated by mockery v2.46.3. DO NOT EDIT. + +package multinode + +import mock "github.com/stretchr/testify/mock" + +// mockSubscription is an autogenerated mock type for the Subscription type +type mockSubscription struct { + mock.Mock +} + +type mockSubscription_Expecter struct { + mock *mock.Mock +} + +func (_m *mockSubscription) EXPECT() *mockSubscription_Expecter { + return &mockSubscription_Expecter{mock: &_m.Mock} +} + +// Err provides a mock function with given fields: +func (_m *mockSubscription) Err() <-chan error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Err") + } + + var r0 <-chan error + if rf, ok := ret.Get(0).(func() <-chan error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan error) + } + } + + return r0 +} + +// mockSubscription_Err_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Err' +type mockSubscription_Err_Call struct { + *mock.Call +} + +// Err is a helper method to define mock.On call +func (_e *mockSubscription_Expecter) Err() *mockSubscription_Err_Call { + return &mockSubscription_Err_Call{Call: _e.mock.On("Err")} +} + +func (_c *mockSubscription_Err_Call) Run(run func()) *mockSubscription_Err_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockSubscription_Err_Call) Return(_a0 <-chan error) *mockSubscription_Err_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *mockSubscription_Err_Call) RunAndReturn(run func() <-chan error) *mockSubscription_Err_Call { + _c.Call.Return(run) + return _c +} + +// Unsubscribe provides a mock function with given fields: +func (_m *mockSubscription) Unsubscribe() { + _m.Called() +} + +// mockSubscription_Unsubscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Unsubscribe' +type mockSubscription_Unsubscribe_Call struct { + *mock.Call +} + +// Unsubscribe is a helper method to define mock.On call +func (_e *mockSubscription_Expecter) Unsubscribe() *mockSubscription_Unsubscribe_Call { + return &mockSubscription_Unsubscribe_Call{Call: _e.mock.On("Unsubscribe")} +} + +func (_c *mockSubscription_Unsubscribe_Call) Run(run func()) *mockSubscription_Unsubscribe_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *mockSubscription_Unsubscribe_Call) Return() *mockSubscription_Unsubscribe_Call { + _c.Call.Return() + return _c +} + +func (_c *mockSubscription_Unsubscribe_Call) RunAndReturn(run func()) *mockSubscription_Unsubscribe_Call { + _c.Call.Return(run) + return _c +} + +// newMockSubscription creates a new instance of mockSubscription. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func newMockSubscription(t interface { + mock.TestingT + Cleanup(func()) +}) *mockSubscription { + mock := &mockSubscription{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/multinode/multi_node.go b/multinode/multi_node.go index 93d8acb..15261b4 100644 --- a/multinode/multi_node.go +++ b/multinode/multi_node.go @@ -12,8 +12,6 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" - - "github.com/smartcontractkit/chainlink-framework/types" ) var ( @@ -28,7 +26,7 @@ var ( // MultiNode is a generalized multi node client interface that includes methods to interact with different chains. // It also handles multiple node RPC connections simultaneously. type MultiNode[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ] struct { services.Service @@ -51,7 +49,7 @@ type MultiNode[ } func NewMultiNode[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ]( lggr logger.Logger, diff --git a/multinode/multi_node_test.go b/multinode/multi_node_test.go index f40e8a8..72a2b8f 100644 --- a/multinode/multi_node_test.go +++ b/multinode/multi_node_test.go @@ -16,23 +16,21 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" - - "github.com/smartcontractkit/chainlink-framework/types" ) -type multiNodeRPCClient RPCClient[types.ID, types.Head[Hashable]] +type multiNodeRPCClient RPCClient[ID, Head] type testMultiNode struct { - *MultiNode[types.ID, multiNodeRPCClient] + *MultiNode[ID, multiNodeRPCClient] } type multiNodeOpts struct { logger logger.Logger selectionMode string leaseDuration time.Duration - nodes []Node[types.ID, multiNodeRPCClient] - sendonlys []SendOnlyNode[types.ID, multiNodeRPCClient] - chainID types.ID + nodes []Node[ID, multiNodeRPCClient] + sendonlys []SendOnlyNode[ID, multiNodeRPCClient] + chainID ID chainFamily string deathDeclarationDelay time.Duration } @@ -42,19 +40,19 @@ func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { opts.logger = logger.Test(t) } - result := NewMultiNode[types.ID, multiNodeRPCClient]( + result := NewMultiNode[ID, multiNodeRPCClient]( opts.logger, opts.selectionMode, opts.leaseDuration, opts.nodes, opts.sendonlys, opts.chainID, opts.chainFamily, opts.deathDeclarationDelay) return testMultiNode{ result, } } -func newHealthyNode(t *testing.T, chainID types.ID) *mockNode[types.ID, multiNodeRPCClient] { +func newHealthyNode(t *testing.T, chainID ID) *mockNode[ID, multiNodeRPCClient] { return newNodeWithState(t, chainID, nodeStateAlive) } -func newNodeWithState(t *testing.T, chainID types.ID, state nodeState) *mockNode[types.ID, multiNodeRPCClient] { - node := newMockNode[types.ID, multiNodeRPCClient](t) +func newNodeWithState(t *testing.T, chainID ID, state nodeState) *mockNode[ID, multiNodeRPCClient] { + node := newMockNode[ID, multiNodeRPCClient](t) node.On("ConfiguredChainID").Return(chainID).Once() node.On("Start", mock.Anything).Return(nil).Once() node.On("Close").Return(nil).Once() @@ -67,14 +65,14 @@ func newNodeWithState(t *testing.T, chainID types.ID, state nodeState) *mockNode func TestMultiNode_Dial(t *testing.T) { t.Parallel() - newMockNode := newMockNode[types.ID, multiNodeRPCClient] - newMockSendOnlyNode := newMockSendOnlyNode[types.ID, multiNodeRPCClient] + newMockNode := newMockNode[ID, multiNodeRPCClient] + newMockSendOnlyNode := newMockSendOnlyNode[ID, 
multiNodeRPCClient] t.Run("Fails without nodes", func(t *testing.T) { t.Parallel() mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, - chainID: types.RandomID(), + chainID: RandomID(), }) err := mn.Start(tests.Context(t)) assert.ErrorContains(t, err, fmt.Sprintf("no available nodes for chain %s", mn.chainID)) @@ -82,15 +80,15 @@ func TestMultiNode_Dial(t *testing.T) { t.Run("Fails with wrong node's chainID", func(t *testing.T) { t.Parallel() node := newMockNode(t) - multiNodeChainID := types.NewIDFromInt(10) - nodeChainID := types.NewIDFromInt(11) + multiNodeChainID := NewIDFromInt(10) + nodeChainID := NewIDFromInt(11) node.On("ConfiguredChainID").Return(nodeChainID).Twice() const nodeName = "nodeName" node.On("String").Return(nodeName).Once() mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: multiNodeChainID, - nodes: []Node[types.ID, multiNodeRPCClient]{node}, + nodes: []Node[ID, multiNodeRPCClient]{node}, }) err := mn.Start(tests.Context(t)) assert.ErrorContains(t, err, fmt.Sprintf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", nodeName, nodeChainID, mn.chainID)) @@ -98,7 +96,7 @@ func TestMultiNode_Dial(t *testing.T) { t.Run("Fails if node fails", func(t *testing.T) { t.Parallel() node := newMockNode(t) - chainID := types.RandomID() + chainID := RandomID() node.On("ConfiguredChainID").Return(chainID).Once() expectedError := errors.New("failed to start node") node.On("Start", mock.Anything).Return(expectedError).Once() @@ -106,7 +104,7 @@ func TestMultiNode_Dial(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, multiNodeRPCClient]{node}, + nodes: []Node[ID, multiNodeRPCClient]{node}, }) err := mn.Start(tests.Context(t)) assert.ErrorIs(t, err, expectedError) @@ -114,7 +112,7 @@ func TestMultiNode_Dial(t *testing.T) { t.Run("Closes started nodes on failure", func(t *testing.T) { t.Parallel() - chainID := types.RandomID() + chainID := RandomID() node1 := newHealthyNode(t, chainID) node2 := newMockNode(t) node2.On("ConfiguredChainID").Return(chainID).Once() @@ -125,17 +123,17 @@ func TestMultiNode_Dial(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, multiNodeRPCClient]{node1, node2}, + nodes: []Node[ID, multiNodeRPCClient]{node1, node2}, }) err := mn.Start(tests.Context(t)) assert.ErrorIs(t, err, expectedError) }) t.Run("Fails with wrong send only node's chainID", func(t *testing.T) { t.Parallel() - multiNodeChainID := types.NewIDFromInt(10) + multiNodeChainID := NewIDFromInt(10) node := newHealthyNode(t, multiNodeChainID) sendOnly := newMockSendOnlyNode(t) - sendOnlyChainID := types.NewIDFromInt(11) + sendOnlyChainID := NewIDFromInt(11) sendOnly.On("ConfiguredChainID").Return(sendOnlyChainID).Twice() const sendOnlyName = "sendOnlyNodeName" sendOnly.On("String").Return(sendOnlyName).Once() @@ -143,14 +141,14 @@ func TestMultiNode_Dial(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: multiNodeChainID, - nodes: []Node[types.ID, multiNodeRPCClient]{node}, - sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly}, + nodes: []Node[ID, multiNodeRPCClient]{node}, + sendonlys: []SendOnlyNode[ID, multiNodeRPCClient]{sendOnly}, }) err := mn.Start(tests.Context(t)) assert.ErrorContains(t, err, fmt.Sprintf("sendonly 
node %s has configured chain ID %s which does not match multinode configured chain ID of %s", sendOnlyName, sendOnlyChainID, mn.chainID)) }) - newHealthySendOnly := func(t *testing.T, chainID types.ID) *mockSendOnlyNode[types.ID, multiNodeRPCClient] { + newHealthySendOnly := func(t *testing.T, chainID ID) *mockSendOnlyNode[ID, multiNodeRPCClient] { node := newMockSendOnlyNode(t) node.On("ConfiguredChainID").Return(chainID).Once() node.On("Start", mock.Anything).Return(nil).Once() @@ -159,7 +157,7 @@ func TestMultiNode_Dial(t *testing.T) { } t.Run("Fails on send only node failure", func(t *testing.T) { t.Parallel() - chainID := types.NewIDFromInt(10) + chainID := NewIDFromInt(10) node := newHealthyNode(t, chainID) sendOnly1 := newHealthySendOnly(t, chainID) sendOnly2 := newMockSendOnlyNode(t) @@ -170,21 +168,21 @@ func TestMultiNode_Dial(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, multiNodeRPCClient]{node}, - sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly1, sendOnly2}, + nodes: []Node[ID, multiNodeRPCClient]{node}, + sendonlys: []SendOnlyNode[ID, multiNodeRPCClient]{sendOnly1, sendOnly2}, }) err := mn.Start(tests.Context(t)) assert.ErrorIs(t, err, expectedError) }) t.Run("Starts successfully with healthy nodes", func(t *testing.T) { t.Parallel() - chainID := types.NewIDFromInt(10) + chainID := NewIDFromInt(10) node := newHealthyNode(t, chainID) mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, multiNodeRPCClient]{node}, - sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newHealthySendOnly(t, chainID)}, + nodes: []Node[ID, multiNodeRPCClient]{node}, + sendonlys: []SendOnlyNode[ID, multiNodeRPCClient]{newHealthySendOnly(t, chainID)}, }) servicetest.Run(t, mn) selectedNode, err := mn.selectNode() @@ -197,14 +195,14 @@ func TestMultiNode_Report(t *testing.T) { t.Parallel() t.Run("Dial starts periodical reporting", func(t *testing.T) { t.Parallel() - chainID := types.RandomID() + chainID := RandomID() node1 := newHealthyNode(t, chainID) node2 := newNodeWithState(t, chainID, nodeStateOutOfSync) lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, multiNodeRPCClient]{node1, node2}, + nodes: []Node[ID, multiNodeRPCClient]{node1, node2}, logger: lggr, }) mn.reportInterval = tests.TestInterval @@ -214,13 +212,13 @@ func TestMultiNode_Report(t *testing.T) { }) t.Run("Report critical error on all node failure", func(t *testing.T) { t.Parallel() - chainID := types.RandomID() + chainID := RandomID() node := newNodeWithState(t, chainID, nodeStateOutOfSync) lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, multiNodeRPCClient]{node}, + nodes: []Node[ID, multiNodeRPCClient]{node}, logger: lggr, }) mn.reportInterval = tests.TestInterval @@ -237,28 +235,28 @@ func TestMultiNode_CheckLease(t *testing.T) { t.Parallel() t.Run("Round robin disables lease check", func(t *testing.T) { t.Parallel() - chainID := types.RandomID() + chainID := RandomID() node := newHealthyNode(t, chainID) lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, 
logger: lggr, - nodes: []Node[types.ID, multiNodeRPCClient]{node}, + nodes: []Node[ID, multiNodeRPCClient]{node}, }) servicetest.Run(t, mn) tests.RequireLogMessage(t, observedLogs, "Best node switching is disabled") }) t.Run("Misconfigured lease check period won't start", func(t *testing.T) { t.Parallel() - chainID := types.RandomID() + chainID := RandomID() node := newHealthyNode(t, chainID) lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeHighestHead, chainID: chainID, logger: lggr, - nodes: []Node[types.ID, multiNodeRPCClient]{node}, + nodes: []Node[ID, multiNodeRPCClient]{node}, leaseDuration: 0, }) servicetest.Run(t, mn) @@ -266,18 +264,18 @@ func TestMultiNode_CheckLease(t *testing.T) { }) t.Run("Lease check updates active node", func(t *testing.T) { t.Parallel() - chainID := types.RandomID() + chainID := RandomID() node := newHealthyNode(t, chainID) node.On("UnsubscribeAllExceptAliveLoop") bestNode := newHealthyNode(t, chainID) - nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) + nodeSelector := newMockNodeSelector[ID, multiNodeRPCClient](t) nodeSelector.On("Select").Return(bestNode) lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeHighestHead, chainID: chainID, logger: lggr, - nodes: []Node[types.ID, multiNodeRPCClient]{node, bestNode}, + nodes: []Node[ID, multiNodeRPCClient]{node, bestNode}, leaseDuration: tests.TestInterval, }) mn.nodeSelector = nodeSelector @@ -292,7 +290,7 @@ func TestMultiNode_CheckLease(t *testing.T) { }) t.Run("NodeStates returns proper states", func(t *testing.T) { t.Parallel() - chainID := types.NewIDFromInt(10) + chainID := NewIDFromInt(10) nodes := map[string]nodeState{ "node_1": nodeStateAlive, "node_2": nodeStateUnreachable, @@ -306,12 +304,12 @@ func TestMultiNode_CheckLease(t *testing.T) { expectedResult := map[string]string{} for name, state := range nodes { - node := newMockNode[types.ID, multiNodeRPCClient](t) + node := newMockNode[ID, multiNodeRPCClient](t) node.On("State").Return(state).Once() node.On("Name").Return(name).Once() opts.nodes = append(opts.nodes, node) - sendOnly := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t) + sendOnly := newMockSendOnlyNode[ID, multiNodeRPCClient](t) sendOnlyName := "send_only_" + name sendOnly.On("State").Return(state).Once() sendOnly.On("Name").Return(sendOnlyName).Once() @@ -331,18 +329,18 @@ func TestMultiNode_selectNode(t *testing.T) { t.Parallel() t.Run("Returns same node, if it's still healthy", func(t *testing.T) { t.Parallel() - chainID := types.RandomID() - node1 := newMockNode[types.ID, multiNodeRPCClient](t) + chainID := RandomID() + node1 := newMockNode[ID, multiNodeRPCClient](t) node1.On("State").Return(nodeStateAlive).Once() node1.On("String").Return("node1").Maybe() - node2 := newMockNode[types.ID, multiNodeRPCClient](t) + node2 := newMockNode[ID, multiNodeRPCClient](t) node2.On("String").Return("node2").Maybe() mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, multiNodeRPCClient]{node1, node2}, + nodes: []Node[ID, multiNodeRPCClient]{node1, node2}, }) - nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) + nodeSelector := newMockNodeSelector[ID, multiNodeRPCClient](t) nodeSelector.On("Select").Return(node1).Once() mn.nodeSelector = nodeSelector prevActiveNode, err := mn.selectNode() @@ -354,18 +352,18 @@ func 
TestMultiNode_selectNode(t *testing.T) { }) t.Run("Updates node if active is not healthy", func(t *testing.T) { t.Parallel() - chainID := types.RandomID() - oldBest := newMockNode[types.ID, multiNodeRPCClient](t) + chainID := RandomID() + oldBest := newMockNode[ID, multiNodeRPCClient](t) oldBest.On("String").Return("oldBest").Maybe() oldBest.On("UnsubscribeAllExceptAliveLoop") - newBest := newMockNode[types.ID, multiNodeRPCClient](t) + newBest := newMockNode[ID, multiNodeRPCClient](t) newBest.On("String").Return("newBest").Maybe() mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, multiNodeRPCClient]{oldBest, newBest}, + nodes: []Node[ID, multiNodeRPCClient]{oldBest, newBest}, }) - nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) + nodeSelector := newMockNodeSelector[ID, multiNodeRPCClient](t) nodeSelector.On("Select").Return(oldBest).Once() mn.nodeSelector = nodeSelector activeNode, err := mn.selectNode() @@ -380,14 +378,14 @@ func TestMultiNode_selectNode(t *testing.T) { }) t.Run("No active nodes - reports critical error", func(t *testing.T) { t.Parallel() - chainID := types.RandomID() + chainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, logger: lggr, }) - nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) + nodeSelector := newMockNodeSelector[ID, multiNodeRPCClient](t) nodeSelector.On("Select").Return(nil).Once() nodeSelector.On("Name").Return("MockedNodeSelector").Once() mn.nodeSelector = nodeSelector @@ -491,7 +489,7 @@ func TestMultiNode_ChainInfo(t *testing.T) { }, } - chainID := types.RandomID() + chainID := RandomID() mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, @@ -500,7 +498,7 @@ func TestMultiNode_ChainInfo(t *testing.T) { tc := testCases[i] t.Run(tc.Name, func(t *testing.T) { for _, params := range tc.NodeParams { - node := newMockNode[types.ID, multiNodeRPCClient](t) + node := newMockNode[ID, multiNodeRPCClient](t) mn.primaryNodes = append(mn.primaryNodes, node) node.On("StateAndLatest").Return(params.State, params.LatestChainInfo) node.On("HighestUserObservations").Return(params.HighestUserObservations) diff --git a/multinode/node.go b/multinode/node.go index 9bb1b82..45f46d5 100644 --- a/multinode/node.go +++ b/multinode/node.go @@ -13,8 +13,6 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" - - "github.com/smartcontractkit/chainlink-framework/types" ) const QueryTimeout = 10 * time.Second @@ -57,7 +55,7 @@ type ChainConfig interface { } type Node[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ] interface { // State returns most accurate state of the Node on the moment of call. 
@@ -85,7 +83,7 @@ type Node[ } type node[ - CHAIN_ID types.ID, + CHAIN_ID ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], ] struct { @@ -113,11 +111,11 @@ type node[ // wg waits for subsidiary goroutines wg sync.WaitGroup - healthCheckSubs []types.Subscription + healthCheckSubs []Subscription } func NewNode[ - CHAIN_ID types.ID, + CHAIN_ID ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], ]( diff --git a/multinode/node_fsm_test.go b/multinode/node_fsm_test.go index ad09a1a..efb3249 100644 --- a/multinode/node_fsm_test.go +++ b/multinode/node_fsm_test.go @@ -6,8 +6,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - - "github.com/smartcontractkit/chainlink-framework/types" ) type fnMock struct{ calls int } @@ -39,46 +37,46 @@ func TestUnit_Node_StateTransitions(t *testing.T) { t.Run("transitionToAlive", func(t *testing.T) { const destinationState = nodeStateAlive allowedStates := []nodeState{nodeStateDialed, nodeStateInvalidChainID, nodeStateSyncing} - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) testTransition(t, rpc, testNode.transitionToAlive, destinationState, allowedStates...) }) t.Run("transitionToInSync", func(t *testing.T) { const destinationState = nodeStateAlive allowedStates := []nodeState{nodeStateOutOfSync, nodeStateSyncing} - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) testTransition(t, rpc, testNode.transitionToInSync, destinationState, allowedStates...) }) t.Run("transitionToOutOfSync", func(t *testing.T) { const destinationState = nodeStateOutOfSync allowedStates := []nodeState{nodeStateAlive} - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("Close") testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) }) t.Run("transitionToUnreachable", func(t *testing.T) { const destinationState = nodeStateUnreachable allowedStates := []nodeState{nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing} - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("Close") testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) }) t.Run("transitionToInvalidChain", func(t *testing.T) { const destinationState = nodeStateInvalidChainID allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateSyncing} - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("Close") testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) }) t.Run("transitionToSyncing", func(t *testing.T) { const destinationState = nodeStateSyncing allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID} - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("Close") testTransition(t, rpc, testNode.transitionToSyncing, destinationState, allowedStates...) 
}) t.Run("transitionToSyncing panics if nodeIsSyncing is disabled", func(t *testing.T) { - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("Close") node := newTestNode(t, testNodeOpts{rpc: rpc}) node.setState(nodeStateDialed) @@ -90,7 +88,7 @@ func TestUnit_Node_StateTransitions(t *testing.T) { }) } -func testTransition(t *testing.T, rpc *mockRPCClient[types.ID, Head], transition func(node testNode, fn func()), destinationState nodeState, allowedStates ...nodeState) { +func testTransition(t *testing.T, rpc *mockRPCClient[ID, Head], transition func(node testNode, fn func()), destinationState nodeState, allowedStates ...nodeState) { node := newTestNode(t, testNodeOpts{rpc: rpc, config: testNodeConfig{nodeIsSyncingEnabled: true}}) for _, allowedState := range allowedStates { m := new(fnMock) diff --git a/multinode/node_lifecycle.go b/multinode/node_lifecycle.go index 775d204..ae64851 100644 --- a/multinode/node_lifecycle.go +++ b/multinode/node_lifecycle.go @@ -13,9 +13,6 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/utils" bigmath "github.com/smartcontractkit/chainlink-common/pkg/utils/big_math" - - "github.com/smartcontractkit/chainlink-framework/types" - iutils "github.com/smartcontractkit/chainlink-framework/utils" ) var ( @@ -245,7 +242,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unsubscribeHealthChecks() { for _, sub := range n.healthCheckSubs { sub.Unsubscribe() } - n.healthCheckSubs = []types.Subscription{} + n.healthCheckSubs = []Subscription{} n.stateMu.Unlock() } @@ -255,7 +252,7 @@ type headSubscription[HEAD any] struct { NoNewHeads <-chan time.Time noNewHeadsTicker *time.Ticker - sub types.Subscription + sub Subscription cleanUpTasks []func() } @@ -270,10 +267,10 @@ func (sub *headSubscription[HEAD]) Unsubscribe() { } func (n *node[CHAIN_ID, HEAD, PRC]) registerNewSubscription(ctx context.Context, lggr logger.SugaredLogger, - noNewDataThreshold time.Duration, newSub func(ctx context.Context) (<-chan HEAD, types.Subscription, error)) (headSubscription[HEAD], error) { + noNewDataThreshold time.Duration, newSub func(ctx context.Context) (<-chan HEAD, Subscription, error)) (headSubscription[HEAD], error) { result := headSubscription[HEAD]{} var err error - var sub types.Subscription + var sub Subscription result.Heads, sub, err = newSub(ctx) if err != nil { return result, err @@ -559,7 +556,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { lggr := logger.Sugared(logger.Named(n.lfcLog, "Unreachable")) lggr.Debugw("Trying to revive unreachable RPC node", "nodeState", n.getCachedState()) - dialRetryBackoff := iutils.NewRedialBackoff() + dialRetryBackoff := NewRedialBackoff() for { select { @@ -622,7 +619,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with invalid chain ID", n.String()), "nodeState", n.getCachedState()) - chainIDRecheckBackoff := iutils.NewRedialBackoff() + chainIDRecheckBackoff := NewRedialBackoff() for { select { @@ -672,7 +669,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { return } - recheckBackoff := iutils.NewRedialBackoff() + recheckBackoff := NewRedialBackoff() for { select { diff --git a/multinode/node_lifecycle_test.go b/multinode/node_lifecycle_test.go index abdce22..3751750 100644 --- a/multinode/node_lifecycle_test.go +++ b/multinode/node_lifecycle_test.go @@ -19,12 +19,10 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" 
clientMocks "github.com/smartcontractkit/chainlink-framework/multinode/mocks" - "github.com/smartcontractkit/chainlink-framework/types" - "github.com/smartcontractkit/chainlink-framework/types/mocks" ) -func newSub(t *testing.T) *mocks.Subscription { - sub := mocks.NewSubscription(t) +func newSub(t *testing.T) *mockSubscription { + sub := newMockSubscription(t) sub.On("Err").Return((<-chan error)(nil)).Maybe() sub.On("Unsubscribe") return sub @@ -49,7 +47,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("if initial subscribe fails, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) node := newDialedNode(t, testNodeOpts{ rpc: rpc, }) @@ -66,7 +64,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("if remote RPC connection is closed transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) node := newDialedNode(t, testNodeOpts{ @@ -76,7 +74,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() defer func() { assert.NoError(t, node.close()) }() - sub := mocks.NewSubscription(t) + sub := newMockSubscription(t) errChan := make(chan error) close(errChan) sub.On("Err").Return((<-chan error)(errChan)).Once() @@ -96,7 +94,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { } t.Run("Stays alive and waits for signal", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newSubscribedNode(t, testNodeOpts{ @@ -112,7 +110,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("stays alive while below pollFailureThreshold and resets counter on success", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const pollFailureThreshold = 3 @@ -154,7 +152,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("with threshold poll failures, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const pollFailureThreshold = 3 @@ -178,7 +176,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("with threshold poll failures, but we are the last node alive, forcibly keeps it alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const pollFailureThreshold = 3 node := newSubscribedNode(t, testNodeOpts{ @@ -204,7 +202,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when behind more than SyncThreshold, transitions to out of sync", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const syncThreshold = 10 node := 
newSubscribedNode(t, testNodeOpts{ @@ -239,7 +237,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when behind more than SyncThreshold but we are the last live node, forcibly stays alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const syncThreshold = 10 node := newSubscribedNode(t, testNodeOpts{ @@ -266,7 +264,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when behind but SyncThreshold=0, stay alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{ @@ -287,7 +285,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when no new heads received for threshold, transitions to out of sync", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{}, @@ -311,7 +309,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when no new heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newSubscribedNode(t, testNodeOpts{ @@ -336,7 +334,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Run("rpc closed head channel", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) sub := newSub(t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() ch := make(chan Head) @@ -360,7 +358,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("If finality tag is not enabled updates finalized block metric using finality depth and latest head", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) sub := newSub(t) const blockNumber = 1000 const finalityDepth = 10 @@ -390,7 +388,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("If fails to subscribe to latest finalized blocks, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) sub := newSub(t) rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() expectedError := errors.New("failed to subscribe to finalized heads") @@ -414,7 +412,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("Logs warning if latest finalized block is not valid", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) sub := newSub(t) rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() ch := make(chan Head, 1) @@ -440,7 +438,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("On new finalized block updates corresponding metric", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) const expectedBlock = 1101 const 
finalityDepth = 10 ch := make(chan Head) @@ -475,7 +473,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("If finalized heads channel is closed, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() ch := make(chan Head) close(ch) @@ -498,7 +496,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when no new finalized heads received for threshold, transitions to out of sync", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() ch := make(chan Head, 1) ch <- head{BlockNumber: 10}.ToMockHead(t) @@ -530,7 +528,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when no new finalized heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(make(<-chan Head), newSub(t), nil).Once() lggr, observed := logger.TestObserved(t, zap.DebugLevel) @@ -557,9 +555,9 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("If finalized subscription returns an error, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - sub := mocks.NewSubscription(t) + sub := newMockSubscription(t) errCh := make(chan error, 1) errCh <- errors.New("subscription failed") sub.On("Err").Return((<-chan error)(errCh)) @@ -607,9 +605,9 @@ func writeHeads(t *testing.T, ch chan<- Head, heads ...head) { } } -func setupRPCForAliveLoop(t *testing.T, rpc *mockRPCClient[types.ID, Head]) { +func setupRPCForAliveLoop(t *testing.T, rpc *mockRPCClient[ID, Head]) { rpc.On("Dial", mock.Anything).Return(nil).Maybe() - aliveSubscription := mocks.NewSubscription(t) + aliveSubscription := newMockSubscription(t) aliveSubscription.On("Err").Return(nil).Maybe() aliveSubscription.On("Unsubscribe").Maybe() rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), aliveSubscription, nil).Maybe() @@ -637,8 +635,8 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("on old blocks stays outOfSync and returns on close", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr := logger.Test(t) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -652,7 +650,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: 0}, ChainInfo{BlockNumber: 13}).Once() - outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription := newMockSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() heads := []head{{BlockNumber: 7}, {BlockNumber: 11}, {BlockNumber: 13}} @@ -675,7 +673,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if initial dial fails, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, 
Head](t) + rpc := newMockRPCClient[ID, Head](t) node := newAliveNode(t, testNodeOpts{ rpc: rpc, }) @@ -692,8 +690,8 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if fail to get chainID, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - chainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + chainID := RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, chainID: chainID, @@ -705,7 +703,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() // for unreachable rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() - sub := mocks.NewSubscription(t) + sub := newMockSubscription(t) errChan := make(chan error, 1) errChan <- errors.New("subscription was terminate") sub.On("Err").Return((<-chan error)(errChan)) @@ -715,7 +713,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { expectedError := errors.New("failed to get chain ID") // might be called multiple times - rpc.On("ChainID", mock.Anything).Return(types.NewIDFromInt(0), expectedError) + rpc.On("ChainID", mock.Anything).Return(NewIDFromInt(0), expectedError) node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable @@ -723,9 +721,9 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if chainID does not match, transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.NewIDFromInt(10) - rpcChainID := types.NewIDFromInt(11) + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := NewIDFromInt(10) + rpcChainID := NewIDFromInt(11) node := newAliveNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -744,8 +742,8 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if syncing, transitions to syncing", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.NewIDFromInt(10) + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := NewIDFromInt(10) node := newAliveNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -765,8 +763,8 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if fails to fetch syncing status, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.NewIDFromInt(10) + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := NewIDFromInt(10) node := newAliveNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -789,8 +787,8 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if fails to subscribe, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -810,8 +808,8 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("on subscription termination becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -824,7 +822,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("ChainID", 
mock.Anything).Return(nodeChainID, nil).Once() rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - sub := mocks.NewSubscription(t) + sub := newMockSubscription(t) errChan := make(chan error, 1) errChan <- errors.New("subscription was terminate") sub.On("Err").Return((<-chan error)(errChan)) @@ -839,8 +837,8 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("becomes unreachable if head channel is closed", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -868,8 +866,8 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("becomes alive if it receives a newer head", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -881,7 +879,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription := newMockSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() const highestBlock = 1000 @@ -901,8 +899,8 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("becomes alive if there is no other nodes", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ chainConfig: clientMocks.ChainConfig{ @@ -924,7 +922,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription := newMockSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), outOfSyncSubscription, nil).Once() @@ -938,8 +936,8 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("Stays out-of-sync if received new head, but lags behind pool", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ chainConfig: clientMocks.ChainConfig{ @@ -966,7 +964,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription := newMockSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() ch := make(chan Head) @@ -983,8 +981,8 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) // creates RPC mock with all calls 
necessary to create heads subscription that won't produce any events - newRPCWithNoOpHeads := func(t *testing.T, chainID types.ID) *mockRPCClient[types.ID, Head] { - rpc := newMockRPCClient[types.ID, Head](t) + newRPCWithNoOpHeads := func(t *testing.T, chainID ID) *mockRPCClient[ID, Head] { + rpc := newMockRPCClient[ID, Head](t) rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(chainID, nil).Once() sub := newSub(t) @@ -994,7 +992,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { t.Run("if fails to subscribe to finalized, becomes unreachable", func(t *testing.T) { t.Parallel() - nodeChainID := types.RandomID() + nodeChainID := RandomID() rpc := newRPCWithNoOpHeads(t, nodeChainID) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -1016,7 +1014,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("on subscription termination becomes unreachable", func(t *testing.T) { t.Parallel() - nodeChainID := types.RandomID() + nodeChainID := RandomID() rpc := newRPCWithNoOpHeads(t, nodeChainID) lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) node := newAliveNode(t, testNodeOpts{ @@ -1029,7 +1027,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() - sub := mocks.NewSubscription(t) + sub := newMockSubscription(t) errChan := make(chan error, 1) errChan <- errors.New("subscription was terminate") sub.On("Err").Return((<-chan error)(errChan)) @@ -1047,7 +1045,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("becomes unreachable if head channel is closed", func(t *testing.T) { t.Parallel() - nodeChainID := types.RandomID() + nodeChainID := RandomID() rpc := newRPCWithNoOpHeads(t, nodeChainID) lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) node := newAliveNode(t, testNodeOpts{ @@ -1077,7 +1075,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("becomes alive on new finalized block", func(t *testing.T) { t.Parallel() - nodeChainID := types.RandomID() + nodeChainID := RandomID() rpc := newRPCWithNoOpHeads(t, nodeChainID) lggr := logger.Test(t) node := newAliveNode(t, testNodeOpts{ @@ -1094,7 +1092,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { const highestBlock = 13 rpc.On("GetInterceptedChainInfo").Return(ChainInfo{FinalizedBlockNumber: highestBlock}, ChainInfo{FinalizedBlockNumber: highestBlock}) - outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription := newMockSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() ch := make(chan Head) @@ -1113,7 +1111,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("adds finalized block is not increasing flag, if there is no new finalized heads for too long", func(t *testing.T) { t.Parallel() - nodeChainID := types.RandomID() + nodeChainID := RandomID() rpc := newRPCWithNoOpHeads(t, nodeChainID) lggr, observed := logger.TestObserved(t, zap.DebugLevel) const noNewFinalizedHeads = tests.TestInterval @@ -1131,7 +1129,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { const highestBlock = 13 rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{FinalizedBlockNumber: highestBlock}) - outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription := newMockSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() ch := make(chan Head) @@ -1165,8 +1163,8 
@@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on failed redial, keeps trying", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -1181,8 +1179,8 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on failed chainID verification, keep trying", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -1200,9 +1198,9 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.NewIDFromInt(10) - rpcChainID := types.NewIDFromInt(11) + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := NewIDFromInt(10) + rpcChainID := NewIDFromInt(11) node := newAliveNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1219,8 +1217,8 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on syncing status check failure, keeps trying", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -1240,8 +1238,8 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on syncing, transitions to syncing state", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1262,8 +1260,8 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1282,8 +1280,8 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1320,8 +1318,8 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on invalid dial becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() node := newDialedNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1338,8 +1336,8 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on failed chainID call becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := 
newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ rpc: rpc, @@ -1361,9 +1359,9 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on chainID mismatch keeps trying", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.NewIDFromInt(10) - rpcChainID := types.NewIDFromInt(11) + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := NewIDFromInt(10) + rpcChainID := NewIDFromInt(11) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ rpc: rpc, @@ -1383,9 +1381,9 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.NewIDFromInt(10) - rpcChainID := types.NewIDFromInt(11) + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := NewIDFromInt(10) + rpcChainID := NewIDFromInt(11) node := newDialedNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1403,9 +1401,9 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.NewIDFromInt(10) - rpcChainID := types.NewIDFromInt(11) + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := NewIDFromInt(10) + rpcChainID := NewIDFromInt(11) node := newDialedNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1437,8 +1435,8 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { } t.Run("if fails on initial dial, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newNode(t, testNodeOpts{ rpc: rpc, @@ -1457,8 +1455,8 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("if chainID verification fails, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newNode(t, testNodeOpts{ rpc: rpc, @@ -1480,9 +1478,9 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.NewIDFromInt(10) - rpcChainID := types.NewIDFromInt(11) + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := NewIDFromInt(10) + rpcChainID := NewIDFromInt(11) node := newNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1500,8 +1498,8 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("if syncing verification fails, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newNode(t, testNodeOpts{ rpc: rpc, @@ -1527,8 +1525,8 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on isSyncing transitions to syncing", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - 
nodeChainID := types.NewIDFromInt(10) + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := NewIDFromInt(10) node := newNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1548,8 +1546,8 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() node := newNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1569,8 +1567,8 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() node := newNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1664,7 +1662,7 @@ func TestUnit_NodeLifecycle_outOfSyncWithPool(t *testing.T) { for _, testCase := range testCases { t.Run(fmt.Sprintf("%s: SelectionModeVal: %s: total difficulty: %d", testCase.name, selectionMode, td), func(t *testing.T) { chainInfo := ChainInfo{BlockNumber: testCase.blockNumber, TotalDifficulty: big.NewInt(td)} - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(chainInfo, ChainInfo{}).Once() node.rpc = rpc outOfSync, liveNodes := node.isOutOfSyncWithPool() @@ -1724,7 +1722,7 @@ func TestUnit_NodeLifecycle_outOfSyncWithPool(t *testing.T) { for _, testCase := range testCases { t.Run(fmt.Sprintf("%s: SelectionModeVal: %s: highest block: %d", testCase.name, NodeSelectionModeTotalDifficulty, hb), func(t *testing.T) { chainInfo := ChainInfo{BlockNumber: hb, TotalDifficulty: big.NewInt(testCase.totalDifficulty)} - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(chainInfo, ChainInfo{}).Once() node.rpc = rpc outOfSync, liveNodes := node.isOutOfSyncWithPool() @@ -1755,8 +1753,8 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on invalid dial becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() node := newDialedNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1772,8 +1770,8 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on failed chainID call becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ rpc: rpc, @@ -1795,9 +1793,9 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on chainID mismatch transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.NewIDFromInt(10) - rpcChainID := types.NewIDFromInt(11) + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := NewIDFromInt(10) + rpcChainID := NewIDFromInt(11) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ rpc: rpc, @@ -1817,8 +1815,8 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on failed Syncing check - becomes unreachable", func(t *testing.T) { t.Parallel() 
- rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ rpc: rpc, @@ -1842,8 +1840,8 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on IsSyncing - keeps trying", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ rpc: rpc, @@ -1864,8 +1862,8 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockRPCClient[types.ID, Head](t) - nodeChainID := types.RandomID() + rpc := newMockRPCClient[ID, Head](t) + nodeChainID := RandomID() node := newDialedNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1961,7 +1959,7 @@ func TestNode_State(t *testing.T) { } for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - rpc := newMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(tc.NodeChainInfo, tc.PoolChainInfo).Once() node := newTestNode(t, testNodeOpts{ config: testNodeConfig{ diff --git a/multinode/node_selector.go b/multinode/node_selector.go index eaf6a22..ebf5166 100644 --- a/multinode/node_selector.go +++ b/multinode/node_selector.go @@ -2,8 +2,6 @@ package multinode import ( "fmt" - - "github.com/smartcontractkit/chainlink-framework/types" ) const ( @@ -14,7 +12,7 @@ const ( ) type NodeSelector[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ] interface { // Select returns a Node, or nil if none can be selected. 
@@ -25,7 +23,7 @@ type NodeSelector[ } func newNodeSelector[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ](selectionMode string, nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { switch selectionMode { diff --git a/multinode/node_selector_highest_head.go b/multinode/node_selector_highest_head.go index f00373a..7c850c4 100644 --- a/multinode/node_selector_highest_head.go +++ b/multinode/node_selector_highest_head.go @@ -2,17 +2,15 @@ package multinode import ( "math" - - "github.com/smartcontractkit/chainlink-framework/types" ) type highestHeadNodeSelector[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ] []Node[CHAIN_ID, RPC] func NewHighestHeadNodeSelector[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { return highestHeadNodeSelector[CHAIN_ID, RPC](nodes) diff --git a/multinode/node_selector_highest_head_test.go b/multinode/node_selector_highest_head_test.go index 17eb7d5..1b0e35a 100644 --- a/multinode/node_selector_highest_head_test.go +++ b/multinode/node_selector_highest_head_test.go @@ -4,24 +4,22 @@ import ( "testing" "github.com/stretchr/testify/assert" - - "github.com/smartcontractkit/chainlink-framework/types" ) func TestHighestHeadNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModeHighestHead, nil) + selector := newNodeSelector[ID, RPCClient[ID, Head]](NodeSelectionModeHighestHead, nil) assert.Equal(t, selector.Name(), NodeSelectionModeHighestHead) } func TestHighestHeadNodeSelector(t *testing.T) { t.Parallel() - type nodeClient RPCClient[types.ID, Head] + type nodeClient RPCClient[ID, Head] - var nodes []Node[types.ID, nodeClient] + var nodes []Node[ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: int64(-1)}) @@ -36,11 +34,11 @@ func TestHighestHeadNodeSelector(t *testing.T) { nodes = append(nodes, node) } - selector := newNodeSelector[types.ID, nodeClient](NodeSelectionModeHighestHead, nodes) + selector := newNodeSelector[ID, nodeClient](NodeSelectionModeHighestHead, nodes) assert.Same(t, nodes[2], selector.Select()) t.Run("stick to the same node", func(t *testing.T) { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) // fourth node is alive, LatestReceivedBlockNumber = 2 (same as 3rd) node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(2)}) node.On("Order").Return(int32(1)) @@ -51,7 +49,7 @@ func TestHighestHeadNodeSelector(t *testing.T) { }) t.Run("another best node", func(t *testing.T) { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) // fifth node is alive, LatestReceivedBlockNumber = 3 (better than 3rd and 4th) node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) node.On("Order").Return(int32(1)) @@ -62,13 +60,13 @@ func TestHighestHeadNodeSelector(t *testing.T) { }) t.Run("nodes never update latest block number", func(t *testing.T) { - node1 := newMockNode[types.ID, nodeClient](t) + node1 := newMockNode[ID, nodeClient](t) node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(-1)}) node1.On("Order").Return(int32(1)) - node2 := newMockNode[types.ID, nodeClient](t) + node2 := newMockNode[ID, nodeClient](t) node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(-1)}) 
node2.On("Order").Return(int32(1)) - selector := newNodeSelector(NodeSelectionModeHighestHead, []Node[types.ID, nodeClient]{node1, node2}) + selector := newNodeSelector(NodeSelectionModeHighestHead, []Node[ID, nodeClient]{node1, node2}) assert.Same(t, node1, selector.Select()) }) } @@ -76,11 +74,11 @@ func TestHighestHeadNodeSelector(t *testing.T) { func TestHighestHeadNodeSelector_None(t *testing.T) { t.Parallel() - type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, nodeClient] + type nodeClient RPCClient[ID, Head] + var nodes []Node[ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: int64(-1)}) @@ -98,12 +96,12 @@ func TestHighestHeadNodeSelector_None(t *testing.T) { func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Parallel() - type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, nodeClient] + type nodeClient RPCClient[ID, Head] + var nodes []Node[ID, nodeClient] t.Run("same head and order", func(t *testing.T) { for i := 0; i < 3; i++ { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(1)}) node.On("Order").Return(int32(2)) nodes = append(nodes, node) @@ -114,61 +112,61 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { }) t.Run("same head but different order", func(t *testing.T) { - node1 := newMockNode[types.ID, nodeClient](t) + node1 := newMockNode[ID, nodeClient](t) node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) node1.On("Order").Return(int32(3)) - node2 := newMockNode[types.ID, nodeClient](t) + node2 := newMockNode[ID, nodeClient](t) node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) node2.On("Order").Return(int32(1)) - node3 := newMockNode[types.ID, nodeClient](t) + node3 := newMockNode[ID, nodeClient](t) node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) node3.On("Order").Return(int32(2)) - nodes := []Node[types.ID, nodeClient]{node1, node2, node3} + nodes := []Node[ID, nodeClient]{node1, node2, node3} selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) // Should select the second node as it has the highest priority assert.Same(t, nodes[1], selector.Select()) }) t.Run("different head but same order", func(t *testing.T) { - node1 := newMockNode[types.ID, nodeClient](t) + node1 := newMockNode[ID, nodeClient](t) node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(1)}) node1.On("Order").Maybe().Return(int32(3)) - node2 := newMockNode[types.ID, nodeClient](t) + node2 := newMockNode[ID, nodeClient](t) node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(2)}) node2.On("Order").Maybe().Return(int32(3)) - node3 := newMockNode[types.ID, nodeClient](t) + node3 := newMockNode[ID, nodeClient](t) node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) node3.On("Order").Return(int32(3)) - nodes := []Node[types.ID, nodeClient]{node1, node2, node3} + nodes := []Node[ID, nodeClient]{node1, node2, node3} selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) // Should select the third node as it has the highest head assert.Same(t, nodes[2], selector.Select()) }) t.Run("different head and different order", func(t *testing.T) { - node1 
:= newMockNode[types.ID, nodeClient](t) + node1 := newMockNode[ID, nodeClient](t) node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(10)}) node1.On("Order").Maybe().Return(int32(3)) - node2 := newMockNode[types.ID, nodeClient](t) + node2 := newMockNode[ID, nodeClient](t) node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(11)}) node2.On("Order").Maybe().Return(int32(4)) - node3 := newMockNode[types.ID, nodeClient](t) + node3 := newMockNode[ID, nodeClient](t) node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(11)}) node3.On("Order").Maybe().Return(int32(3)) - node4 := newMockNode[types.ID, nodeClient](t) + node4 := newMockNode[ID, nodeClient](t) node4.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(10)}) node4.On("Order").Maybe().Return(int32(1)) - nodes := []Node[types.ID, nodeClient]{node1, node2, node3, node4} + nodes := []Node[ID, nodeClient]{node1, node2, node3, node4} selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) // Should select the third node as it has the highest head and will win the priority tie-breaker assert.Same(t, nodes[2], selector.Select()) diff --git a/multinode/node_selector_priority_level.go b/multinode/node_selector_priority_level.go index bedbf36..3fcf83a 100644 --- a/multinode/node_selector_priority_level.go +++ b/multinode/node_selector_priority_level.go @@ -4,12 +4,10 @@ import ( "math" "sort" "sync/atomic" - - "github.com/smartcontractkit/chainlink-framework/types" ) type priorityLevelNodeSelector[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ] struct { nodes []Node[CHAIN_ID, RPC] @@ -17,7 +15,7 @@ type priorityLevelNodeSelector[ } type nodeWithPriority[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ] struct { node Node[CHAIN_ID, RPC] @@ -25,7 +23,7 @@ type nodeWithPriority[ } func NewPriorityLevelNodeSelector[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { return &priorityLevelNodeSelector[CHAIN_ID, RPC]{ @@ -72,7 +70,7 @@ func (s priorityLevelNodeSelector[CHAIN_ID, RPC]) getHighestPriorityAliveTier() // removeLowerTiers take a slice of nodeWithPriority[CHAIN_ID, BLOCK_HASH, HEAD, RPC] and keeps only the highest tier func removeLowerTiers[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ](nodes []nodeWithPriority[CHAIN_ID, RPC]) []nodeWithPriority[CHAIN_ID, RPC] { sort.SliceStable(nodes, func(i, j int) bool { @@ -93,7 +91,7 @@ func removeLowerTiers[ // nrOfPriorityTiers calculates the total number of priority tiers func nrOfPriorityTiers[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ](nodes []Node[CHAIN_ID, RPC]) int32 { highestPriority := int32(0) @@ -108,7 +106,7 @@ func nrOfPriorityTiers[ // firstOrHighestPriority takes a list of nodes and returns the first one with the highest priority func firstOrHighestPriority[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ](nodes []Node[CHAIN_ID, RPC]) Node[CHAIN_ID, RPC] { hp := int32(math.MaxInt32) diff --git a/multinode/node_selector_priority_level_test.go b/multinode/node_selector_priority_level_test.go index a89a9e9..40c81bc 100644 --- a/multinode/node_selector_priority_level_test.go +++ b/multinode/node_selector_priority_level_test.go @@ -4,19 +4,17 @@ import ( "testing" "github.com/stretchr/testify/assert" - - "github.com/smartcontractkit/chainlink-framework/types" ) func TestPriorityLevelNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModePriorityLevel, nil) + selector := 
newNodeSelector[ID, RPCClient[ID, Head]](NodeSelectionModePriorityLevel, nil) assert.Equal(t, selector.Name(), NodeSelectionModePriorityLevel) } func TestPriorityLevelNodeSelector(t *testing.T) { t.Parallel() - type nodeClient RPCClient[types.ID, Head] + type nodeClient RPCClient[ID, Head] type testNode struct { order int32 state nodeState @@ -66,9 +64,9 @@ func TestPriorityLevelNodeSelector(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - var nodes []Node[types.ID, nodeClient] + var nodes []Node[ID, nodeClient] for _, tn := range tc.nodes { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) node.On("State").Return(tn.state) node.On("Order").Return(tn.order) nodes = append(nodes, node) diff --git a/multinode/node_selector_round_robin.go b/multinode/node_selector_round_robin.go index 68c819f..256d7dd 100644 --- a/multinode/node_selector_round_robin.go +++ b/multinode/node_selector_round_robin.go @@ -2,12 +2,10 @@ package multinode import ( "sync/atomic" - - "github.com/smartcontractkit/chainlink-framework/types" ) type roundRobinSelector[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ] struct { nodes []Node[CHAIN_ID, RPC] @@ -15,7 +13,7 @@ type roundRobinSelector[ } func NewRoundRobinSelector[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { return &roundRobinSelector[CHAIN_ID, RPC]{ diff --git a/multinode/node_selector_round_robin_test.go b/multinode/node_selector_round_robin_test.go index 6f7af4d..180db36 100644 --- a/multinode/node_selector_round_robin_test.go +++ b/multinode/node_selector_round_robin_test.go @@ -4,23 +4,21 @@ import ( "testing" "github.com/stretchr/testify/assert" - - "github.com/smartcontractkit/chainlink-framework/types" ) func TestRoundRobinNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModeRoundRobin, nil) + selector := newNodeSelector[ID, RPCClient[ID, Head]](NodeSelectionModeRoundRobin, nil) assert.Equal(t, selector.Name(), NodeSelectionModeRoundRobin) } func TestRoundRobinNodeSelector(t *testing.T) { t.Parallel() - type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, nodeClient] + type nodeClient RPCClient[ID, Head] + var nodes []Node[ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("State").Return(nodeStateOutOfSync) @@ -41,11 +39,11 @@ func TestRoundRobinNodeSelector(t *testing.T) { func TestRoundRobinNodeSelector_None(t *testing.T) { t.Parallel() - type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, nodeClient] + type nodeClient RPCClient[ID, Head] + var nodes []Node[ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("State").Return(nodeStateOutOfSync) diff --git a/multinode/node_selector_test.go b/multinode/node_selector_test.go index 3b889d9..2befd38 100644 --- a/multinode/node_selector_test.go +++ b/multinode/node_selector_test.go @@ -4,15 +4,13 @@ import ( "testing" "github.com/stretchr/testify/assert" - - "github.com/smartcontractkit/chainlink-framework/types" ) func TestNodeSelector(t *testing.T) { // rest of the tests are located in specific node selectors tests t.Run("panics on unknown type", func(t *testing.T) { assert.Panics(t, func() { - _ = newNodeSelector[types.ID, 
RPCClient[types.ID, Head]]("unknown", nil) + _ = newNodeSelector[ID, RPCClient[ID, Head]]("unknown", nil) }) }) } diff --git a/multinode/node_selector_total_difficulty.go b/multinode/node_selector_total_difficulty.go index 718a207..b3aa47c 100644 --- a/multinode/node_selector_total_difficulty.go +++ b/multinode/node_selector_total_difficulty.go @@ -2,17 +2,15 @@ package multinode import ( "math/big" - - "github.com/smartcontractkit/chainlink-framework/types" ) type totalDifficultyNodeSelector[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ] []Node[CHAIN_ID, RPC] func NewTotalDifficultyNodeSelector[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { return totalDifficultyNodeSelector[CHAIN_ID, RPC](nodes) diff --git a/multinode/node_selector_total_difficulty_test.go b/multinode/node_selector_total_difficulty_test.go index 1e399da..5c970b5 100644 --- a/multinode/node_selector_total_difficulty_test.go +++ b/multinode/node_selector_total_difficulty_test.go @@ -5,23 +5,21 @@ import ( "testing" "github.com/stretchr/testify/assert" - - "github.com/smartcontractkit/chainlink-framework/types" ) func TestTotalDifficultyNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModeTotalDifficulty, nil) + selector := newNodeSelector[ID, RPCClient[ID, Head]](NodeSelectionModeTotalDifficulty, nil) assert.Equal(t, selector.Name(), NodeSelectionModeTotalDifficulty) } func TestTotalDifficultyNodeSelector(t *testing.T) { t.Parallel() - type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, nodeClient] + type nodeClient RPCClient[ID, Head] + var nodes []Node[ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1}) @@ -40,7 +38,7 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { assert.Same(t, nodes[2], selector.Select()) t.Run("stick to the same node", func(t *testing.T) { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) // fourth node is alive (same as 3rd) node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 2, TotalDifficulty: big.NewInt(8)}) node.On("Order").Maybe().Return(int32(1)) @@ -51,7 +49,7 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { }) t.Run("another best node", func(t *testing.T) { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) // fifth node is alive (better than 3rd and 4th) node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(11)}) node.On("Order").Maybe().Return(int32(1)) @@ -62,13 +60,13 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { }) t.Run("nodes never update latest block number", func(t *testing.T) { - node1 := newMockNode[types.ID, nodeClient](t) + node1 := newMockNode[ID, nodeClient](t) node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: -1, TotalDifficulty: nil}) node1.On("Order").Maybe().Return(int32(1)) - node2 := newMockNode[types.ID, nodeClient](t) + node2 := newMockNode[ID, nodeClient](t) node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: -1, TotalDifficulty: nil}) node2.On("Order").Maybe().Return(int32(1)) - nodes := []Node[types.ID, nodeClient]{node1, node2} + nodes := []Node[ID, nodeClient]{node1, node2} selector := 
newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) assert.Same(t, node1, selector.Select()) @@ -78,11 +76,11 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { func TestTotalDifficultyNodeSelector_None(t *testing.T) { t.Parallel() - type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, nodeClient] + type nodeClient RPCClient[ID, Head] + var nodes []Node[ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1, TotalDifficulty: nil}) @@ -100,12 +98,12 @@ func TestTotalDifficultyNodeSelector_None(t *testing.T) { func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Parallel() - type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, nodeClient] + type nodeClient RPCClient[ID, Head] + var nodes []Node[ID, nodeClient] t.Run("same td and order", func(t *testing.T) { for i := 0; i < 3; i++ { - node := newMockNode[types.ID, nodeClient](t) + node := newMockNode[ID, nodeClient](t) node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(10)}) node.On("Order").Return(int32(2)) nodes = append(nodes, node) @@ -116,61 +114,61 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { }) t.Run("same td but different order", func(t *testing.T) { - node1 := newMockNode[types.ID, nodeClient](t) + node1 := newMockNode[ID, nodeClient](t) node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(10)}) node1.On("Order").Return(int32(3)) - node2 := newMockNode[types.ID, nodeClient](t) + node2 := newMockNode[ID, nodeClient](t) node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(10)}) node2.On("Order").Return(int32(1)) - node3 := newMockNode[types.ID, nodeClient](t) + node3 := newMockNode[ID, nodeClient](t) node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(10)}) node3.On("Order").Return(int32(2)) - nodes := []Node[types.ID, nodeClient]{node1, node2, node3} + nodes := []Node[ID, nodeClient]{node1, node2, node3} selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) // Should select the second node as it has the highest priority assert.Same(t, nodes[1], selector.Select()) }) t.Run("different td but same order", func(t *testing.T) { - node1 := newMockNode[types.ID, nodeClient](t) + node1 := newMockNode[ID, nodeClient](t) node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(10)}) node1.On("Order").Maybe().Return(int32(3)) - node2 := newMockNode[types.ID, nodeClient](t) + node2 := newMockNode[ID, nodeClient](t) node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(11)}) node2.On("Order").Maybe().Return(int32(3)) - node3 := newMockNode[types.ID, nodeClient](t) + node3 := newMockNode[ID, nodeClient](t) node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(12)}) node3.On("Order").Return(int32(3)) - nodes := []Node[types.ID, nodeClient]{node1, node2, node3} + nodes := []Node[ID, nodeClient]{node1, node2, node3} selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) // Should select the third node as it has the highest td assert.Same(t, nodes[2], selector.Select()) }) t.Run("different head and different 
order", func(t *testing.T) { - node1 := newMockNode[types.ID, nodeClient](t) + node1 := newMockNode[ID, nodeClient](t) node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(100)}) node1.On("Order").Maybe().Return(int32(4)) - node2 := newMockNode[types.ID, nodeClient](t) + node2 := newMockNode[ID, nodeClient](t) node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(110)}) node2.On("Order").Maybe().Return(int32(5)) - node3 := newMockNode[types.ID, nodeClient](t) + node3 := newMockNode[ID, nodeClient](t) node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(110)}) node3.On("Order").Maybe().Return(int32(1)) - node4 := newMockNode[types.ID, nodeClient](t) + node4 := newMockNode[ID, nodeClient](t) node4.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(105)}) node4.On("Order").Maybe().Return(int32(2)) - nodes := []Node[types.ID, nodeClient]{node1, node2, node3, node4} + nodes := []Node[ID, nodeClient]{node1, node2, node3, node4} selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) // Should select the third node as it has the highest td and will win the priority tie-breaker assert.Same(t, nodes[2], selector.Select()) diff --git a/multinode/node_test.go b/multinode/node_test.go index 9ffa2d8..54316fd 100644 --- a/multinode/node_test.go +++ b/multinode/node_test.go @@ -8,7 +8,6 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-framework/multinode/mocks" - "github.com/smartcontractkit/chainlink-framework/types" ) type testNodeConfig struct { @@ -60,7 +59,7 @@ func (n testNodeConfig) DeathDeclarationDelay() time.Duration { } type testNode struct { - *node[types.ID, Head, RPCClient[types.ID, Head]] + *node[ID, Head, RPCClient[ID, Head]] } type testNodeOpts struct { @@ -71,9 +70,9 @@ type testNodeOpts struct { httpuri *url.URL name string id int - chainID types.ID + chainID ID nodeOrder int32 - rpc *mockRPCClient[types.ID, Head] + rpc *mockRPCClient[ID, Head] chainFamily string } @@ -91,17 +90,17 @@ func newTestNode(t *testing.T, opts testNodeOpts) testNode { } if opts.chainID == nil { - opts.chainID = types.RandomID() + opts.chainID = RandomID() } if opts.id == 0 { opts.id = 42 } - nodeI := NewNode[types.ID, Head, RPCClient[types.ID, Head]](opts.config, opts.chainConfig, opts.lggr, + nodeI := NewNode[ID, Head, RPCClient[ID, Head]](opts.config, opts.chainConfig, opts.lggr, opts.wsuri, opts.httpuri, opts.name, opts.id, opts.chainID, opts.nodeOrder, opts.rpc, opts.chainFamily) return testNode{ - nodeI.(*node[types.ID, Head, RPCClient[types.ID, Head]]), + nodeI.(*node[ID, Head, RPCClient[ID, Head]]), } } diff --git a/multinode/poller.go b/multinode/poller.go index 8726bb6..9db0544 100644 --- a/multinode/poller.go +++ b/multinode/poller.go @@ -6,8 +6,6 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" - - "github.com/smartcontractkit/chainlink-framework/types" ) // Poller is a component that polls a function at a given interval @@ -44,7 +42,7 @@ func NewPoller[ return p, channel } -var _ types.Subscription = &Poller[any]{} +var _ Subscription = &Poller[any]{} func (p *Poller[T]) start(ctx context.Context) error { p.eng.Go(p.pollingLoop) diff --git a/multinode/send_only_node.go b/multinode/send_only_node.go index 1913af4..68ef25d 100644 --- 
a/multinode/send_only_node.go +++ b/multinode/send_only_node.go @@ -8,12 +8,10 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" - - "github.com/smartcontractkit/chainlink-framework/types" ) type sendOnlyClient[ - CHAIN_ID types.ID, + CHAIN_ID ID, ] interface { Close() ChainID(context.Context) (CHAIN_ID, error) @@ -22,7 +20,7 @@ type sendOnlyClient[ // SendOnlyNode represents one node used as a sendonly type SendOnlyNode[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC any, ] interface { // Start may attempt to connect to the node, but should only return error for misconfiguration - never for temporary errors. @@ -42,7 +40,7 @@ type SendOnlyNode[ // It only supports sending transactions // It must use an http(s) url type sendOnlyNode[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC sendOnlyClient[CHAIN_ID], ] struct { services.StateMachine @@ -61,7 +59,7 @@ type sendOnlyNode[ // NewSendOnlyNode returns a new sendonly node func NewSendOnlyNode[ - CHAIN_ID types.ID, + CHAIN_ID ID, RPC sendOnlyClient[CHAIN_ID], ]( lggr logger.Logger, diff --git a/multinode/send_only_node_lifecycle.go b/multinode/send_only_node_lifecycle.go index 6266e14..fe88465 100644 --- a/multinode/send_only_node_lifecycle.go +++ b/multinode/send_only_node_lifecycle.go @@ -3,8 +3,6 @@ package multinode import ( "fmt" "time" - - "github.com/smartcontractkit/chainlink-framework/utils" ) // verifyLoop may only be triggered once, on Start, if initial chain ID check @@ -16,7 +14,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) verifyLoop() { ctx, cancel := s.chStop.NewCtx() defer cancel() - backoff := utils.NewRedialBackoff() + backoff := NewRedialBackoff() for { select { case <-ctx.Done(): diff --git a/multinode/send_only_node_test.go b/multinode/send_only_node_test.go index 285d083..fa239bd 100644 --- a/multinode/send_only_node_test.go +++ b/multinode/send_only_node_test.go @@ -13,8 +13,6 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" - - "github.com/smartcontractkit/chainlink-framework/types" ) func TestNewSendOnlyNode(t *testing.T) { @@ -27,8 +25,8 @@ func TestNewSendOnlyNode(t *testing.T) { redacted := fmt.Sprintf(urlFormat, "xxxxx") lggr := logger.Test(t) name := "TestNewSendOnlyNode" - chainID := types.RandomID() - client := newMockSendOnlyClient[types.ID](t) + chainID := RandomID() + client := newMockSendOnlyClient[ID](t) node := NewSendOnlyNode(lggr, *u, name, chainID, client) assert.NotNil(t, node) @@ -43,11 +41,11 @@ func TestStartSendOnlyNode(t *testing.T) { t.Run("becomes unusable if initial dial fails", func(t *testing.T) { t.Parallel() lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) - client := newMockSendOnlyClient[types.ID](t) + client := newMockSendOnlyClient[ID](t) client.On("Close").Once() expectedError := errors.New("some http error") client.On("Dial", mock.Anything).Return(expectedError).Once() - s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), types.RandomID(), client) + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), RandomID(), client) defer func() { assert.NoError(t, s.Close()) }() err := s.Start(tests.Context(t)) @@ -59,10 +57,10 @@ func TestStartSendOnlyNode(t *testing.T) { t.Run("Default ChainID(0) produces warn and skips checks", func(t *testing.T) { t.Parallel() lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) - client := newMockSendOnlyClient[types.ID](t) + client := newMockSendOnlyClient[ID](t) client.On("Close").Once() 
client.On("Dial", mock.Anything).Return(nil).Once() - s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), types.NewIDFromInt(0), client) + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), NewIDFromInt(0), client) defer func() { assert.NoError(t, s.Close()) }() err := s.Start(tests.Context(t)) @@ -74,13 +72,13 @@ func TestStartSendOnlyNode(t *testing.T) { t.Run("Can recover from chainID verification failure", func(t *testing.T) { t.Parallel() lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) - client := newMockSendOnlyClient[types.ID](t) + client := newMockSendOnlyClient[ID](t) client.On("Close").Once() client.On("Dial", mock.Anything).Return(nil) expectedError := errors.New("failed to get chain ID") - chainID := types.RandomID() + chainID := RandomID() const failuresCount = 2 - client.On("ChainID", mock.Anything).Return(types.RandomID(), expectedError).Times(failuresCount) + client.On("ChainID", mock.Anything).Return(RandomID(), expectedError).Times(failuresCount) client.On("ChainID", mock.Anything).Return(chainID, nil) s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), chainID, client) @@ -98,11 +96,11 @@ func TestStartSendOnlyNode(t *testing.T) { t.Run("Can recover from chainID mismatch", func(t *testing.T) { t.Parallel() lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) - client := newMockSendOnlyClient[types.ID](t) + client := newMockSendOnlyClient[ID](t) client.On("Close").Once() client.On("Dial", mock.Anything).Return(nil).Once() - configuredChainID := types.NewIDFromInt(11) - rpcChainID := types.NewIDFromInt(20) + configuredChainID := NewIDFromInt(11) + rpcChainID := NewIDFromInt(20) const failuresCount = 2 client.On("ChainID", mock.Anything).Return(rpcChainID, nil).Times(failuresCount) client.On("ChainID", mock.Anything).Return(configuredChainID, nil) @@ -121,10 +119,10 @@ func TestStartSendOnlyNode(t *testing.T) { t.Run("Start with Random ChainID", func(t *testing.T) { t.Parallel() lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) - client := newMockSendOnlyClient[types.ID](t) + client := newMockSendOnlyClient[ID](t) client.On("Close").Once() client.On("Dial", mock.Anything).Return(nil).Once() - configuredChainID := types.RandomID() + configuredChainID := RandomID() client.On("ChainID", mock.Anything).Return(configuredChainID, nil) s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), configuredChainID, client) diff --git a/multinode/transaction_sender.go b/multinode/transaction_sender.go index c516579..49322d4 100644 --- a/multinode/transaction_sender.go +++ b/multinode/transaction_sender.go @@ -13,8 +13,6 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" - - "github.com/smartcontractkit/chainlink-framework/types" ) var ( @@ -38,7 +36,7 @@ type SendTxRPCClient[TX any, RESULT SendTxResult] interface { SendTransaction(ctx context.Context, tx TX) RESULT } -func NewTransactionSender[TX any, RESULT SendTxResult, CHAIN_ID types.ID, RPC SendTxRPCClient[TX, RESULT]]( +func NewTransactionSender[TX any, RESULT SendTxResult, CHAIN_ID ID, RPC SendTxRPCClient[TX, RESULT]]( lggr logger.Logger, chainID CHAIN_ID, chainFamily string, @@ -60,7 +58,7 @@ func NewTransactionSender[TX any, RESULT SendTxResult, CHAIN_ID types.ID, RPC Se } } -type TransactionSender[TX any, RESULT SendTxResult, CHAIN_ID types.ID, RPC SendTxRPCClient[TX, RESULT]] struct { +type TransactionSender[TX any, RESULT SendTxResult, CHAIN_ID ID, RPC SendTxRPCClient[TX, RESULT]] struct { services.StateMachine chainID CHAIN_ID chainFamily 
string diff --git a/multinode/transaction_sender_test.go b/multinode/transaction_sender_test.go index b601417..6405639 100644 --- a/multinode/transaction_sender_test.go +++ b/multinode/transaction_sender_test.go @@ -14,13 +14,12 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" - "github.com/smartcontractkit/chainlink-framework/types" ) type TestSendTxRPCClient SendTxRPCClient[any, *sendTxResult] type sendTxMultiNode struct { - *MultiNode[types.ID, TestSendTxRPCClient] + *MultiNode[ID, TestSendTxRPCClient] } type sendTxRPC struct { @@ -65,14 +64,14 @@ func (rpc *sendTxRPC) SendTransaction(ctx context.Context, _ any) *sendTxResult // newTestTransactionSender returns a sendTxMultiNode and TransactionSender. // Only the TransactionSender is run via Start/Close. -func newTestTransactionSender(t *testing.T, chainID types.ID, lggr logger.Logger, - nodes []Node[types.ID, TestSendTxRPCClient], - sendOnlyNodes []SendOnlyNode[types.ID, TestSendTxRPCClient], -) (*sendTxMultiNode, *TransactionSender[any, *sendTxResult, types.ID, TestSendTxRPCClient]) { - mn := sendTxMultiNode{NewMultiNode[types.ID, TestSendTxRPCClient]( +func newTestTransactionSender(t *testing.T, chainID ID, lggr logger.Logger, + nodes []Node[ID, TestSendTxRPCClient], + sendOnlyNodes []SendOnlyNode[ID, TestSendTxRPCClient], +) (*sendTxMultiNode, *TransactionSender[any, *sendTxResult, ID, TestSendTxRPCClient]) { + mn := sendTxMultiNode{NewMultiNode[ID, TestSendTxRPCClient]( lggr, NodeSelectionModeRoundRobin, 0, nodes, sendOnlyNodes, chainID, "chainFamily", 0)} - txSender := NewTransactionSender[any, *sendTxResult, types.ID, TestSendTxRPCClient](lggr, chainID, mn.chainFamily, mn.MultiNode, NewSendTxResult, tests.TestInterval) + txSender := NewTransactionSender[any, *sendTxResult, ID, TestSendTxRPCClient](lggr, chainID, mn.chainFamily, mn.MultiNode, NewSendTxResult, tests.TestInterval) servicetest.Run(t, txSender) return &mn, txSender } @@ -87,9 +86,9 @@ func classifySendTxError(_ any, err error) SendTxReturnCode { func TestTransactionSender_SendTransaction(t *testing.T) { t.Parallel() - newNodeWithState := func(t *testing.T, state nodeState, txErr error, sendTxRun func(args mock.Arguments)) *mockNode[types.ID, TestSendTxRPCClient] { + newNodeWithState := func(t *testing.T, state nodeState, txErr error, sendTxRun func(args mock.Arguments)) *mockNode[ID, TestSendTxRPCClient] { rpc := newSendTxRPC(txErr, sendTxRun) - node := newMockNode[types.ID, TestSendTxRPCClient](t) + node := newMockNode[ID, TestSendTxRPCClient](t) node.On("String").Return("node name").Maybe() node.On("RPC").Return(rpc).Maybe() node.On("State").Return(state).Maybe() @@ -99,13 +98,13 @@ func TestTransactionSender_SendTransaction(t *testing.T) { return node } - newNode := func(t *testing.T, txErr error, sendTxRun func(args mock.Arguments)) *mockNode[types.ID, TestSendTxRPCClient] { + newNode := func(t *testing.T, txErr error, sendTxRun func(args mock.Arguments)) *mockNode[ID, TestSendTxRPCClient] { return newNodeWithState(t, nodeStateAlive, txErr, sendTxRun) } t.Run("Fails if there is no nodes available", func(t *testing.T) { lggr := logger.Test(t) - _, txSender := newTestTransactionSender(t, types.RandomID(), lggr, nil, nil) + _, txSender := newTestTransactionSender(t, RandomID(), lggr, nil, nil) result := txSender.SendTransaction(tests.Context(t), nil) assert.EqualError(t, result.Error(), ErroringNodeError.Error()) 
}) @@ -115,9 +114,9 @@ func TestTransactionSender_SendTransaction(t *testing.T) { mainNode := newNode(t, expectedError, nil) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) - _, txSender := newTestTransactionSender(t, types.RandomID(), lggr, - []Node[types.ID, TestSendTxRPCClient]{mainNode}, - []SendOnlyNode[types.ID, TestSendTxRPCClient]{newNode(t, errors.New("unexpected error"), nil)}) + _, txSender := newTestTransactionSender(t, RandomID(), lggr, + []Node[ID, TestSendTxRPCClient]{mainNode}, + []SendOnlyNode[ID, TestSendTxRPCClient]{newNode(t, errors.New("unexpected error"), nil)}) result := txSender.SendTransaction(tests.Context(t), nil) require.ErrorIs(t, result.Error(), expectedError) @@ -130,9 +129,9 @@ func TestTransactionSender_SendTransaction(t *testing.T) { mainNode := newNode(t, nil, nil) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) - _, txSender := newTestTransactionSender(t, types.RandomID(), lggr, - []Node[types.ID, TestSendTxRPCClient]{mainNode}, - []SendOnlyNode[types.ID, TestSendTxRPCClient]{newNode(t, errors.New("unexpected error"), nil)}) + _, txSender := newTestTransactionSender(t, RandomID(), lggr, + []Node[ID, TestSendTxRPCClient]{mainNode}, + []SendOnlyNode[ID, TestSendTxRPCClient]{newNode(t, errors.New("unexpected error"), nil)}) result := txSender.SendTransaction(tests.Context(t), nil) require.NoError(t, result.Error()) @@ -152,8 +151,8 @@ func TestTransactionSender_SendTransaction(t *testing.T) { lggr := logger.Test(t) - _, txSender := newTestTransactionSender(t, types.RandomID(), lggr, - []Node[types.ID, TestSendTxRPCClient]{mainNode}, nil) + _, txSender := newTestTransactionSender(t, RandomID(), lggr, + []Node[ID, TestSendTxRPCClient]{mainNode}, nil) requestContext, cancel := context.WithCancel(tests.Context(t)) cancel() @@ -162,7 +161,7 @@ func TestTransactionSender_SendTransaction(t *testing.T) { }) t.Run("Soft timeout stops results collection", func(t *testing.T) { - chainID := types.RandomID() + chainID := RandomID() expectedError := errors.New("transaction failed") fastNode := newNode(t, expectedError, nil) @@ -176,12 +175,12 @@ func TestTransactionSender_SendTransaction(t *testing.T) { lggr := logger.Test(t) - _, txSender := newTestTransactionSender(t, chainID, lggr, []Node[types.ID, TestSendTxRPCClient]{fastNode, slowNode}, nil) + _, txSender := newTestTransactionSender(t, chainID, lggr, []Node[ID, TestSendTxRPCClient]{fastNode, slowNode}, nil) result := txSender.SendTransaction(tests.Context(t), nil) require.EqualError(t, result.Error(), expectedError.Error()) }) t.Run("Returns success without waiting for the rest of the nodes", func(t *testing.T) { - chainID := types.RandomID() + chainID := RandomID() fastNode := newNode(t, nil, nil) // hold reply from the node till end of the test testContext, testCancel := context.WithCancel(tests.Context(t)) @@ -196,15 +195,15 @@ func TestTransactionSender_SendTransaction(t *testing.T) { }) lggr, _ := logger.TestObserved(t, zap.WarnLevel) _, txSender := newTestTransactionSender(t, chainID, lggr, - []Node[types.ID, TestSendTxRPCClient]{fastNode, slowNode}, - []SendOnlyNode[types.ID, TestSendTxRPCClient]{slowSendOnly}) + []Node[ID, TestSendTxRPCClient]{fastNode, slowNode}, + []SendOnlyNode[ID, TestSendTxRPCClient]{slowSendOnly}) result := txSender.SendTransaction(tests.Context(t), nil) require.NoError(t, result.Error()) require.Equal(t, Successful, result.Code()) }) t.Run("Fails when multinode is closed", func(t *testing.T) { - chainID := types.RandomID() + chainID := RandomID() fastNode := 
newNode(t, nil, nil) fastNode.On("ConfiguredChainID").Return(chainID).Maybe() // hold reply from the node till end of the test @@ -224,8 +223,8 @@ func TestTransactionSender_SendTransaction(t *testing.T) { lggr, _ := logger.TestObserved(t, zap.DebugLevel) mn, txSender := newTestTransactionSender(t, chainID, lggr, - []Node[types.ID, TestSendTxRPCClient]{fastNode, slowNode}, - []SendOnlyNode[types.ID, TestSendTxRPCClient]{slowSendOnly}) + []Node[ID, TestSendTxRPCClient]{fastNode, slowNode}, + []SendOnlyNode[ID, TestSendTxRPCClient]{slowSendOnly}) require.NoError(t, mn.Start(tests.Context(t))) require.NoError(t, mn.Close()) @@ -233,7 +232,7 @@ func TestTransactionSender_SendTransaction(t *testing.T) { require.EqualError(t, result.Error(), "service is stopped") }) t.Run("Fails when closed", func(t *testing.T) { - chainID := types.RandomID() + chainID := RandomID() fastNode := newNode(t, nil, nil) // hold reply from the node till end of the test testContext, testCancel := context.WithCancel(tests.Context(t)) @@ -247,7 +246,7 @@ func TestTransactionSender_SendTransaction(t *testing.T) { <-testContext.Done() }) - var txSender *TransactionSender[any, *sendTxResult, types.ID, TestSendTxRPCClient] + var txSender *TransactionSender[any, *sendTxResult, ID, TestSendTxRPCClient] t.Cleanup(func() { // after txSender.Close() result := txSender.SendTransaction(tests.Context(t), nil) @@ -255,27 +254,27 @@ func TestTransactionSender_SendTransaction(t *testing.T) { }) _, txSender = newTestTransactionSender(t, chainID, logger.Test(t), - []Node[types.ID, TestSendTxRPCClient]{fastNode, slowNode}, - []SendOnlyNode[types.ID, TestSendTxRPCClient]{slowSendOnly}) + []Node[ID, TestSendTxRPCClient]{fastNode, slowNode}, + []SendOnlyNode[ID, TestSendTxRPCClient]{slowSendOnly}) }) t.Run("Returns error if there is no healthy primary nodes", func(t *testing.T) { - chainID := types.RandomID() + chainID := RandomID() primary := newNodeWithState(t, nodeStateUnreachable, nil, nil) sendOnly := newNodeWithState(t, nodeStateUnreachable, nil, nil) lggr := logger.Test(t) _, txSender := newTestTransactionSender(t, chainID, lggr, - []Node[types.ID, TestSendTxRPCClient]{primary}, - []SendOnlyNode[types.ID, TestSendTxRPCClient]{sendOnly}) + []Node[ID, TestSendTxRPCClient]{primary}, + []SendOnlyNode[ID, TestSendTxRPCClient]{sendOnly}) result := txSender.SendTransaction(tests.Context(t), nil) assert.EqualError(t, result.Error(), ErroringNodeError.Error()) }) t.Run("Transaction success even if one of the nodes is unhealthy", func(t *testing.T) { - chainID := types.RandomID() + chainID := RandomID() mainNode := newNode(t, nil, nil) unexpectedCall := func(args mock.Arguments) { panic("SendTx must not be called for unhealthy node") @@ -286,8 +285,8 @@ func TestTransactionSender_SendTransaction(t *testing.T) { lggr := logger.Test(t) _, txSender := newTestTransactionSender(t, chainID, lggr, - []Node[types.ID, TestSendTxRPCClient]{mainNode, unhealthyNode}, - []SendOnlyNode[types.ID, TestSendTxRPCClient]{unhealthySendOnlyNode}) + []Node[ID, TestSendTxRPCClient]{mainNode, unhealthyNode}, + []SendOnlyNode[ID, TestSendTxRPCClient]{unhealthySendOnlyNode}) result := txSender.SendTransaction(tests.Context(t), nil) require.NoError(t, result.Error()) diff --git a/multinode/types.go b/multinode/types.go index ab5513e..d26c25e 100644 --- a/multinode/types.go +++ b/multinode/types.go @@ -2,14 +2,39 @@ package multinode import ( "context" + "fmt" "math/big" - - "github.com/smartcontractkit/chainlink-framework/types" ) +// ID represents the base type, for any 
chain's ID. +// It should be convertible to a string that can uniquely identify this chain +type ID fmt.Stringer + +// StringID enables using string directly as a ChainID +type StringID string + +func (s StringID) String() string { + return string(s) +} + +// Subscription represents an event subscription where events are +// delivered on a data channel. +// This is a generic interface used by clients to represent a subscription. +type Subscription interface { + // Unsubscribe cancels the sending of events to the data channel + // and closes the error channel. Unsubscribe should be callable multiple + // times without causing an error. + Unsubscribe() + // Err returns the subscription error channel. The error channel receives + // a value if there is an issue with the subscription (e.g. the network connection + // delivering the events has been closed). Only one value will ever be sent. + // The error channel is closed by Unsubscribe. + Err() <-chan error +} + // RPCClient includes all the necessary generalized RPC methods used by Node to perform health checks type RPCClient[ - CHAIN_ID types.ID, + CHAIN_ID ID, HEAD Head, ] interface { // ChainID - fetches ChainID from the RPC to verify that it matches config @@ -17,15 +42,15 @@ // Dial - prepares the RPC for usage. Can be called on fresh or closed RPC Dial(ctx context.Context) error // SubscribeToHeads - returns channel and subscription for new heads. - SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) + SubscribeToHeads(ctx context.Context) (<-chan HEAD, Subscription, error) // SubscribeToFinalizedHeads - returns channel and subscription for finalized heads. - SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) + SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, Subscription, error) // Ping - returns error if RPC is not reachable Ping(context.Context) error // IsSyncing - returns true if the RPC is in Syncing state and can not process calls IsSyncing(ctx context.Context) (bool, error) // UnsubscribeAllExcept - close all subscriptions except `subs` - UnsubscribeAllExcept(subs ...types.Subscription) + UnsubscribeAllExcept(subs ...Subscription) // Close - closes all subscriptions and aborts all RPC calls Close() // GetInterceptedChainInfo - returns latest and highest observed by application layer ChainInfo. diff --git a/multinode/utils.go b/multinode/utils.go new file mode 100644 index 0000000..7fcac2a --- /dev/null +++ b/multinode/utils.go @@ -0,0 +1,29 @@ +package multinode + +import ( + "math" + "math/big" + "math/rand" + "time" + + "github.com/jpillora/backoff" +) + +func RandomID() ID { + id := rand.Int63n(math.MaxInt32) + 10000 + return big.NewInt(id) +} + +func NewIDFromInt(id int64) ID { + return big.NewInt(id) +} + +// NewRedialBackoff is a standard backoff to use for redialling or reconnecting to +// unreachable network endpoints +func NewRedialBackoff() backoff.Backoff { + return backoff.Backoff{ + Min: 1 * time.Second, + Max: 15 * time.Second, + Jitter: true, + } +} diff --git a/types/chain.go b/types/chain.go deleted file mode 100644 index e928cf1..0000000 --- a/types/chain.go +++ /dev/null @@ -1,32 +0,0 @@ -package types - -import ( - "fmt" - - "github.com/smartcontractkit/chainlink-common/pkg/types" -) - -// Sequence represents the base type, for any chain's sequence object.
-// It should be convertible to a string -type Sequence interface { - fmt.Stringer - Int64() int64 // needed for numeric sequence confirmation - to be removed with confirmation logic generalization: https://smartcontract-it.atlassian.net/browse/BCI-860 -} - -// ID represents the base type, for any chain's ID. -// It should be convertible to a string, that can uniquely identify this chain -type ID fmt.Stringer - -// StringID enables using string directly as a ChainID -type StringID string - -func (s StringID) String() string { - return string(s) -} - -// ChainStatusWithID compose of ChainStatus and RelayID. This is useful for -// storing the Network associated with the ChainStatus. -type ChainStatusWithID struct { - types.ChainStatus - types.RelayID -} diff --git a/types/hashable.go b/types/hashable.go deleted file mode 100644 index 2d16650..0000000 --- a/types/hashable.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -import "fmt" - -// A chain-agnostic generic interface to represent the following native types on various chains: -// PublicKey, Address, Account, BlockHash, TxHash -type Hashable interface { - fmt.Stringer - comparable - - Bytes() []byte -} diff --git a/types/head.go b/types/head.go deleted file mode 100644 index 5252c8f..0000000 --- a/types/head.go +++ /dev/null @@ -1,45 +0,0 @@ -package types - -import ( - "math/big" - "time" -) - -// Head provides access to a chain's head, as needed by the TxManager. -// This is a generic interface which ALL chains will implement. -type Head[BLOCK_HASH Hashable] interface { - // BlockNumber is the head's block number - BlockNumber() int64 - - // Timestamp the time of mining of the block - GetTimestamp() time.Time - - // ChainLength returns the length of the chain followed by recursively looking up parents - ChainLength() uint32 - - // EarliestHeadInChain traverses through parents until it finds the earliest one - EarliestHeadInChain() Head[BLOCK_HASH] - - // Parent is the head's parent block - GetParent() Head[BLOCK_HASH] - - // Hash is the head's block hash - BlockHash() BLOCK_HASH - GetParentHash() BLOCK_HASH - - // HashAtHeight returns the hash of the block at the given height, if it is in the chain. - // If not in chain, returns the zero hash - HashAtHeight(blockNum int64) BLOCK_HASH - - // HeadAtHeight returns head at specified height or an error, if one does not exist in provided chain. - HeadAtHeight(blockNum int64) (Head[BLOCK_HASH], error) - - // Returns the total difficulty of the block. For chains who do not have a concept of block - // difficulty, return 0. - BlockDifficulty() *big.Int - // IsValid returns true if the head is valid. - IsValid() bool - - // Returns the latest finalized based on finality tag or depth - LatestFinalizedHead() Head[BLOCK_HASH] -} diff --git a/types/mocks/head.go b/types/mocks/head.go deleted file mode 100644 index bc59669..0000000 --- a/types/mocks/head.go +++ /dev/null @@ -1,601 +0,0 @@ -// Code generated by mockery v2.46.3. DO NOT EDIT. 
- -package mocks - -import ( - big "math/big" - time "time" - - mock "github.com/stretchr/testify/mock" - - types "github.com/smartcontractkit/chainlink-framework/types" -) - -// Head is an autogenerated mock type for the Head type -type Head[BLOCK_HASH types.Hashable] struct { - mock.Mock -} - -type Head_Expecter[BLOCK_HASH types.Hashable] struct { - mock *mock.Mock -} - -func (_m *Head[BLOCK_HASH]) EXPECT() *Head_Expecter[BLOCK_HASH] { - return &Head_Expecter[BLOCK_HASH]{mock: &_m.Mock} -} - -// BlockDifficulty provides a mock function with given fields: -func (_m *Head[BLOCK_HASH]) BlockDifficulty() *big.Int { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for BlockDifficulty") - } - - var r0 *big.Int - if rf, ok := ret.Get(0).(func() *big.Int); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - return r0 -} - -// Head_BlockDifficulty_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockDifficulty' -type Head_BlockDifficulty_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// BlockDifficulty is a helper method to define mock.On call -func (_e *Head_Expecter[BLOCK_HASH]) BlockDifficulty() *Head_BlockDifficulty_Call[BLOCK_HASH] { - return &Head_BlockDifficulty_Call[BLOCK_HASH]{Call: _e.mock.On("BlockDifficulty")} -} - -func (_c *Head_BlockDifficulty_Call[BLOCK_HASH]) Run(run func()) *Head_BlockDifficulty_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Head_BlockDifficulty_Call[BLOCK_HASH]) Return(_a0 *big.Int) *Head_BlockDifficulty_Call[BLOCK_HASH] { - _c.Call.Return(_a0) - return _c -} - -func (_c *Head_BlockDifficulty_Call[BLOCK_HASH]) RunAndReturn(run func() *big.Int) *Head_BlockDifficulty_Call[BLOCK_HASH] { - _c.Call.Return(run) - return _c -} - -// BlockHash provides a mock function with given fields: -func (_m *Head[BLOCK_HASH]) BlockHash() BLOCK_HASH { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for BlockHash") - } - - var r0 BLOCK_HASH - if rf, ok := ret.Get(0).(func() BLOCK_HASH); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(BLOCK_HASH) - } - - return r0 -} - -// Head_BlockHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockHash' -type Head_BlockHash_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// BlockHash is a helper method to define mock.On call -func (_e *Head_Expecter[BLOCK_HASH]) BlockHash() *Head_BlockHash_Call[BLOCK_HASH] { - return &Head_BlockHash_Call[BLOCK_HASH]{Call: _e.mock.On("BlockHash")} -} - -func (_c *Head_BlockHash_Call[BLOCK_HASH]) Run(run func()) *Head_BlockHash_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Head_BlockHash_Call[BLOCK_HASH]) Return(_a0 BLOCK_HASH) *Head_BlockHash_Call[BLOCK_HASH] { - _c.Call.Return(_a0) - return _c -} - -func (_c *Head_BlockHash_Call[BLOCK_HASH]) RunAndReturn(run func() BLOCK_HASH) *Head_BlockHash_Call[BLOCK_HASH] { - _c.Call.Return(run) - return _c -} - -// BlockNumber provides a mock function with given fields: -func (_m *Head[BLOCK_HASH]) BlockNumber() int64 { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for BlockNumber") - } - - var r0 int64 - if rf, ok := ret.Get(0).(func() int64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(int64) - } - - return r0 -} - -// Head_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'BlockNumber' -type Head_BlockNumber_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// BlockNumber is a helper method to define mock.On call -func (_e *Head_Expecter[BLOCK_HASH]) BlockNumber() *Head_BlockNumber_Call[BLOCK_HASH] { - return &Head_BlockNumber_Call[BLOCK_HASH]{Call: _e.mock.On("BlockNumber")} -} - -func (_c *Head_BlockNumber_Call[BLOCK_HASH]) Run(run func()) *Head_BlockNumber_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Head_BlockNumber_Call[BLOCK_HASH]) Return(_a0 int64) *Head_BlockNumber_Call[BLOCK_HASH] { - _c.Call.Return(_a0) - return _c -} - -func (_c *Head_BlockNumber_Call[BLOCK_HASH]) RunAndReturn(run func() int64) *Head_BlockNumber_Call[BLOCK_HASH] { - _c.Call.Return(run) - return _c -} - -// ChainLength provides a mock function with given fields: -func (_m *Head[BLOCK_HASH]) ChainLength() uint32 { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for ChainLength") - } - - var r0 uint32 - if rf, ok := ret.Get(0).(func() uint32); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint32) - } - - return r0 -} - -// Head_ChainLength_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChainLength' -type Head_ChainLength_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// ChainLength is a helper method to define mock.On call -func (_e *Head_Expecter[BLOCK_HASH]) ChainLength() *Head_ChainLength_Call[BLOCK_HASH] { - return &Head_ChainLength_Call[BLOCK_HASH]{Call: _e.mock.On("ChainLength")} -} - -func (_c *Head_ChainLength_Call[BLOCK_HASH]) Run(run func()) *Head_ChainLength_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Head_ChainLength_Call[BLOCK_HASH]) Return(_a0 uint32) *Head_ChainLength_Call[BLOCK_HASH] { - _c.Call.Return(_a0) - return _c -} - -func (_c *Head_ChainLength_Call[BLOCK_HASH]) RunAndReturn(run func() uint32) *Head_ChainLength_Call[BLOCK_HASH] { - _c.Call.Return(run) - return _c -} - -// EarliestHeadInChain provides a mock function with given fields: -func (_m *Head[BLOCK_HASH]) EarliestHeadInChain() types.Head[BLOCK_HASH] { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for EarliestHeadInChain") - } - - var r0 types.Head[BLOCK_HASH] - if rf, ok := ret.Get(0).(func() types.Head[BLOCK_HASH]); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(types.Head[BLOCK_HASH]) - } - } - - return r0 -} - -// Head_EarliestHeadInChain_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EarliestHeadInChain' -type Head_EarliestHeadInChain_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// EarliestHeadInChain is a helper method to define mock.On call -func (_e *Head_Expecter[BLOCK_HASH]) EarliestHeadInChain() *Head_EarliestHeadInChain_Call[BLOCK_HASH] { - return &Head_EarliestHeadInChain_Call[BLOCK_HASH]{Call: _e.mock.On("EarliestHeadInChain")} -} - -func (_c *Head_EarliestHeadInChain_Call[BLOCK_HASH]) Run(run func()) *Head_EarliestHeadInChain_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Head_EarliestHeadInChain_Call[BLOCK_HASH]) Return(_a0 types.Head[BLOCK_HASH]) *Head_EarliestHeadInChain_Call[BLOCK_HASH] { - _c.Call.Return(_a0) - return _c -} - -func (_c *Head_EarliestHeadInChain_Call[BLOCK_HASH]) RunAndReturn(run func() types.Head[BLOCK_HASH]) *Head_EarliestHeadInChain_Call[BLOCK_HASH] { - 
_c.Call.Return(run) - return _c -} - -// GetParent provides a mock function with given fields: -func (_m *Head[BLOCK_HASH]) GetParent() types.Head[BLOCK_HASH] { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetParent") - } - - var r0 types.Head[BLOCK_HASH] - if rf, ok := ret.Get(0).(func() types.Head[BLOCK_HASH]); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(types.Head[BLOCK_HASH]) - } - } - - return r0 -} - -// Head_GetParent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetParent' -type Head_GetParent_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// GetParent is a helper method to define mock.On call -func (_e *Head_Expecter[BLOCK_HASH]) GetParent() *Head_GetParent_Call[BLOCK_HASH] { - return &Head_GetParent_Call[BLOCK_HASH]{Call: _e.mock.On("GetParent")} -} - -func (_c *Head_GetParent_Call[BLOCK_HASH]) Run(run func()) *Head_GetParent_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Head_GetParent_Call[BLOCK_HASH]) Return(_a0 types.Head[BLOCK_HASH]) *Head_GetParent_Call[BLOCK_HASH] { - _c.Call.Return(_a0) - return _c -} - -func (_c *Head_GetParent_Call[BLOCK_HASH]) RunAndReturn(run func() types.Head[BLOCK_HASH]) *Head_GetParent_Call[BLOCK_HASH] { - _c.Call.Return(run) - return _c -} - -// GetParentHash provides a mock function with given fields: -func (_m *Head[BLOCK_HASH]) GetParentHash() BLOCK_HASH { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetParentHash") - } - - var r0 BLOCK_HASH - if rf, ok := ret.Get(0).(func() BLOCK_HASH); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(BLOCK_HASH) - } - - return r0 -} - -// Head_GetParentHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetParentHash' -type Head_GetParentHash_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// GetParentHash is a helper method to define mock.On call -func (_e *Head_Expecter[BLOCK_HASH]) GetParentHash() *Head_GetParentHash_Call[BLOCK_HASH] { - return &Head_GetParentHash_Call[BLOCK_HASH]{Call: _e.mock.On("GetParentHash")} -} - -func (_c *Head_GetParentHash_Call[BLOCK_HASH]) Run(run func()) *Head_GetParentHash_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Head_GetParentHash_Call[BLOCK_HASH]) Return(_a0 BLOCK_HASH) *Head_GetParentHash_Call[BLOCK_HASH] { - _c.Call.Return(_a0) - return _c -} - -func (_c *Head_GetParentHash_Call[BLOCK_HASH]) RunAndReturn(run func() BLOCK_HASH) *Head_GetParentHash_Call[BLOCK_HASH] { - _c.Call.Return(run) - return _c -} - -// GetTimestamp provides a mock function with given fields: -func (_m *Head[BLOCK_HASH]) GetTimestamp() time.Time { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetTimestamp") - } - - var r0 time.Time - if rf, ok := ret.Get(0).(func() time.Time); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(time.Time) - } - - return r0 -} - -// Head_GetTimestamp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTimestamp' -type Head_GetTimestamp_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// GetTimestamp is a helper method to define mock.On call -func (_e *Head_Expecter[BLOCK_HASH]) GetTimestamp() *Head_GetTimestamp_Call[BLOCK_HASH] { - return &Head_GetTimestamp_Call[BLOCK_HASH]{Call: _e.mock.On("GetTimestamp")} -} - -func (_c *Head_GetTimestamp_Call[BLOCK_HASH]) 
Run(run func()) *Head_GetTimestamp_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Head_GetTimestamp_Call[BLOCK_HASH]) Return(_a0 time.Time) *Head_GetTimestamp_Call[BLOCK_HASH] { - _c.Call.Return(_a0) - return _c -} - -func (_c *Head_GetTimestamp_Call[BLOCK_HASH]) RunAndReturn(run func() time.Time) *Head_GetTimestamp_Call[BLOCK_HASH] { - _c.Call.Return(run) - return _c -} - -// HashAtHeight provides a mock function with given fields: blockNum -func (_m *Head[BLOCK_HASH]) HashAtHeight(blockNum int64) BLOCK_HASH { - ret := _m.Called(blockNum) - - if len(ret) == 0 { - panic("no return value specified for HashAtHeight") - } - - var r0 BLOCK_HASH - if rf, ok := ret.Get(0).(func(int64) BLOCK_HASH); ok { - r0 = rf(blockNum) - } else { - r0 = ret.Get(0).(BLOCK_HASH) - } - - return r0 -} - -// Head_HashAtHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HashAtHeight' -type Head_HashAtHeight_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// HashAtHeight is a helper method to define mock.On call -// - blockNum int64 -func (_e *Head_Expecter[BLOCK_HASH]) HashAtHeight(blockNum interface{}) *Head_HashAtHeight_Call[BLOCK_HASH] { - return &Head_HashAtHeight_Call[BLOCK_HASH]{Call: _e.mock.On("HashAtHeight", blockNum)} -} - -func (_c *Head_HashAtHeight_Call[BLOCK_HASH]) Run(run func(blockNum int64)) *Head_HashAtHeight_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(int64)) - }) - return _c -} - -func (_c *Head_HashAtHeight_Call[BLOCK_HASH]) Return(_a0 BLOCK_HASH) *Head_HashAtHeight_Call[BLOCK_HASH] { - _c.Call.Return(_a0) - return _c -} - -func (_c *Head_HashAtHeight_Call[BLOCK_HASH]) RunAndReturn(run func(int64) BLOCK_HASH) *Head_HashAtHeight_Call[BLOCK_HASH] { - _c.Call.Return(run) - return _c -} - -// HeadAtHeight provides a mock function with given fields: blockNum -func (_m *Head[BLOCK_HASH]) HeadAtHeight(blockNum int64) (types.Head[BLOCK_HASH], error) { - ret := _m.Called(blockNum) - - if len(ret) == 0 { - panic("no return value specified for HeadAtHeight") - } - - var r0 types.Head[BLOCK_HASH] - var r1 error - if rf, ok := ret.Get(0).(func(int64) (types.Head[BLOCK_HASH], error)); ok { - return rf(blockNum) - } - if rf, ok := ret.Get(0).(func(int64) types.Head[BLOCK_HASH]); ok { - r0 = rf(blockNum) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(types.Head[BLOCK_HASH]) - } - } - - if rf, ok := ret.Get(1).(func(int64) error); ok { - r1 = rf(blockNum) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Head_HeadAtHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeadAtHeight' -type Head_HeadAtHeight_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// HeadAtHeight is a helper method to define mock.On call -// - blockNum int64 -func (_e *Head_Expecter[BLOCK_HASH]) HeadAtHeight(blockNum interface{}) *Head_HeadAtHeight_Call[BLOCK_HASH] { - return &Head_HeadAtHeight_Call[BLOCK_HASH]{Call: _e.mock.On("HeadAtHeight", blockNum)} -} - -func (_c *Head_HeadAtHeight_Call[BLOCK_HASH]) Run(run func(blockNum int64)) *Head_HeadAtHeight_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(int64)) - }) - return _c -} - -func (_c *Head_HeadAtHeight_Call[BLOCK_HASH]) Return(_a0 types.Head[BLOCK_HASH], _a1 error) *Head_HeadAtHeight_Call[BLOCK_HASH] { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Head_HeadAtHeight_Call[BLOCK_HASH]) RunAndReturn(run func(int64) 
(types.Head[BLOCK_HASH], error)) *Head_HeadAtHeight_Call[BLOCK_HASH] { - _c.Call.Return(run) - return _c -} - -// IsValid provides a mock function with given fields: -func (_m *Head[BLOCK_HASH]) IsValid() bool { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for IsValid") - } - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Head_IsValid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsValid' -type Head_IsValid_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// IsValid is a helper method to define mock.On call -func (_e *Head_Expecter[BLOCK_HASH]) IsValid() *Head_IsValid_Call[BLOCK_HASH] { - return &Head_IsValid_Call[BLOCK_HASH]{Call: _e.mock.On("IsValid")} -} - -func (_c *Head_IsValid_Call[BLOCK_HASH]) Run(run func()) *Head_IsValid_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Head_IsValid_Call[BLOCK_HASH]) Return(_a0 bool) *Head_IsValid_Call[BLOCK_HASH] { - _c.Call.Return(_a0) - return _c -} - -func (_c *Head_IsValid_Call[BLOCK_HASH]) RunAndReturn(run func() bool) *Head_IsValid_Call[BLOCK_HASH] { - _c.Call.Return(run) - return _c -} - -// LatestFinalizedHead provides a mock function with given fields: -func (_m *Head[BLOCK_HASH]) LatestFinalizedHead() types.Head[BLOCK_HASH] { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for LatestFinalizedHead") - } - - var r0 types.Head[BLOCK_HASH] - if rf, ok := ret.Get(0).(func() types.Head[BLOCK_HASH]); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(types.Head[BLOCK_HASH]) - } - } - - return r0 -} - -// Head_LatestFinalizedHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestFinalizedHead' -type Head_LatestFinalizedHead_Call[BLOCK_HASH types.Hashable] struct { - *mock.Call -} - -// LatestFinalizedHead is a helper method to define mock.On call -func (_e *Head_Expecter[BLOCK_HASH]) LatestFinalizedHead() *Head_LatestFinalizedHead_Call[BLOCK_HASH] { - return &Head_LatestFinalizedHead_Call[BLOCK_HASH]{Call: _e.mock.On("LatestFinalizedHead")} -} - -func (_c *Head_LatestFinalizedHead_Call[BLOCK_HASH]) Run(run func()) *Head_LatestFinalizedHead_Call[BLOCK_HASH] { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Head_LatestFinalizedHead_Call[BLOCK_HASH]) Return(_a0 types.Head[BLOCK_HASH]) *Head_LatestFinalizedHead_Call[BLOCK_HASH] { - _c.Call.Return(_a0) - return _c -} - -func (_c *Head_LatestFinalizedHead_Call[BLOCK_HASH]) RunAndReturn(run func() types.Head[BLOCK_HASH]) *Head_LatestFinalizedHead_Call[BLOCK_HASH] { - _c.Call.Return(run) - return _c -} - -// NewHead creates a new instance of Head. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewHead[BLOCK_HASH types.Hashable](t interface { - mock.TestingT - Cleanup(func()) -}) *Head[BLOCK_HASH] { - mock := &Head[BLOCK_HASH]{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/types/mocks/subscription.go b/types/mocks/subscription.go deleted file mode 100644 index b0b87c7..0000000 --- a/types/mocks/subscription.go +++ /dev/null @@ -1,111 +0,0 @@ -// Code generated by mockery v2.46.3. DO NOT EDIT. 
- -package mocks - -import mock "github.com/stretchr/testify/mock" - -// Subscription is an autogenerated mock type for the Subscription type -type Subscription struct { - mock.Mock -} - -type Subscription_Expecter struct { - mock *mock.Mock -} - -func (_m *Subscription) EXPECT() *Subscription_Expecter { - return &Subscription_Expecter{mock: &_m.Mock} -} - -// Err provides a mock function with given fields: -func (_m *Subscription) Err() <-chan error { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Err") - } - - var r0 <-chan error - if rf, ok := ret.Get(0).(func() <-chan error); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan error) - } - } - - return r0 -} - -// Subscription_Err_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Err' -type Subscription_Err_Call struct { - *mock.Call -} - -// Err is a helper method to define mock.On call -func (_e *Subscription_Expecter) Err() *Subscription_Err_Call { - return &Subscription_Err_Call{Call: _e.mock.On("Err")} -} - -func (_c *Subscription_Err_Call) Run(run func()) *Subscription_Err_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Subscription_Err_Call) Return(_a0 <-chan error) *Subscription_Err_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Subscription_Err_Call) RunAndReturn(run func() <-chan error) *Subscription_Err_Call { - _c.Call.Return(run) - return _c -} - -// Unsubscribe provides a mock function with given fields: -func (_m *Subscription) Unsubscribe() { - _m.Called() -} - -// Subscription_Unsubscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Unsubscribe' -type Subscription_Unsubscribe_Call struct { - *mock.Call -} - -// Unsubscribe is a helper method to define mock.On call -func (_e *Subscription_Expecter) Unsubscribe() *Subscription_Unsubscribe_Call { - return &Subscription_Unsubscribe_Call{Call: _e.mock.On("Unsubscribe")} -} - -func (_c *Subscription_Unsubscribe_Call) Run(run func()) *Subscription_Unsubscribe_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Subscription_Unsubscribe_Call) Return() *Subscription_Unsubscribe_Call { - _c.Call.Return() - return _c -} - -func (_c *Subscription_Unsubscribe_Call) RunAndReturn(run func()) *Subscription_Unsubscribe_Call { - _c.Call.Return(run) - return _c -} - -// NewSubscription creates a new instance of Subscription. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewSubscription(t interface { - mock.TestingT - Cleanup(func()) -}) *Subscription { - mock := &Subscription{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/types/receipt.go b/types/receipt.go deleted file mode 100644 index 01d5a72..0000000 --- a/types/receipt.go +++ /dev/null @@ -1,14 +0,0 @@ -package types - -import "math/big" - -type Receipt[TX_HASH Hashable, BLOCK_HASH Hashable] interface { - GetStatus() uint64 - GetTxHash() TX_HASH - GetBlockNumber() *big.Int - IsZero() bool - IsUnmined() bool - GetFeeUsed() uint64 - GetTransactionIndex() uint - GetBlockHash() BLOCK_HASH -} diff --git a/types/subscription.go b/types/subscription.go deleted file mode 100644 index 3c4fd4c..0000000 --- a/types/subscription.go +++ /dev/null @@ -1,16 +0,0 @@ -package types - -// Subscription represents an event subscription where events are -// delivered on a data channel. -// This is a generic interface for Subscription to represent used by clients. -type Subscription interface { - // Unsubscribe cancels the sending of events to the data channel - // and closes the error channel. Unsubscribe should be callable multiple - // times without causing an error. - Unsubscribe() - // Err returns the subscription error channel. The error channel receives - // a value if there is an issue with the subscription (e.g. the network connection - // delivering the events has been closed). Only one value will ever be sent. - // The error channel is closed by Unsubscribe. - Err() <-chan error -} diff --git a/types/test_utils.go b/types/test_utils.go deleted file mode 100644 index 40560f7..0000000 --- a/types/test_utils.go +++ /dev/null @@ -1,16 +0,0 @@ -package types - -import ( - "math" - "math/big" - "math/rand" -) - -func RandomID() ID { - id := rand.Int63n(math.MaxInt32) + 10000 - return big.NewInt(id) -} - -func NewIDFromInt(id int64) ID { - return big.NewInt(id) -} diff --git a/utils/utils.go b/utils/utils.go deleted file mode 100644 index aeaad34..0000000 --- a/utils/utils.go +++ /dev/null @@ -1,35 +0,0 @@ -package utils - -import ( - "cmp" - "slices" - "time" - - "github.com/jpillora/backoff" - "golang.org/x/exp/constraints" -) - -// NewRedialBackoff is a standard backoff to use for redialling or reconnecting to -// unreachable network endpoints -func NewRedialBackoff() backoff.Backoff { - return backoff.Backoff{ - Min: 1 * time.Second, - Max: 15 * time.Second, - Jitter: true, - } -} - -// MinFunc returns the minimum value of the given element array with respect -// to the given key function. In the event U is not a compound type (e.g a -// struct) an identity function can be provided. 
-func MinFunc[U any, T constraints.Ordered](elems []U, f func(U) T) T { - var min T - if len(elems) == 0 { - return min - } - - e := slices.MinFunc(elems, func(a, b U) int { - return cmp.Compare(f(a), f(b)) - }) - return f(e) -} From 11912f9d607d2fcdc725c624d6b428f946971b0b Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 10 Dec 2024 13:05:43 -0500 Subject: [PATCH 09/11] Add Tx Sender go routines leak fix --- multinode/transaction_sender.go | 37 ++++++++++++++++++++-------- multinode/transaction_sender_test.go | 21 ++++++++++++++++ 2 files changed, 48 insertions(+), 10 deletions(-) diff --git a/multinode/transaction_sender.go b/multinode/transaction_sender.go index 49322d4..c0c8a9b 100644 --- a/multinode/transaction_sender.go +++ b/multinode/transaction_sender.go @@ -91,6 +91,8 @@ type TransactionSender[TX any, RESULT SendTxResult, CHAIN_ID ID, RPC SendTxRPCCl // * Otherwise, returns any (effectively random) of the errors. func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) SendTransaction(ctx context.Context, tx TX) RESULT { var result RESULT + ctx, cancel := txSender.chStop.Ctx(ctx) + defer cancel() if !txSender.IfStarted(func() { txResults := make(chan RESULT) txResultsToReport := make(chan RESULT) @@ -101,8 +103,6 @@ func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) SendTransaction(ct if isSendOnly { txSender.wg.Add(1) go func(ctx context.Context) { - ctx, cancel := txSender.chStop.Ctx(context.WithoutCancel(ctx)) - defer cancel() defer txSender.wg.Done() // Send-only nodes' results are ignored as they tend to return false-positive responses. // Broadcast to them is necessary to speed up the propagation of TX in the network. @@ -115,8 +115,9 @@ func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) SendTransaction(ct healthyNodesNum++ primaryNodeWg.Add(1) go func(ctx context.Context) { - ctx, cancel := txSender.chStop.Ctx(context.WithoutCancel(ctx)) - defer cancel() + // Broadcasting transaction and results reporting for invariant detection are background jobs that must be detached from + // callers cancellation. + // Results reporting to SendTransaction caller must respect caller's context to avoid goroutine leak. 
defer primaryNodeWg.Done() r := txSender.broadcastTxAsync(ctx, rpc, tx) select { @@ -126,6 +127,8 @@ func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) SendTransaction(ct case txResults <- r: } + ctx, cancel := txSender.chStop.Ctx(context.WithoutCancel(ctx)) + defer cancel() select { case <-ctx.Done(): txSender.lggr.Debugw("Failed to send tx results to report", "err", ctx.Err()) @@ -149,8 +152,13 @@ func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) SendTransaction(ct return } + if healthyNodesNum == 0 { + result = txSender.newResult(ErroringNodeError) + return + } + txSender.wg.Add(1) - go txSender.reportSendTxAnomalies(ctx, tx, txResultsToReport) + go txSender.reportSendTxAnomalies(tx, txResultsToReport) result = txSender.collectTxResults(ctx, tx, healthyNodesNum, txResults) }) { @@ -161,6 +169,9 @@ func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) SendTransaction(ct } func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) broadcastTxAsync(ctx context.Context, rpc RPC, tx TX) RESULT { + // broadcast is a background job, so always detach from caller's cancellation + ctx, cancel := txSender.chStop.Ctx(context.WithoutCancel(ctx)) + defer cancel() result := rpc.SendTransaction(ctx, tx) txSender.lggr.Debugw("Node sent transaction", "tx", tx, "err", result.Error()) if !slices.Contains(sendTxSuccessfulCodes, result.Code()) && ctx.Err() == nil { @@ -169,7 +180,7 @@ func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) broadcastTxAsync(c return result } -func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) reportSendTxAnomalies(ctx context.Context, tx TX, txResults <-chan RESULT) { +func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) reportSendTxAnomalies(tx TX, txResults <-chan RESULT) { defer txSender.wg.Done() resultsByCode := sendTxResults[RESULT]{} // txResults eventually will be closed @@ -177,8 +188,17 @@ func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) reportSendTxAnomal resultsByCode[txResult.Code()] = append(resultsByCode[txResult.Code()], txResult) } + select { + case <-txSender.chStop: + // it's ok to receive no results if txSender is closing. Return early to prevent false reporting of invariant violation. 
+ if len(resultsByCode) == 0 { + return + } + default: + } + _, criticalErr := aggregateTxResults[RESULT](resultsByCode) - if criticalErr != nil && ctx.Err() == nil { + if criticalErr != nil { txSender.lggr.Criticalw("observed invariant violation on SendTransaction", "tx", tx, "resultsByCode", resultsByCode, "err", criticalErr) PromMultiNodeInvariantViolations.WithLabelValues(txSender.chainFamily, txSender.chainID.String(), criticalErr.Error()).Inc() } @@ -216,9 +236,6 @@ func aggregateTxResults[RESULT any](resultsByCode sendTxResults[RESULT]) (result } func (txSender *TransactionSender[TX, RESULT, CHAIN_ID, RPC]) collectTxResults(ctx context.Context, tx TX, healthyNodesNum int, txResults <-chan RESULT) RESULT { - if healthyNodesNum == 0 { - return txSender.newResult(ErroringNodeError) - } requiredResults := int(math.Ceil(float64(healthyNodesNum) * sendTxQuorum)) errorsByCode := sendTxResults[RESULT]{} var softTimeoutChan <-chan time.Time diff --git a/multinode/transaction_sender_test.go b/multinode/transaction_sender_test.go index 6405639..62d6523 100644 --- a/multinode/transaction_sender_test.go +++ b/multinode/transaction_sender_test.go @@ -292,6 +292,27 @@ func TestTransactionSender_SendTransaction(t *testing.T) { require.NoError(t, result.Error()) require.Equal(t, Successful, result.Code()) }) + t.Run("All background jobs stop even if RPC returns result after soft timeout", func(t *testing.T) { + chainID := RandomID() + expectedError := errors.New("transaction failed") + fastNode := newNode(t, expectedError, nil) + + // hold reply from the node till SendTransaction returns result + sendTxContext, sendTxCancel := context.WithCancel(tests.Context(t)) + slowNode := newNode(t, errors.New("transaction failed"), func(_ mock.Arguments) { + <-sendTxContext.Done() + }) + + lggr := logger.Test(t) + + _, txSender := newTestTransactionSender(t, chainID, lggr, []Node[ID, TestSendTxRPCClient]{fastNode, slowNode}, nil) + result := txSender.SendTransaction(sendTxContext, nil) + sendTxCancel() + require.EqualError(t, result.Error(), expectedError.Error()) + // TxSender should stop all background go routines after SendTransaction is done and before test is done. + // Otherwise, it signals that we have a goroutine leak. 
+ txSender.wg.Wait() + }) } func TestTransactionSender_SendTransaction_aggregateTxResults(t *testing.T) { From e4f027ed9c82cf3af8cc8068fc4b76582f23ee6f Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 12 Dec 2024 10:43:11 -0500 Subject: [PATCH 10/11] Update license to 2024 --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index e6bed04..2d7bdc8 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2018 SmartContract ChainLink Limited SEZC +Copyright (c) 2024 SmartContract ChainLink Limited SEZC Portions of this software are licensed as follows: From 9698192f48998eec3fd23eb2978721224ce34a1d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 12 Dec 2024 10:45:14 -0500 Subject: [PATCH 11/11] tidy --- go.mod | 4 +--- go.sum | 14 -------------- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/go.mod b/go.mod index e30ee48..0807889 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,6 @@ require ( github.com/smartcontractkit/chainlink-common v0.3.1-0.20241127162636-07aa781ee1f4 github.com/stretchr/testify v1.10.0 go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 ) require ( @@ -37,9 +36,8 @@ require ( go.opentelemetry.io/otel/trace v1.30.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.28.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect golang.org/x/sys v0.26.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/grpc v1.67.1 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index c9289ca..e5e0ef1 100644 --- a/go.sum +++ b/go.sum @@ -8,15 +8,11 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= -github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= -github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -69,8 +65,6 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= @@ -87,16 +81,8 @@ golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=