diff --git a/Makefile b/Makefile index 9bdc2e15a..9ca974a96 100644 --- a/Makefile +++ b/Makefile @@ -263,7 +263,7 @@ docsgen-cli: curio sptool .PHONY: docsgen-cli go-generate: - $(GOCC) generate ./... + LANG=en-US $(GOCC) generate ./... .PHONY: go-generate gen: gensimple @@ -317,6 +317,9 @@ update/lotus: $(lotus_src_dir) cd $(lotus_src_dir) && git pull .PHONY: update/lotus +docker/testdb: + docker run -d --name yugabyte -p7001:7000 -p9000:9000 -p15433:15433 -p5433:5433 -p9042:9042 yugabytedb/yugabyte bin/yugabyted start --background=false + docker/lotus-all-in-one: info/lotus-all-in-one | $(lotus_src_dir) cd $(lotus_src_dir) && $(curio_docker_build_cmd) -f Dockerfile --target lotus-all-in-one \ -t $(lotus_base_image) --build-arg GOFLAGS=-tags=debug . diff --git a/alertmanager/alerts.go b/alertmanager/alerts.go index 056f7d47b..aff948111 100644 --- a/alertmanager/alerts.go +++ b/alertmanager/alerts.go @@ -107,6 +107,7 @@ func balanceCheck(al *alerts) { return } + minBalance := abi.TokenAmount(al.cfg.MinimumWalletBalance.Get()) for _, addr := range uniqueAddrs { keyAddr, err := al.api.StateAccountKey(al.ctx, addr, types.EmptyTSK) if err != nil { @@ -129,7 +130,7 @@ func balanceCheck(al *alerts) { al.alertMap[Name].err = err } - if abi.TokenAmount(al.cfg.MinimumWalletBalance).GreaterThanEqual(balance) { + if minBalance.GreaterThanEqual(balance) { ret += fmt.Sprintf("Balance for wallet %s (%s) is below 5 Fil. 
", addr, keyAddr) } } diff --git a/alertmanager/plugin/pager_duty.go b/alertmanager/plugin/pager_duty.go index 20c70b549..5a04338d9 100644 --- a/alertmanager/plugin/pager_duty.go +++ b/alertmanager/plugin/pager_duty.go @@ -49,7 +49,7 @@ func (p *PagerDuty) SendAlert(data *AlertPayload) error { } payload := &pdData{ - RoutingKey: p.cfg.PageDutyIntegrationKey, + RoutingKey: p.cfg.PageDutyIntegrationKey.Get(), EventAction: "trigger", Payload: &pdPayload{ Summary: data.Summary, @@ -64,7 +64,7 @@ func (p *PagerDuty) SendAlert(data *AlertPayload) error { return fmt.Errorf("error marshaling JSON: %w", err) } - req, err := http.NewRequest("POST", p.cfg.PagerDutyEventURL, bytes.NewBuffer(jsonData)) + req, err := http.NewRequest("POST", p.cfg.PagerDutyEventURL.Get(), bytes.NewBuffer(jsonData)) if err != nil { return fmt.Errorf("error creating request: %w", err) } diff --git a/alertmanager/plugin/plugin.go b/alertmanager/plugin/plugin.go index 6068263cc..19ab2e7e0 100644 --- a/alertmanager/plugin/plugin.go +++ b/alertmanager/plugin/plugin.go @@ -24,19 +24,34 @@ type AlertPayload struct { var TestPlugins []Plugin -func LoadAlertPlugins(cfg config.CurioAlertingConfig) []Plugin { - var plugins []Plugin - if cfg.PagerDuty.Enable { - plugins = append(plugins, NewPagerDuty(cfg.PagerDuty)) +func LoadAlertPlugins(cfg config.CurioAlertingConfig) *config.Dynamic[[]Plugin] { + pluginsDynamic := config.NewDynamic([]Plugin{}) + collectPlugins := func() []Plugin { + var plugins []Plugin + if cfg.PagerDuty.Enable.Get() { + plugins = append(plugins, NewPagerDuty(cfg.PagerDuty)) + } + if cfg.PrometheusAlertManager.Enable.Get() { + plugins = append(plugins, NewPrometheusAlertManager(cfg.PrometheusAlertManager)) + } + if cfg.SlackWebhook.Enable.Get() { + plugins = append(plugins, NewSlackWebhook(cfg.SlackWebhook)) + } + if len(TestPlugins) > 0 { + plugins = append(plugins, TestPlugins...) 
+ } + return plugins } - if cfg.PrometheusAlertManager.Enable { - plugins = append(plugins, NewPrometheusAlertManager(cfg.PrometheusAlertManager)) - } - if cfg.SlackWebhook.Enable { - plugins = append(plugins, NewSlackWebhook(cfg.SlackWebhook)) - } - if len(TestPlugins) > 0 { - plugins = append(plugins, TestPlugins...) - } - return plugins + pluginsDynamic.Set(collectPlugins()) + cfg.PagerDuty.Enable.OnChange(func() { + pluginsDynamic.Set(collectPlugins()) + }) + cfg.PrometheusAlertManager.Enable.OnChange(func() { + pluginsDynamic.Set(collectPlugins()) + }) + cfg.SlackWebhook.Enable.OnChange(func() { + pluginsDynamic.Set(collectPlugins()) + }) + + return pluginsDynamic } diff --git a/alertmanager/plugin/prometheus_alertmanager.go b/alertmanager/plugin/prometheus_alertmanager.go index 0784da96a..a587c5bb3 100644 --- a/alertmanager/plugin/prometheus_alertmanager.go +++ b/alertmanager/plugin/prometheus_alertmanager.go @@ -60,7 +60,7 @@ func (p *PrometheusAlertManager) SendAlert(data *AlertPayload) error { if err != nil { return fmt.Errorf("error marshaling JSON: %w", err) } - req, err := http.NewRequest("POST", p.cfg.AlertManagerURL, bytes.NewBuffer(jsonData)) + req, err := http.NewRequest("POST", p.cfg.AlertManagerURL.Get(), bytes.NewBuffer(jsonData)) if err != nil { return fmt.Errorf("error creating request: %w", err) } diff --git a/alertmanager/plugin/slack_webhook.go b/alertmanager/plugin/slack_webhook.go index b524a036f..182a5786f 100644 --- a/alertmanager/plugin/slack_webhook.go +++ b/alertmanager/plugin/slack_webhook.go @@ -127,7 +127,7 @@ func (s *SlackWebhook) SendAlert(data *AlertPayload) error { return xerrors.Errorf("Error marshaling JSON: %w", err) } - req, err := http.NewRequest("POST", s.cfg.WebHookURL, bytes.NewBuffer(jsonData)) + req, err := http.NewRequest("POST", s.cfg.WebHookURL.Get(), bytes.NewBuffer(jsonData)) if err != nil { return fmt.Errorf("error creating request: %w", err) } diff --git a/alertmanager/task_alert.go 
b/alertmanager/task_alert.go index 564a71e5b..2cabce325 100644 --- a/alertmanager/task_alert.go +++ b/alertmanager/task_alert.go @@ -49,7 +49,7 @@ type AlertTask struct { api AlertAPI cfg config.CurioAlertingConfig db *harmonydb.DB - plugins []plugin.Plugin + plugins *config.Dynamic[[]plugin.Plugin] al *curioalerting.AlertingSystem } @@ -95,7 +95,7 @@ func NewAlertTask( } func (a *AlertTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - if len(a.plugins) == 0 { + if len(a.plugins.Get()) == 0 { log.Warnf("No alert plugins enabled, not sending an alert") return true, nil } @@ -148,7 +148,7 @@ func (a *AlertTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done } var errs []error - for _, ap := range a.plugins { + for _, ap := range a.plugins.Get() { err = ap.SendAlert(payloadData) if err != nil { log.Errorf("Error sending alert: %s", err) diff --git a/api/ethclient.go b/api/ethclient.go new file mode 100644 index 000000000..b5bcb8a57 --- /dev/null +++ b/api/ethclient.go @@ -0,0 +1,53 @@ +package api + +import ( + "context" + mathbig "math/big" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + erpc "github.com/ethereum/go-ethereum/rpc" +) + +type EthClientInterface interface { + ChainID(ctx context.Context) (*mathbig.Int, error) + BlockByHash(ctx context.Context, hash common.Hash) (*ethtypes.Block, error) + BlockByNumber(ctx context.Context, number *mathbig.Int) (*ethtypes.Block, error) + BlockNumber(ctx context.Context) (uint64, error) + PeerCount(ctx context.Context) (uint64, error) + BlockReceipts(ctx context.Context, blockNrOrHash erpc.BlockNumberOrHash) ([]*ethtypes.Receipt, error) + HeaderByHash(ctx context.Context, hash common.Hash) (*ethtypes.Header, error) + HeaderByNumber(ctx context.Context, number *mathbig.Int) (*ethtypes.Header, error) + TransactionByHash(ctx context.Context, hash common.Hash) (tx *ethtypes.Transaction, 
isPending bool, err error) + TransactionSender(ctx context.Context, tx *ethtypes.Transaction, block common.Hash, index uint) (common.Address, error) + TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) + TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*ethtypes.Transaction, error) + TransactionReceipt(ctx context.Context, txHash common.Hash) (*ethtypes.Receipt, error) + SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error) + SubscribeNewHead(ctx context.Context, ch chan<- *ethtypes.Header) (ethereum.Subscription, error) + NetworkID(ctx context.Context) (*mathbig.Int, error) + BalanceAt(ctx context.Context, account common.Address, blockNumber *mathbig.Int) (*mathbig.Int, error) + BalanceAtHash(ctx context.Context, account common.Address, blockHash common.Hash) (*mathbig.Int, error) + StorageAt(ctx context.Context, account common.Address, key common.Hash, blockNumber *mathbig.Int) ([]byte, error) + StorageAtHash(ctx context.Context, account common.Address, key common.Hash, blockHash common.Hash) ([]byte, error) + CodeAt(ctx context.Context, account common.Address, blockNumber *mathbig.Int) ([]byte, error) + CodeAtHash(ctx context.Context, account common.Address, blockHash common.Hash) ([]byte, error) + NonceAt(ctx context.Context, account common.Address, blockNumber *mathbig.Int) (uint64, error) + NonceAtHash(ctx context.Context, account common.Address, blockHash common.Hash) (uint64, error) + FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]ethtypes.Log, error) + SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- ethtypes.Log) (ethereum.Subscription, error) + PendingBalanceAt(ctx context.Context, account common.Address) (*mathbig.Int, error) + PendingStorageAt(ctx context.Context, account common.Address, key common.Hash) ([]byte, error) + PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) + PendingNonceAt(ctx context.Context, account 
common.Address) (uint64, error) + PendingTransactionCount(ctx context.Context) (uint, error) + CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *mathbig.Int) ([]byte, error) + CallContractAtHash(ctx context.Context, msg ethereum.CallMsg, blockHash common.Hash) ([]byte, error) + PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) + SuggestGasPrice(ctx context.Context) (*mathbig.Int, error) + SuggestGasTipCap(ctx context.Context) (*mathbig.Int, error) + FeeHistory(ctx context.Context, blockCount uint64, lastBlock *mathbig.Int, rewardPercentiles []float64) (*ethereum.FeeHistory, error) + EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error) + SendTransaction(ctx context.Context, tx *ethtypes.Transaction) error +} diff --git a/api/gen/api/proxygen.go b/api/gen/api/proxygen.go index af45ff330..25bd22be3 100644 --- a/api/gen/api/proxygen.go +++ b/api/gen/api/proxygen.go @@ -103,10 +103,13 @@ func typeName(e ast.Expr, pkg string) (string, error) { if err != nil { return "", err } - if t.Dir == ast.SEND { - subt = "->chan " + subt - } else { + switch t.Dir { + case ast.SEND: + subt = "chan<- " + subt + case ast.RECV: subt = "<-chan " + subt + default: + subt = "chan " + subt } return subt, nil default: @@ -217,20 +220,26 @@ func generate(path, pkg, outpkg, outfile string) error { defRes := "" if len(results) > 1 { - defRes = results[0] - switch { - case defRes[0] == '*' || defRes[0] == '<', defRes == "interface{}": - defRes = "nil" - case defRes == "bool": - defRes = "false" - case defRes == "string": - defRes = `""` - case defRes == "int", defRes == "int64", defRes == "uint64", defRes == "uint": - defRes = "0" - default: - defRes = "*new(" + defRes + ")" + // Generate default values for all non-error return values + var defaults []string + for i := 0; i < len(results)-1; i++ { + r := results[i] + var def string + switch { + case r[0] == '*' || r[0] == '<', r == "interface{}", strings.HasPrefix(r, "chan"): + 
def = "nil" + case r == "bool": + def = "false" + case r == "string": + def = `""` + case r == "int", r == "int64", r == "uint64", r == "uint": + def = "0" + default: + def = "*new(" + r + ")" + } + defaults = append(defaults, def) } - defRes += ", " + defRes = strings.Join(defaults, ", ") + ", " } info.Methods[mname] = &methodInfo{ @@ -275,7 +284,7 @@ func generate(path, pkg, outpkg, outfile string) error { return err } - err = doTemplate(w, m, `// Code generated by github.com/filecoin-project/curio/gen/api. DO NOT EDIT. + err = doTemplate(w, m, `// Code generated by github.com/filecoin-project/curio/api/gen. DO NOT EDIT. package {{.OutPkg}} diff --git a/api/proxy_gen.go b/api/proxy_gen.go index 114481675..77b2c1c62 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -1,13 +1,18 @@ -// Code generated by github.com/filecoin-project/curio/gen/api. DO NOT EDIT. +// Code generated by github.com/filecoin-project/curio/api/gen. DO NOT EDIT. package api import ( "context" + mathbig "math/big" "net/http" "net/url" "reflect" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + erpc "github.com/ethereum/go-ethereum/rpc" "github.com/google/uuid" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" @@ -236,6 +241,93 @@ type CurioChainRPCMethods struct { type CurioChainRPCStub struct { } +type EthClientInterfaceStruct struct { + Internal EthClientInterfaceMethods +} + +type EthClientInterfaceMethods struct { + BalanceAt func(p0 context.Context, p1 common.Address, p2 *mathbig.Int) (*mathbig.Int, error) `` + + BalanceAtHash func(p0 context.Context, p1 common.Address, p2 common.Hash) (*mathbig.Int, error) `` + + BlockByHash func(p0 context.Context, p1 common.Hash) (*ethtypes.Block, error) `` + + BlockByNumber func(p0 context.Context, p1 *mathbig.Int) (*ethtypes.Block, error) `` + + BlockNumber func(p0 context.Context) (uint64, error) `` + + BlockReceipts func(p0 context.Context, 
p1 erpc.BlockNumberOrHash) ([]*ethtypes.Receipt, error) `` + + CallContract func(p0 context.Context, p1 ethereum.CallMsg, p2 *mathbig.Int) ([]byte, error) `` + + CallContractAtHash func(p0 context.Context, p1 ethereum.CallMsg, p2 common.Hash) ([]byte, error) `` + + ChainID func(p0 context.Context) (*mathbig.Int, error) `` + + CodeAt func(p0 context.Context, p1 common.Address, p2 *mathbig.Int) ([]byte, error) `` + + CodeAtHash func(p0 context.Context, p1 common.Address, p2 common.Hash) ([]byte, error) `` + + EstimateGas func(p0 context.Context, p1 ethereum.CallMsg) (uint64, error) `` + + FeeHistory func(p0 context.Context, p1 uint64, p2 *mathbig.Int, p3 []float64) (*ethereum.FeeHistory, error) `` + + FilterLogs func(p0 context.Context, p1 ethereum.FilterQuery) ([]ethtypes.Log, error) `` + + HeaderByHash func(p0 context.Context, p1 common.Hash) (*ethtypes.Header, error) `` + + HeaderByNumber func(p0 context.Context, p1 *mathbig.Int) (*ethtypes.Header, error) `` + + NetworkID func(p0 context.Context) (*mathbig.Int, error) `` + + NonceAt func(p0 context.Context, p1 common.Address, p2 *mathbig.Int) (uint64, error) `` + + NonceAtHash func(p0 context.Context, p1 common.Address, p2 common.Hash) (uint64, error) `` + + PeerCount func(p0 context.Context) (uint64, error) `` + + PendingBalanceAt func(p0 context.Context, p1 common.Address) (*mathbig.Int, error) `` + + PendingCallContract func(p0 context.Context, p1 ethereum.CallMsg) ([]byte, error) `` + + PendingCodeAt func(p0 context.Context, p1 common.Address) ([]byte, error) `` + + PendingNonceAt func(p0 context.Context, p1 common.Address) (uint64, error) `` + + PendingStorageAt func(p0 context.Context, p1 common.Address, p2 common.Hash) ([]byte, error) `` + + PendingTransactionCount func(p0 context.Context) (uint, error) `` + + SendTransaction func(p0 context.Context, p1 *ethtypes.Transaction) error `` + + StorageAt func(p0 context.Context, p1 common.Address, p2 common.Hash, p3 *mathbig.Int) ([]byte, error) `` + + 
StorageAtHash func(p0 context.Context, p1 common.Address, p2 common.Hash, p3 common.Hash) ([]byte, error) `` + + SubscribeFilterLogs func(p0 context.Context, p1 ethereum.FilterQuery, p2 chan<- ethtypes.Log) (ethereum.Subscription, error) `` + + SubscribeNewHead func(p0 context.Context, p1 chan<- *ethtypes.Header) (ethereum.Subscription, error) `` + + SuggestGasPrice func(p0 context.Context) (*mathbig.Int, error) `` + + SuggestGasTipCap func(p0 context.Context) (*mathbig.Int, error) `` + + SyncProgress func(p0 context.Context) (*ethereum.SyncProgress, error) `` + + TransactionByHash func(p0 context.Context, p1 common.Hash) (*ethtypes.Transaction, bool, error) `` + + TransactionCount func(p0 context.Context, p1 common.Hash) (uint, error) `` + + TransactionInBlock func(p0 context.Context, p1 common.Hash, p2 uint) (*ethtypes.Transaction, error) `` + + TransactionReceipt func(p0 context.Context, p1 common.Hash) (*ethtypes.Receipt, error) `` + + TransactionSender func(p0 context.Context, p1 *ethtypes.Transaction, p2 common.Hash, p3 uint) (common.Address, error) `` +} + +type EthClientInterfaceStub struct { +} + func (s *CurioStruct) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 lpiece.PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (api.SectorOffset, error) { if s.Internal.AllocatePieceToSector == nil { return *new(api.SectorOffset), ErrNotSupported @@ -1226,5 +1318,435 @@ func (s *CurioChainRPCStub) WalletSignMessage(p0 context.Context, p1 address.Add return nil, ErrNotSupported } +func (s *EthClientInterfaceStruct) BalanceAt(p0 context.Context, p1 common.Address, p2 *mathbig.Int) (*mathbig.Int, error) { + if s.Internal.BalanceAt == nil { + return nil, ErrNotSupported + } + return s.Internal.BalanceAt(p0, p1, p2) +} + +func (s *EthClientInterfaceStub) BalanceAt(p0 context.Context, p1 common.Address, p2 *mathbig.Int) (*mathbig.Int, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) BalanceAtHash(p0 context.Context, p1 
common.Address, p2 common.Hash) (*mathbig.Int, error) { + if s.Internal.BalanceAtHash == nil { + return nil, ErrNotSupported + } + return s.Internal.BalanceAtHash(p0, p1, p2) +} + +func (s *EthClientInterfaceStub) BalanceAtHash(p0 context.Context, p1 common.Address, p2 common.Hash) (*mathbig.Int, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) BlockByHash(p0 context.Context, p1 common.Hash) (*ethtypes.Block, error) { + if s.Internal.BlockByHash == nil { + return nil, ErrNotSupported + } + return s.Internal.BlockByHash(p0, p1) +} + +func (s *EthClientInterfaceStub) BlockByHash(p0 context.Context, p1 common.Hash) (*ethtypes.Block, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) BlockByNumber(p0 context.Context, p1 *mathbig.Int) (*ethtypes.Block, error) { + if s.Internal.BlockByNumber == nil { + return nil, ErrNotSupported + } + return s.Internal.BlockByNumber(p0, p1) +} + +func (s *EthClientInterfaceStub) BlockByNumber(p0 context.Context, p1 *mathbig.Int) (*ethtypes.Block, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) BlockNumber(p0 context.Context) (uint64, error) { + if s.Internal.BlockNumber == nil { + return 0, ErrNotSupported + } + return s.Internal.BlockNumber(p0) +} + +func (s *EthClientInterfaceStub) BlockNumber(p0 context.Context) (uint64, error) { + return 0, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) BlockReceipts(p0 context.Context, p1 erpc.BlockNumberOrHash) ([]*ethtypes.Receipt, error) { + if s.Internal.BlockReceipts == nil { + return *new([]*ethtypes.Receipt), ErrNotSupported + } + return s.Internal.BlockReceipts(p0, p1) +} + +func (s *EthClientInterfaceStub) BlockReceipts(p0 context.Context, p1 erpc.BlockNumberOrHash) ([]*ethtypes.Receipt, error) { + return *new([]*ethtypes.Receipt), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) CallContract(p0 context.Context, p1 ethereum.CallMsg, p2 *mathbig.Int) ([]byte, error) { + if 
s.Internal.CallContract == nil { + return *new([]byte), ErrNotSupported + } + return s.Internal.CallContract(p0, p1, p2) +} + +func (s *EthClientInterfaceStub) CallContract(p0 context.Context, p1 ethereum.CallMsg, p2 *mathbig.Int) ([]byte, error) { + return *new([]byte), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) CallContractAtHash(p0 context.Context, p1 ethereum.CallMsg, p2 common.Hash) ([]byte, error) { + if s.Internal.CallContractAtHash == nil { + return *new([]byte), ErrNotSupported + } + return s.Internal.CallContractAtHash(p0, p1, p2) +} + +func (s *EthClientInterfaceStub) CallContractAtHash(p0 context.Context, p1 ethereum.CallMsg, p2 common.Hash) ([]byte, error) { + return *new([]byte), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) ChainID(p0 context.Context) (*mathbig.Int, error) { + if s.Internal.ChainID == nil { + return nil, ErrNotSupported + } + return s.Internal.ChainID(p0) +} + +func (s *EthClientInterfaceStub) ChainID(p0 context.Context) (*mathbig.Int, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) CodeAt(p0 context.Context, p1 common.Address, p2 *mathbig.Int) ([]byte, error) { + if s.Internal.CodeAt == nil { + return *new([]byte), ErrNotSupported + } + return s.Internal.CodeAt(p0, p1, p2) +} + +func (s *EthClientInterfaceStub) CodeAt(p0 context.Context, p1 common.Address, p2 *mathbig.Int) ([]byte, error) { + return *new([]byte), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) CodeAtHash(p0 context.Context, p1 common.Address, p2 common.Hash) ([]byte, error) { + if s.Internal.CodeAtHash == nil { + return *new([]byte), ErrNotSupported + } + return s.Internal.CodeAtHash(p0, p1, p2) +} + +func (s *EthClientInterfaceStub) CodeAtHash(p0 context.Context, p1 common.Address, p2 common.Hash) ([]byte, error) { + return *new([]byte), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) EstimateGas(p0 context.Context, p1 ethereum.CallMsg) (uint64, error) { + if s.Internal.EstimateGas == nil { 
+ return 0, ErrNotSupported + } + return s.Internal.EstimateGas(p0, p1) +} + +func (s *EthClientInterfaceStub) EstimateGas(p0 context.Context, p1 ethereum.CallMsg) (uint64, error) { + return 0, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) FeeHistory(p0 context.Context, p1 uint64, p2 *mathbig.Int, p3 []float64) (*ethereum.FeeHistory, error) { + if s.Internal.FeeHistory == nil { + return nil, ErrNotSupported + } + return s.Internal.FeeHistory(p0, p1, p2, p3) +} + +func (s *EthClientInterfaceStub) FeeHistory(p0 context.Context, p1 uint64, p2 *mathbig.Int, p3 []float64) (*ethereum.FeeHistory, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) FilterLogs(p0 context.Context, p1 ethereum.FilterQuery) ([]ethtypes.Log, error) { + if s.Internal.FilterLogs == nil { + return *new([]ethtypes.Log), ErrNotSupported + } + return s.Internal.FilterLogs(p0, p1) +} + +func (s *EthClientInterfaceStub) FilterLogs(p0 context.Context, p1 ethereum.FilterQuery) ([]ethtypes.Log, error) { + return *new([]ethtypes.Log), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) HeaderByHash(p0 context.Context, p1 common.Hash) (*ethtypes.Header, error) { + if s.Internal.HeaderByHash == nil { + return nil, ErrNotSupported + } + return s.Internal.HeaderByHash(p0, p1) +} + +func (s *EthClientInterfaceStub) HeaderByHash(p0 context.Context, p1 common.Hash) (*ethtypes.Header, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) HeaderByNumber(p0 context.Context, p1 *mathbig.Int) (*ethtypes.Header, error) { + if s.Internal.HeaderByNumber == nil { + return nil, ErrNotSupported + } + return s.Internal.HeaderByNumber(p0, p1) +} + +func (s *EthClientInterfaceStub) HeaderByNumber(p0 context.Context, p1 *mathbig.Int) (*ethtypes.Header, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) NetworkID(p0 context.Context) (*mathbig.Int, error) { + if s.Internal.NetworkID == nil { + return nil, ErrNotSupported + } + 
return s.Internal.NetworkID(p0) +} + +func (s *EthClientInterfaceStub) NetworkID(p0 context.Context) (*mathbig.Int, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) NonceAt(p0 context.Context, p1 common.Address, p2 *mathbig.Int) (uint64, error) { + if s.Internal.NonceAt == nil { + return 0, ErrNotSupported + } + return s.Internal.NonceAt(p0, p1, p2) +} + +func (s *EthClientInterfaceStub) NonceAt(p0 context.Context, p1 common.Address, p2 *mathbig.Int) (uint64, error) { + return 0, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) NonceAtHash(p0 context.Context, p1 common.Address, p2 common.Hash) (uint64, error) { + if s.Internal.NonceAtHash == nil { + return 0, ErrNotSupported + } + return s.Internal.NonceAtHash(p0, p1, p2) +} + +func (s *EthClientInterfaceStub) NonceAtHash(p0 context.Context, p1 common.Address, p2 common.Hash) (uint64, error) { + return 0, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) PeerCount(p0 context.Context) (uint64, error) { + if s.Internal.PeerCount == nil { + return 0, ErrNotSupported + } + return s.Internal.PeerCount(p0) +} + +func (s *EthClientInterfaceStub) PeerCount(p0 context.Context) (uint64, error) { + return 0, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) PendingBalanceAt(p0 context.Context, p1 common.Address) (*mathbig.Int, error) { + if s.Internal.PendingBalanceAt == nil { + return nil, ErrNotSupported + } + return s.Internal.PendingBalanceAt(p0, p1) +} + +func (s *EthClientInterfaceStub) PendingBalanceAt(p0 context.Context, p1 common.Address) (*mathbig.Int, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) PendingCallContract(p0 context.Context, p1 ethereum.CallMsg) ([]byte, error) { + if s.Internal.PendingCallContract == nil { + return *new([]byte), ErrNotSupported + } + return s.Internal.PendingCallContract(p0, p1) +} + +func (s *EthClientInterfaceStub) PendingCallContract(p0 context.Context, p1 ethereum.CallMsg) ([]byte, error) { + 
return *new([]byte), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) PendingCodeAt(p0 context.Context, p1 common.Address) ([]byte, error) { + if s.Internal.PendingCodeAt == nil { + return *new([]byte), ErrNotSupported + } + return s.Internal.PendingCodeAt(p0, p1) +} + +func (s *EthClientInterfaceStub) PendingCodeAt(p0 context.Context, p1 common.Address) ([]byte, error) { + return *new([]byte), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) PendingNonceAt(p0 context.Context, p1 common.Address) (uint64, error) { + if s.Internal.PendingNonceAt == nil { + return 0, ErrNotSupported + } + return s.Internal.PendingNonceAt(p0, p1) +} + +func (s *EthClientInterfaceStub) PendingNonceAt(p0 context.Context, p1 common.Address) (uint64, error) { + return 0, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) PendingStorageAt(p0 context.Context, p1 common.Address, p2 common.Hash) ([]byte, error) { + if s.Internal.PendingStorageAt == nil { + return *new([]byte), ErrNotSupported + } + return s.Internal.PendingStorageAt(p0, p1, p2) +} + +func (s *EthClientInterfaceStub) PendingStorageAt(p0 context.Context, p1 common.Address, p2 common.Hash) ([]byte, error) { + return *new([]byte), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) PendingTransactionCount(p0 context.Context) (uint, error) { + if s.Internal.PendingTransactionCount == nil { + return 0, ErrNotSupported + } + return s.Internal.PendingTransactionCount(p0) +} + +func (s *EthClientInterfaceStub) PendingTransactionCount(p0 context.Context) (uint, error) { + return 0, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) SendTransaction(p0 context.Context, p1 *ethtypes.Transaction) error { + if s.Internal.SendTransaction == nil { + return ErrNotSupported + } + return s.Internal.SendTransaction(p0, p1) +} + +func (s *EthClientInterfaceStub) SendTransaction(p0 context.Context, p1 *ethtypes.Transaction) error { + return ErrNotSupported +} + +func (s *EthClientInterfaceStruct) StorageAt(p0 
context.Context, p1 common.Address, p2 common.Hash, p3 *mathbig.Int) ([]byte, error) { + if s.Internal.StorageAt == nil { + return *new([]byte), ErrNotSupported + } + return s.Internal.StorageAt(p0, p1, p2, p3) +} + +func (s *EthClientInterfaceStub) StorageAt(p0 context.Context, p1 common.Address, p2 common.Hash, p3 *mathbig.Int) ([]byte, error) { + return *new([]byte), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) StorageAtHash(p0 context.Context, p1 common.Address, p2 common.Hash, p3 common.Hash) ([]byte, error) { + if s.Internal.StorageAtHash == nil { + return *new([]byte), ErrNotSupported + } + return s.Internal.StorageAtHash(p0, p1, p2, p3) +} + +func (s *EthClientInterfaceStub) StorageAtHash(p0 context.Context, p1 common.Address, p2 common.Hash, p3 common.Hash) ([]byte, error) { + return *new([]byte), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) SubscribeFilterLogs(p0 context.Context, p1 ethereum.FilterQuery, p2 chan<- ethtypes.Log) (ethereum.Subscription, error) { + if s.Internal.SubscribeFilterLogs == nil { + return *new(ethereum.Subscription), ErrNotSupported + } + return s.Internal.SubscribeFilterLogs(p0, p1, p2) +} + +func (s *EthClientInterfaceStub) SubscribeFilterLogs(p0 context.Context, p1 ethereum.FilterQuery, p2 chan<- ethtypes.Log) (ethereum.Subscription, error) { + return *new(ethereum.Subscription), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) SubscribeNewHead(p0 context.Context, p1 chan<- *ethtypes.Header) (ethereum.Subscription, error) { + if s.Internal.SubscribeNewHead == nil { + return *new(ethereum.Subscription), ErrNotSupported + } + return s.Internal.SubscribeNewHead(p0, p1) +} + +func (s *EthClientInterfaceStub) SubscribeNewHead(p0 context.Context, p1 chan<- *ethtypes.Header) (ethereum.Subscription, error) { + return *new(ethereum.Subscription), ErrNotSupported +} + +func (s *EthClientInterfaceStruct) SuggestGasPrice(p0 context.Context) (*mathbig.Int, error) { + if s.Internal.SuggestGasPrice == nil { + 
return nil, ErrNotSupported + } + return s.Internal.SuggestGasPrice(p0) +} + +func (s *EthClientInterfaceStub) SuggestGasPrice(p0 context.Context) (*mathbig.Int, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) SuggestGasTipCap(p0 context.Context) (*mathbig.Int, error) { + if s.Internal.SuggestGasTipCap == nil { + return nil, ErrNotSupported + } + return s.Internal.SuggestGasTipCap(p0) +} + +func (s *EthClientInterfaceStub) SuggestGasTipCap(p0 context.Context) (*mathbig.Int, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) SyncProgress(p0 context.Context) (*ethereum.SyncProgress, error) { + if s.Internal.SyncProgress == nil { + return nil, ErrNotSupported + } + return s.Internal.SyncProgress(p0) +} + +func (s *EthClientInterfaceStub) SyncProgress(p0 context.Context) (*ethereum.SyncProgress, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) TransactionByHash(p0 context.Context, p1 common.Hash) (*ethtypes.Transaction, bool, error) { + if s.Internal.TransactionByHash == nil { + return nil, false, ErrNotSupported + } + return s.Internal.TransactionByHash(p0, p1) +} + +func (s *EthClientInterfaceStub) TransactionByHash(p0 context.Context, p1 common.Hash) (*ethtypes.Transaction, bool, error) { + return nil, false, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) TransactionCount(p0 context.Context, p1 common.Hash) (uint, error) { + if s.Internal.TransactionCount == nil { + return 0, ErrNotSupported + } + return s.Internal.TransactionCount(p0, p1) +} + +func (s *EthClientInterfaceStub) TransactionCount(p0 context.Context, p1 common.Hash) (uint, error) { + return 0, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) TransactionInBlock(p0 context.Context, p1 common.Hash, p2 uint) (*ethtypes.Transaction, error) { + if s.Internal.TransactionInBlock == nil { + return nil, ErrNotSupported + } + return s.Internal.TransactionInBlock(p0, p1, p2) +} + +func (s 
*EthClientInterfaceStub) TransactionInBlock(p0 context.Context, p1 common.Hash, p2 uint) (*ethtypes.Transaction, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) TransactionReceipt(p0 context.Context, p1 common.Hash) (*ethtypes.Receipt, error) { + if s.Internal.TransactionReceipt == nil { + return nil, ErrNotSupported + } + return s.Internal.TransactionReceipt(p0, p1) +} + +func (s *EthClientInterfaceStub) TransactionReceipt(p0 context.Context, p1 common.Hash) (*ethtypes.Receipt, error) { + return nil, ErrNotSupported +} + +func (s *EthClientInterfaceStruct) TransactionSender(p0 context.Context, p1 *ethtypes.Transaction, p2 common.Hash, p3 uint) (common.Address, error) { + if s.Internal.TransactionSender == nil { + return *new(common.Address), ErrNotSupported + } + return s.Internal.TransactionSender(p0, p1, p2, p3) +} + +func (s *EthClientInterfaceStub) TransactionSender(p0 context.Context, p1 *ethtypes.Transaction, p2 common.Hash, p3 uint) (common.Address, error) { + return *new(common.Address), ErrNotSupported +} + var _ Curio = new(CurioStruct) var _ CurioChainRPC = new(CurioChainRPCStruct) +var _ EthClientInterface = new(EthClientInterfaceStruct) diff --git a/build/openrpc/curio.json b/build/openrpc/curio.json index 63a827cd0..692158a80 100644 --- a/build/openrpc/curio.json +++ b/build/openrpc/curio.json @@ -312,7 +312,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L340" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L475" } }, { @@ -335,7 +335,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L351" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L486" } }, { @@ -400,7 +400,7 @@ "deprecated": false, "externalDocs": { "description": "Github 
remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L362" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L497" } }, { @@ -544,7 +544,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L373" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L508" } }, { @@ -580,7 +580,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L384" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L519" } }, { @@ -634,7 +634,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L395" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L530" } }, { @@ -657,7 +657,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L406" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L541" } }, { @@ -696,7 +696,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L417" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L552" } }, { @@ -735,7 +735,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L428" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L563" } }, { @@ -929,7 +929,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L439" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L574" } }, { @@ -990,7 +990,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L450" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L585" } }, { @@ -1122,7 +1122,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L461" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L596" } }, { @@ -1256,7 +1256,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L472" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L607" } }, { @@ -1310,7 +1310,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L483" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L618" } }, { @@ -1344,7 +1344,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L494" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L629" } }, { @@ -1398,7 +1398,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L505" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L640" } }, { @@ -1475,7 +1475,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L516" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L651" } }, { @@ -1498,7 +1498,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L527" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L662" } }, { @@ -1536,7 +1536,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L538" + "url": "https://github.com/filecoin-project/curio/blob/master/api/proxy_gen.go#L673" } } ] diff --git a/cmd/curio/guidedsetup/guidedsetup.go b/cmd/curio/guidedsetup/guidedsetup.go index 6a37002af..99cefcae2 100644 --- a/cmd/curio/guidedsetup/guidedsetup.go +++ b/cmd/curio/guidedsetup/guidedsetup.go @@ -594,7 +594,7 @@ func stepNewMinerConfig(d *MigrationData) { os.Exit(1) } - curioCfg.Apis.ChainApiInfo = append(curioCfg.Apis.ChainApiInfo, fmt.Sprintf("%s:%s", string(token), ainfo.Addr)) + curioCfg.Apis.ChainApiInfo.Set(append(curioCfg.Apis.ChainApiInfo.Get(), fmt.Sprintf("%s:%s", string(token), ainfo.Addr))) // write config var titles []string diff --git a/cmd/curio/guidedsetup/shared.go b/cmd/curio/guidedsetup/shared.go index 07455d4ae..e6c5ee326 100644 --- a/cmd/curio/guidedsetup/shared.go +++ b/cmd/curio/guidedsetup/shared.go @@ -171,7 +171,7 @@ func SaveConfigToLayerMigrateSectors(db *harmonydb.DB, minerRepoPath, chainApiIn curioCfg.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(js.PrivateKey) - curioCfg.Apis.ChainApiInfo = append(curioCfg.Apis.ChainApiInfo, chainApiInfo) + curioCfg.Apis.ChainApiInfo.Set(append(curioCfg.Apis.ChainApiInfo.Get(), chainApiInfo)) // Express as configTOML configTOMLBytes, err := config.TransparentMarshal(curioCfg) if err != nil { @@ -207,7 +207,7 @@ func SaveConfigToLayerMigrateSectors(db 
*harmonydb.DB, minerRepoPath, chainApiIn return len(a.MinerAddresses) > 0 })) if baseCfg.Apis.ChainApiInfo == nil { - baseCfg.Apis.ChainApiInfo = append(baseCfg.Apis.ChainApiInfo, chainApiInfo) + baseCfg.Apis.ChainApiInfo.Set(append(baseCfg.Apis.ChainApiInfo.Get(), chainApiInfo)) } if baseCfg.Apis.StorageRPCSecret == "" { baseCfg.Apis.StorageRPCSecret = curioCfg.Apis.StorageRPCSecret @@ -309,8 +309,8 @@ func ensureEmptyArrays(cfg *config.CurioConfig) { } cfg.Addresses.Set(addrs) } - if cfg.Apis.ChainApiInfo == nil { - cfg.Apis.ChainApiInfo = []string{} + if len(cfg.Apis.ChainApiInfo.Get()) == 0 { + cfg.Apis.ChainApiInfo.Set([]string{}) } } diff --git a/cmd/curio/tasks/tasks.go b/cmd/curio/tasks/tasks.go index 95f33e41c..1a8c3311d 100644 --- a/cmd/curio/tasks/tasks.go +++ b/cmd/curio/tasks/tasks.go @@ -75,12 +75,12 @@ func WindowPostScheduler(ctx context.Context, fc config.CurioFees, pc config.Cur return nil, nil, nil, err } - submitTask, err := window2.NewWdPostSubmitTask(chainSched, sender, db, api, fc.MaxWindowPoStGasFee, as) + submitTask, err := window2.NewWdPostSubmitTask(chainSched, sender, db, api, fc.MaxWindowPoStGasFee.Get(), as) if err != nil { return nil, nil, nil, err } - recoverTask, err := window2.NewWdPostRecoverDeclareTask(sender, db, api, ft, as, chainSched, fc.MaxWindowPoStGasFee, addresses) + recoverTask, err := window2.NewWdPostRecoverDeclareTask(sender, db, api, ft, as, chainSched, fc.MaxWindowPoStGasFee.Get(), addresses) if err != nil { return nil, nil, nil, err } @@ -259,7 +259,7 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps, shutdownChan chan var dm *storage_market.CurioStorageDealMarket if cfg.Subsystems.EnableDealMarket { // Main market poller should run on all nodes - dm = storage_market.NewCurioStorageDealMarket(miners, db, cfg, must.One(dependencies.EthClient.Val()), si, full, as, must.One(slrLazy.Val())) + dm = storage_market.NewCurioStorageDealMarket(miners, db, cfg, dependencies.EthClient, si, full, as, 
must.One(slrLazy.Val())) err := dm.StartMarket(ctx) if err != nil { return nil, err diff --git a/deps/apiinfo.go b/deps/apiinfo.go index cb6d59b15..79c4f9971 100644 --- a/deps/apiinfo.go +++ b/deps/apiinfo.go @@ -15,96 +15,128 @@ import ( erpc "github.com/ethereum/go-ethereum/rpc" "github.com/gorilla/websocket" logging "github.com/ipfs/go-log/v2" + "github.com/samber/lo" "github.com/urfave/cli/v2" "golang.org/x/xerrors" "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-state-types/big" + fbig "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/build" + "github.com/filecoin-project/curio/deps/config" lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" + ltypes "github.com/filecoin-project/lotus/chain/types" cliutil "github.com/filecoin-project/lotus/cli/util" ) var clog = logging.Logger("curio/chain") -func GetFullNodeAPIV1Curio(ctx *cli.Context, ainfoCfg []string) (api.Chain, jsonrpc.ClientCloser, error) { +func GetFullNodeAPIV1Curio(ctx *cli.Context, ainfoCfg *config.Dynamic[[]string]) (api.Chain, jsonrpc.ClientCloser, error) { if tn, ok := ctx.App.Metadata["testnode-full"]; ok { return tn.(api.Chain), func() {}, nil } - if len(ainfoCfg) == 0 { - return nil, nil, xerrors.Errorf("could not get API info: none configured. 
\nConsider getting base.toml with './curio config get base >/tmp/base.toml' \nthen adding \n[APIs] \n ChainApiInfo = [\" result_from lotus auth api-info --perm=admin \"]\n and updating it with './curio config set /tmp/base.toml'") - } - - var httpHeads []httpHead - version := "v1" - for _, i := range ainfoCfg { - ainfo := cliutil.ParseApiInfo(i) - addr, err := ainfo.DialArgs(version) - if err != nil { - return nil, nil, xerrors.Errorf("could not get DialArgs: %w", err) + connections := map[string]api.Chain{} + var closers []jsonrpc.ClientCloser + var existingConnectionsMutex sync.Mutex + var fullNodes *config.Dynamic[[]api.Chain] + + var addresses []string + updateDynamic := func() error { + existingConnectionsMutex.Lock() + defer existingConnectionsMutex.Unlock() + if len(ainfoCfg.Get()) == 0 { + return fmt.Errorf("could not get API info: none configured. \nConsider getting base.toml with './curio config get base >/tmp/base.toml' \nthen adding \n[APIs] \n ChainApiInfo = [\" result_from lotus auth api-info --perm=admin \"]\n and updating it with './curio config set /tmp/base.toml'") } - httpHeads = append(httpHeads, httpHead{addr: addr, header: ainfo.AuthHeader()}) - } - if cliutil.IsVeryVerbose { - _, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v1 endpoint:", httpHeads[0].addr) - } + httpHeads := make(map[string]httpHead) + version := "v1" + for _, i := range ainfoCfg.Get() { + if _, ok := connections[i]; ok { + continue + } + ainfo := cliutil.ParseApiInfo(i) + addr, err := ainfo.DialArgs(version) + if err != nil { + return xerrors.Errorf("could not get DialArgs: %w", err) + } + addresses = append(addresses, addr) + httpHeads[i] = httpHead{addr: addr, header: ainfo.AuthHeader()} + } - var fullNodes []api.Chain - var closers []jsonrpc.ClientCloser + /// At this point we have a valid, dynamic httpHeads, but we don't want to rebuild existing connections. 
- // Check network compatibility for each node - for _, head := range httpHeads { - v1api, closer, err := newChainNodeRPCV1(ctx.Context, head.addr, head.header) - if err != nil { - clog.Warnf("Not able to establish connection to node with addr: %s, Reason: %s", head.addr, err.Error()) - continue + if cliutil.IsVeryVerbose { + _, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v1 endpoint:", strings.Join(addresses, ", ")) } - // Validate network match - networkName, err := v1api.StateNetworkName(ctx.Context) - if err != nil { - clog.Warnf("Failed to get network name from node %s: %s", head.addr, err.Error()) - closer() - continue - } + // Check network compatibility for each node + for identifier, head := range httpHeads { + if connections[identifier] != nil { + continue + } - // Compare with binary's network using BuildTypeString() - if !strings.HasPrefix(string(networkName), "test") && !strings.HasPrefix(string(networkName), "local") { - if networkName == "calibrationnet" { - networkName = "calibnet" + v1api, closer, err := newChainNodeRPCV1(ctx.Context, head.addr, head.header) + if err != nil { + clog.Warnf("Not able to establish connection to node with addr: %s, Reason: %s", head.addr, err.Error()) + continue } - if string(networkName) != build.BuildTypeString()[1:] { - clog.Warnf("Network mismatch for node %s: binary built for %s but node is on %s", - head.addr, build.BuildTypeString()[1:], networkName) + // Validate network match + networkName, err := v1api.StateNetworkName(ctx.Context) + if err != nil { + clog.Warnf("Failed to get network name from node %s: %s", head.addr, err.Error()) closer() continue } + + // Compare with binary's network using BuildTypeString() + if !strings.HasPrefix(string(networkName), "test") && !strings.HasPrefix(string(networkName), "local") { + if networkName == "calibrationnet" { + networkName = "calibnet" + } + + if string(networkName) != build.BuildTypeString()[1:] { + clog.Warnf("Network mismatch for node %s: binary built 
for %s but node is on %s", + head.addr, build.BuildTypeString()[1:], networkName) + closer() + continue + } + } + + connections[identifier] = v1api + closers = append(closers, closer) } - fullNodes = append(fullNodes, v1api) - closers = append(closers, closer) - } + if len(connections) == 0 { + return xerrors.Errorf("failed to establish connection with all nodes") + } - if len(fullNodes) == 0 { - return nil, nil, xerrors.Errorf("failed to establish connection with all nodes") + fullNodes.Set(lo.Map(ainfoCfg.Get(), func(i string, _ int) api.Chain { return connections[i] })) + return nil } + err := updateDynamic() + if err != nil { + return nil, nil, err + } + ainfoCfg.OnChange(func() { + if err := updateDynamic(); err != nil { + clog.Errorf("failed to update http heads: %s", err) + } + }) + + var v1API api.ChainStruct + FullNodeProxy(fullNodes, &v1API) finalCloser := func() { + existingConnectionsMutex.Lock() + defer existingConnectionsMutex.Unlock() for _, c := range closers { c() } } - - var v1API api.ChainStruct - FullNodeProxy(fullNodes, &v1API) - return &v1API, finalCloser, nil } @@ -144,8 +176,8 @@ var errorsToRetry = []error{&jsonrpc.RPCConnectionError{}, &jsonrpc.ErrClient{}} const preferredAllBad = -1 // FullNodeProxy creates a proxy for the Chain API -func FullNodeProxy[T api.Chain](ins []T, outstr *api.ChainStruct) { - providerCount := len(ins) +func FullNodeProxy[T api.Chain](ins *config.Dynamic[[]T], outstr *api.ChainStruct) { + providerCount := len(ins.Get()) var healthyLk sync.Mutex unhealthyProviders := make([]bool, providerCount) @@ -165,7 +197,7 @@ func FullNodeProxy[T api.Chain](ins []T, outstr *api.ChainStruct) { // watch provider health startWatch := func() { - if len(ins) == 1 { + if len(ins.Get()) == 1 { // not like we have any onter node to go to.. 
return } @@ -173,7 +205,7 @@ func FullNodeProxy[T api.Chain](ins []T, outstr *api.ChainStruct) { // don't bother for short-running commands time.Sleep(250 * time.Millisecond) - var bestKnownTipset, nextBestKnownTipset *types.TipSet + var bestKnownTipset, nextBestKnownTipset *ltypes.TipSet for { var wg sync.WaitGroup @@ -184,7 +216,7 @@ func FullNodeProxy[T api.Chain](ins []T, outstr *api.ChainStruct) { defer wg.Done() toctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) // todo better timeout - ch, err := ins[i].ChainHead(toctx) + ch, err := ins.Get()[i].ChainHead(toctx) cancel() // error is definitely not healthy @@ -199,7 +231,7 @@ func FullNodeProxy[T api.Chain](ins []T, outstr *api.ChainStruct) { healthyLk.Lock() // maybe set best next - if nextBestKnownTipset == nil || big.Cmp(ch.ParentWeight(), nextBestKnownTipset.ParentWeight()) > 0 || len(ch.Blocks()) > len(nextBestKnownTipset.Blocks()) { + if nextBestKnownTipset == nil || fbig.Cmp(ch.ParentWeight(), nextBestKnownTipset.ParentWeight()) > 0 || len(ch.Blocks()) > len(nextBestKnownTipset.Blocks()) { nextBestKnownTipset = ch } @@ -223,29 +255,64 @@ func FullNodeProxy[T api.Chain](ins []T, outstr *api.ChainStruct) { var starWatchOnce sync.Once // populate output api proxy + populateProxyMethods(outstr, ins, nextHealthyProvider, startWatch, &starWatchOnce, providerCount) +} +// populateProxyMethods sets up the proxy methods for the API struct with retry and health monitoring +func populateProxyMethods[T, U any](outstr U, ins *config.Dynamic[[]T], nextHealthyProvider func(int) int, startWatch func(), starWatchOnce *sync.Once, providerCount int) { outs := api.GetInternalStructs(outstr) var apiProviders []reflect.Value - for _, in := range ins { - apiProviders = append(apiProviders, reflect.ValueOf(in)) + apiProvidersMx := sync.Mutex{} + setupProviders := func() { + for _, in := range ins.Get() { + apiProviders = append(apiProviders, reflect.ValueOf(in)) + } } - - for _, out := range outs { + 
setupProviders() + ins.OnChange(func() { + apiProvidersMx.Lock() + apiProviders = nil + setupProviders() + apiProvidersMx.Unlock() + }) + + providerFuncs := make([][][]reflect.Value, len(outs)) + setProviderFuncs := func() { + for outIdx, out := range outs { + rOutStruct := reflect.ValueOf(out).Elem() + providerFuncs[outIdx] = make([][]reflect.Value, rOutStruct.NumField()) + + for f := 0; f < rOutStruct.NumField(); f++ { + field := rOutStruct.Type().Field(f) + + var p []reflect.Value + apiProvidersMx.Lock() + p = apiProviders + apiProvidersMx.Unlock() + + providerFuncs[outIdx][f] = make([]reflect.Value, len(p)) + for pIdx, rin := range p { + mv := rin.MethodByName(field.Name) + if !mv.IsValid() { + continue + } + providerFuncs[outIdx][f][pIdx] = mv + } + } + } + } + setProviderFuncs() + ins.OnChange(func() { + apiProvidersMx.Lock() + apiProviders = nil + setProviderFuncs() + apiProvidersMx.Unlock() + }) + for outIdx, out := range outs { rOutStruct := reflect.ValueOf(out).Elem() - for f := 0; f < rOutStruct.NumField(); f++ { field := rOutStruct.Type().Field(f) - - var providerFuncs []reflect.Value - for _, rin := range apiProviders { - mv := rin.MethodByName(field.Name) - if !mv.IsValid() { - continue - } - providerFuncs = append(providerFuncs, mv) - } - rOutStruct.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) { starWatchOnce.Do(func() { go startWatch() @@ -279,7 +346,11 @@ func FullNodeProxy[T api.Chain](ins []T, outstr *api.ChainStruct) { *preferredProvider = pp } - result := providerFuncs[*preferredProvider].Call(args) + apiProvidersMx.Lock() + fn := providerFuncs[outIdx][f][*preferredProvider] + apiProvidersMx.Unlock() + + result := fn.Call(args) if result[len(result)-1].IsNil() { return result, nil } @@ -333,56 +404,96 @@ func ErrorIsIn(err error, errorTypes []error) bool { return false } -func GetEthClient(cctx *cli.Context, ainfoCfg []string) (*ethclient.Client, error) { - if len(ainfoCfg) == 0 { - return nil, 
xerrors.Errorf("could not get API info: none configured. \nConsider getting base.toml with './curio config get base >/tmp/base.toml' \nthen adding \n[APIs] \n ChainApiInfo = [\" result_from lotus auth api-info --perm=admin \"]\n and updating it with './curio config set /tmp/base.toml'") - } - +func GetEthClient(cctx *cli.Context, ainfoCfg *config.Dynamic[[]string]) (api.EthClientInterface, error) { version := "v1" - var httpHeads []httpHead - for _, i := range ainfoCfg { - ainfo := cliutil.ParseApiInfo(i) - addr, err := ainfo.DialArgs(version) - if err != nil { - return nil, xerrors.Errorf("could not get eth DialArgs: %w", err) + var ethClientDynamic = config.NewDynamic([]*ethclient.Client{}) + updateDynamic := func() error { + if len(ainfoCfg.Get()) == 0 { + return xerrors.Errorf("could not get API info: none configured. \nConsider getting base.toml with './curio config get base >/tmp/base.toml' \nthen adding \n[APIs] \n ChainApiInfo = [\" result_from lotus auth api-info --perm=admin \"]\n and updating it with './curio config set /tmp/base.toml'") } - httpHeads = append(httpHeads, httpHead{addr: addr, header: ainfo.AuthHeader()}) - } - - var clients []*ethclient.Client - - for _, head := range httpHeads { - if cliutil.IsVeryVerbose { - _, _ = fmt.Fprintln(cctx.App.Writer, "using eth client endpoint:", head.addr) + var httpHeads []httpHead + for _, i := range ainfoCfg.Get() { + ainfo := cliutil.ParseApiInfo(i) + addr, err := ainfo.DialArgs(version) + if err != nil { + return xerrors.Errorf("could not get eth DialArgs: %w", err) + } + httpHeads = append(httpHeads, httpHead{addr: addr, header: ainfo.AuthHeader()}) } - d := websocket.Dialer{ - HandshakeTimeout: 10 * time.Second, - ReadBufferSize: 4096, - WriteBufferSize: 4096, - } + var clients []*ethclient.Client + for _, head := range httpHeads { + if cliutil.IsVeryVerbose { + _, _ = fmt.Fprintln(cctx.App.Writer, "using eth client endpoint:", head.addr) + } + + d := websocket.Dialer{ + HandshakeTimeout: 10 * 
time.Second, + ReadBufferSize: 4096, + WriteBufferSize: 4096, + } - wopts := erpc.WithWebsocketDialer(d) - hopts := erpc.WithHeaders(head.header) + wopts := erpc.WithWebsocketDialer(d) + hopts := erpc.WithHeaders(head.header) - rpcClient, err := erpc.DialOptions(cctx.Context, head.addr, wopts, hopts) - if err != nil { - log.Warnf("failed to dial eth client: %s", err) - continue + rpcClient, err := erpc.DialOptions(cctx.Context, head.addr, wopts, hopts) + if err != nil { + log.Warnf("failed to dial eth client: %s", err) + continue + } + client := ethclient.NewClient(rpcClient) + _, err = client.BlockNumber(cctx.Context) + if err != nil { + log.Warnf("failed to get eth block number: %s", err) + continue + } + clients = append(clients, client) } - client := ethclient.NewClient(rpcClient) - _, err = client.BlockNumber(cctx.Context) - if err != nil { - log.Warnf("failed to get eth block number: %s", err) - continue + + if len(clients) == 0 { + return errors.New("failed to establish connection with all nodes") } - clients = append(clients, client) + + ethClientDynamic.Set(clients) + return nil + } + if err := updateDynamic(); err != nil { + return nil, err } + ainfoCfg.OnChange(func() { + if err := updateDynamic(); err != nil { + clog.Errorf("failed to update eth client: %s", err) + } + }) + + var ethClient api.EthClientInterfaceStruct + EthClientProxy(ethClientDynamic, ðClient) + return ðClient, nil +} + +func EthClientProxy(ins *config.Dynamic[[]*ethclient.Client], outstr api.EthClientInterface) { + providerCount := len(ins.Get()) - if len(clients) == 0 { - return nil, xerrors.Errorf("failed to establish connection with all nodes") + var healthyLk sync.Mutex + unhealthyProviders := make([]bool, providerCount) + + nextHealthyProvider := func(start int) int { + healthyLk.Lock() + defer healthyLk.Unlock() + + for i := 0; i < providerCount; i++ { + idx := (start + i) % providerCount + if !unhealthyProviders[idx] { + return idx + } + } + return preferredAllBad } - return 
clients[0], nil + // Create a no-op start watch function since eth client doesn't need health monitoring like chain + startWatch := func() {} + var starWatchOnce sync.Once + // Use the existing populateProxyMethods function + populateProxyMethods(outstr, ins, nextHealthyProvider, startWatch, &starWatchOnce, providerCount) } diff --git a/deps/config/doc_gen.go b/deps/config/doc_gen.go index 271b6d822..de33cfa5f 100644 --- a/deps/config/doc_gen.go +++ b/deps/config/doc_gen.go @@ -14,7 +14,8 @@ var Doc = map[string][]DocField{ Name: "ChainApiInfo", Type: "[]string", - Comment: `ChainApiInfo is the API endpoint for the Lotus daemon.`, + Comment: `ChainApiInfo is the API endpoint for the Lotus daemon. +Updates will affect running instances.`, }, { Name: "StorageRPCSecret", @@ -36,36 +37,33 @@ var Doc = map[string][]DocField{ Name: "Base", Type: "types.FIL", - Comment: `Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix.`, + Comment: `Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. +Updates will affect running instances.`, }, { Name: "PerSector", Type: "types.FIL", - Comment: `Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix.`, + Comment: `Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. +Updates will affect running instances.`, }, }, "CommitBatchingConfig": { - { - Name: "BaseFeeThreshold", - Type: "types.FIL", - - Comment: `Base fee value below which we should try to send Commit messages immediately -Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "0.005 FIL")`, - }, { Name: "Timeout", Type: "time.Duration", Comment: `Maximum amount of time any given sector in the batch can wait for the batch to accumulate -Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s")`, +Time duration string (e.g., "1h2m3s") in TOML format. 
(Default: "1h0m0s") +Updates will affect running instances.`, }, { Name: "Slack", Type: "time.Duration", Comment: `Time buffer for forceful batch submission before sectors/deals in batch would start expiring -Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s")`, +Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") +Updates will affect running instances.`, }, }, "CompressionConfig": { @@ -150,7 +148,8 @@ including collateral and other operational resources.`, Comment: `MinimumWalletBalance is the minimum balance all active wallets. If the balance is below this value, an alerts will be triggered for the wallet -Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "5 FIL")`, +Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "5 FIL") +Updates will affect running instances.`, }, { Name: "PagerDuty", @@ -287,19 +286,22 @@ Updates will affect running instances.`, Type: "types.FIL", Comment: `WindowPoSt is a high-value operation, so the default fee should be high. -Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. (Default: "5 fil")`, +Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. 
(Default: "5 fil") +Updates will affect running instances.`, }, { Name: "CollateralFromMinerBalance", Type: "bool", - Comment: `Whether to use available miner balance for sector collateral instead of sending it with each message (Default: false)`, + Comment: `Whether to use available miner balance for sector collateral instead of sending it with each message (Default: false) +Updates will affect running instances.`, }, { Name: "DisableCollateralFallback", Type: "bool", - Comment: `Don't send collateral with messages even if there is no available balance in the miner actor (Default: false)`, + Comment: `Don't send collateral with messages even if there is no available balance in the miner actor (Default: false) +Updates will affect running instances.`, }, { Name: "MaximizeFeeCap", @@ -307,7 +309,8 @@ Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffi Comment: `MaximizeFeeCap makes the sender set maximum allowed FeeCap on all sent messages. This generally doesn't increase message cost, but in highly congested network messages -are much less likely to get stuck in mempool. (Default: true)`, +are much less likely to get stuck in mempool. (Default: true) +Updates will affect running instances.`, }, }, "CurioIngestConfig": { @@ -1180,7 +1183,8 @@ If True then all deals coming from unknown clients will be rejected. (Default: f Name: "Enable", Type: "bool", - Comment: `Enable is a flag to enable or disable the PagerDuty integration.`, + Comment: `Enable is a flag to enable or disable the PagerDuty integration. +Updates will affect running instances.`, }, { Name: "PagerDutyEventURL", @@ -1188,14 +1192,16 @@ If True then all deals coming from unknown clients will be rejected. (Default: f Comment: `PagerDutyEventURL is URL for PagerDuty.com Events API v2 URL. Events sent to this API URL are ultimately routed to a PagerDuty.com service and processed. 
-The default is sufficient for integration with the stock commercial PagerDuty.com company's service.`, +The default is sufficient for integration with the stock commercial PagerDuty.com company's service. +Updates will affect running instances.`, }, { Name: "PageDutyIntegrationKey", Type: "string", Comment: `PageDutyIntegrationKey is the integration key for a PagerDuty.com service. You can find this unique service -identifier in the integration page for the service.`, +identifier in the integration page for the service. +Updates will affect running instances.`, }, }, "PieceLocatorConfig": { @@ -1213,26 +1219,21 @@ identifier in the integration page for the service.`, }, }, "PreCommitBatchingConfig": { - { - Name: "BaseFeeThreshold", - Type: "types.FIL", - - Comment: `Base fee value below which we should try to send Precommit messages immediately -Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "0.005 FIL")`, - }, { Name: "Timeout", Type: "time.Duration", Comment: `Maximum amount of time any given sector in the batch can wait for the batch to accumulate -Time duration string (e.g., "1h2m3s") in TOML format. (Default: "4h0m0s")`, +Time duration string (e.g., "1h2m3s") in TOML format. (Default: "4h0m0s") +Updates will affect running instances.`, }, { Name: "Slack", Type: "time.Duration", Comment: `Time buffer for forceful batch submission before sectors/deal in batch would start expiring -Time duration string (e.g., "1h2m3s") in TOML format. (Default: "6h0m0s")`, +Time duration string (e.g., "1h2m3s") in TOML format. (Default: "6h0m0s") +Updates will affect running instances.`, }, }, "PrometheusAlertManagerConfig": { @@ -1240,13 +1241,15 @@ Time duration string (e.g., "1h2m3s") in TOML format. 
(Default: "6h0m0s")`, Name: "Enable", Type: "bool", - Comment: `Enable is a flag to enable or disable the Prometheus AlertManager integration.`, + Comment: `Enable is a flag to enable or disable the Prometheus AlertManager integration. +Updates will affect running instances.`, }, { Name: "AlertManagerURL", Type: "string", - Comment: `AlertManagerURL is the URL for the Prometheus AlertManager API v2 URL.`, + Comment: `AlertManagerURL is the URL for the Prometheus AlertManager API v2 URL. +Updates will affect running instances.`, }, }, "SlackWebhookConfig": { @@ -1254,14 +1257,16 @@ Time duration string (e.g., "1h2m3s") in TOML format. (Default: "6h0m0s")`, Name: "Enable", Type: "bool", - Comment: `Enable is a flag to enable or disable the Prometheus AlertManager integration.`, + Comment: `Enable is a flag to enable or disable the Prometheus AlertManager integration. +Updates will affect running instances.`, }, { Name: "WebHookURL", Type: "string", Comment: `WebHookURL is the URL for the URL for slack Webhook. -Example: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX`, +Example: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX +Updates will affect running instances.`, }, }, "StorageMarketConfig": { @@ -1297,7 +1302,8 @@ Example: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXX User can run a remote file server which can host all the pieces over the HTTP and supply a reader when requested. The server must support "HEAD" request and "GET" request. 1. ?id=pieceCID with "HEAD" request responds with 200 if found or 404 if not. Must send header "Content-Length" with file size as value -2. ?id=pieceCID must provide a reader for the requested piece along with header "Content-Length" with file size as value`, +2. 
?id=pieceCID must provide a reader for the requested piece along with header "Content-Length" with file size as value +Updates will affect running instances.`, }, }, "UpdateBatchingConfig": { @@ -1306,21 +1312,24 @@ The server must support "HEAD" request and "GET" request. Type: "types.FIL", Comment: `Base fee value below which we should try to send Commit messages immediately -Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "0.005 FIL")`, +Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "0.005 FIL") +Updates will affect running instances.`, }, { Name: "Timeout", Type: "time.Duration", Comment: `Maximum amount of time any given sector in the batch can wait for the batch to accumulate -Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s")`, +Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") +Updates will affect running instances.`, }, { Name: "Slack", Type: "time.Duration", Comment: `Time buffer for forceful batch submission before sectors/deals in batch would start expiring -Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s")`, +Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") +Updates will affect running instances.`, }, }, } diff --git a/deps/config/dynamic.go b/deps/config/dynamic.go index 85cd438f5..ad9a06684 100644 --- a/deps/config/dynamic.go +++ b/deps/config/dynamic.go @@ -52,11 +52,22 @@ func (d *Dynamic[T]) OnChange(fn func()) { } } +// It locks dynamicLocker, unless we're already in an updating context. 
func (d *Dynamic[T]) Set(value T) { - dynamicLocker.Lock() - defer dynamicLocker.Unlock() - dynamicLocker.inform(reflect.ValueOf(d).Pointer(), d.value, value) - d.value = value + // Check if we're already in an updating context (changeMonitor) + updating := atomic.LoadInt32(&dynamicLocker.updating) + + if updating != 0 { + // We're in changeMonitor context, don't acquire the lock to avoid deadlock + dynamicLocker.inform(reflect.ValueOf(d).Pointer(), d.value, value) + d.value = value + } else { + // Normal case - acquire the lock + dynamicLocker.Lock() + defer dynamicLocker.Unlock() + dynamicLocker.inform(reflect.ValueOf(d).Pointer(), d.value, value) + d.value = value + } } // SetWithoutLock sets the value without acquiring a lock. @@ -69,19 +80,23 @@ func (d *Dynamic[T]) SetWithoutLock(value T) { func (d *Dynamic[T]) Get() T { dynamicLocker.RLock() defer dynamicLocker.RUnlock() - return d.value + return d.GetWithoutLock() } // GetWithoutLock gets the value without acquiring a lock. // Only use this when you're already holding the top-level write lock (e.g., during FixTOML). func (d *Dynamic[T]) GetWithoutLock() T { + if d == nil { + var zero T + return zero + } return d.value } // Equal is used by cmp.Equal for custom comparison. -// If used from deps, requires a lock. +// It doesn't lock dynamicLocker as this typically is used for an update test. func (d *Dynamic[T]) Equal(other *Dynamic[T]) bool { - return cmp.Equal(d.value, other.value, BigIntComparer, cmpopts.EquateEmpty()) + return cmp.Equal(d.GetWithoutLock(), other.GetWithoutLock(), BigIntComparer, cmpopts.EquateEmpty()) } // MarshalTOML cannot be implemented for struct types because it won't be boxed correctly. 
@@ -184,8 +199,11 @@ func (r *cfgRoot[T]) changeMonitor() { lastTimestamp := time.Time{} // lets do a read at startup for { + time.Sleep(30 * time.Second) configCount := 0 - err := r.db.QueryRow(context.Background(), `SELECT COUNT(*) FROM harmony_config WHERE timestamp > $1 AND title IN ($2)`, lastTimestamp, strings.Join(r.layers, ",")).Scan(&configCount) + // Note: We need to prepend "base" layer like GetConfigs does + layers := append([]string{"base"}, r.layers...) + err := r.db.QueryRow(context.Background(), `SELECT COUNT(*) FROM harmony_config WHERE timestamp > $1 AND title = ANY($2)`, lastTimestamp, layers).Scan(&configCount) if err != nil { logger.Errorf("error selecting configs: %s", err) continue @@ -206,15 +224,31 @@ func (r *cfgRoot[T]) changeMonitor() { // inconsistent state. FixTOML uses GetWithoutLock() and TransparentDecode uses // SetWithoutLock() to avoid deadlocks. func() { + // Set a flag to indicate we're in change monitor context dynamicLocker.Lock() defer dynamicLocker.Unlock() err = ApplyLayers(context.Background(), r.treeCopy, configs, r.fixupFn) if err != nil { logger.Errorf("dynamic config failed to ApplyLayers: %s", err) + // Reset updating flag on error + atomic.StoreInt32(&dynamicLocker.updating, 0) return } + + // Process change notifications (we already hold the lock) + atomic.StoreInt32(&dynamicLocker.updating, 0) + dynamicLocker.cdmx.Lock() + for k, v := range dynamicLocker.latest { + if !cmp.Equal(v, dynamicLocker.originally[k], BigIntComparer, cmp.Reporter(&reportHandler{})) { + if notifier := dynamicLocker.notifier[k]; notifier != nil { + go notifier() + } + } + } + dynamicLocker.originally = make(map[uintptr]any) + dynamicLocker.latest = make(map[uintptr]any) + dynamicLocker.cdmx.Unlock() }() - time.Sleep(30 * time.Second) } } @@ -226,8 +260,14 @@ var dynamicLocker = changeNotifier{diff: diff{ } type changeNotifier struct { - sync.RWMutex // this protects the dynamic[T] reads from getting a race with the updating - updating int32 
// atomic: 1 if updating, 0 if not. determines which mode we are in: updating or querying + sync.RWMutex // Protects Dynamic[T] reads/writes during config updates + + // updating is an atomic flag (1=updating, 0=idle) that indicates whether + // changeMonitor is currently applying new config layers. When set, Dynamic.Set() + // skips locking to avoid deadlock, since changeMonitor already holds the write lock. + // This allows config reload (via TransparentDecode) to update Dynamic values without + // re-acquiring locks. Always access via atomic.LoadInt32/StoreInt32. + updating int32 diff @@ -244,13 +284,13 @@ func (c *changeNotifier) Lock() { atomic.StoreInt32(&c.updating, 1) } func (c *changeNotifier) Unlock() { - c.cdmx.Lock() c.RWMutex.Unlock() + c.cdmx.Lock() defer c.cdmx.Unlock() atomic.StoreInt32(&c.updating, 0) for k, v := range c.latest { - if !cmp.Equal(v, c.originally[k], BigIntComparer) { + if !cmp.Equal(v, c.originally[k], BigIntComparer, cmp.Reporter(&reportHandler{})) { if notifier := c.notifier[k]; notifier != nil { go notifier() } @@ -260,6 +300,24 @@ func (c *changeNotifier) Unlock() { c.latest = make(map[uintptr]any) } +type reportHandler struct { + changes []string + newValue any + oldValue any +} + +func (r *reportHandler) PushStep(path cmp.PathStep) { + r.changes = append(r.changes, path.String()) + r.newValue, r.oldValue = path.Values() +} +func (r *reportHandler) Report(result cmp.Result) { + if !result.Equal() { + logger.Infof("Dynamic configuration %s updated from %v to %v", strings.Join(r.changes, "."), r.oldValue, r.newValue) + } +} +func (r *reportHandler) PopStep() { + r.changes = r.changes[:len(r.changes)-1] +} func (c *changeNotifier) inform(ptr uintptr, oldValue any, newValue any) { if atomic.LoadInt32(&c.updating) == 0 { return diff --git a/deps/config/old_lotus_miner.go b/deps/config/old_lotus_miner.go index d4a8d3673..9dda6ce75 100644 --- a/deps/config/old_lotus_miner.go +++ b/deps/config/old_lotus_miner.go @@ -635,12 +635,12 @@ 
func DefaultStorageMiner() *StorageMiner { MaxCommitGasFee: types.MustParseFIL("0.05"), MaxPreCommitBatchGasFee: BatchFeeConfig{ - Base: types.MustParseFIL("0"), - PerSector: types.MustParseFIL("0.02"), + Base: NewDynamic(types.MustParseFIL("0")), + PerSector: NewDynamic(types.MustParseFIL("0.02")), }, MaxCommitBatchGasFee: BatchFeeConfig{ - Base: types.MustParseFIL("0"), - PerSector: types.MustParseFIL("0.03"), // enough for 6 agg and 1nFIL base fee + Base: NewDynamic(types.MustParseFIL("0")), + PerSector: NewDynamic(types.MustParseFIL("0.03")), // enough for 6 agg and 1nFIL base fee }, MaxTerminateGasFee: types.MustParseFIL("0.5"), diff --git a/deps/config/types.go b/deps/config/types.go index cc1ae5024..a1c10fb44 100644 --- a/deps/config/types.go +++ b/deps/config/types.go @@ -21,22 +21,22 @@ func DefaultCurioConfig() *CurioConfig { }, Fees: CurioFees{ MaxPreCommitBatchGasFee: BatchFeeConfig{ - Base: types.MustParseFIL("0"), - PerSector: types.MustParseFIL("0.02"), + Base: NewDynamic(types.MustParseFIL("0")), + PerSector: NewDynamic(types.MustParseFIL("0.02")), }, MaxCommitBatchGasFee: BatchFeeConfig{ - Base: types.MustParseFIL("0"), - PerSector: types.MustParseFIL("0.03"), // enough for 6 agg and 1nFIL base fee + Base: NewDynamic(types.MustParseFIL("0")), + PerSector: NewDynamic(types.MustParseFIL("0.03")), // enough for 6 agg and 1nFIL base fee }, MaxUpdateBatchGasFee: BatchFeeConfig{ - Base: types.MustParseFIL("0"), - PerSector: types.MustParseFIL("0.03"), + Base: NewDynamic(types.MustParseFIL("0")), + PerSector: NewDynamic(types.MustParseFIL("0.03")), }, - MaxWindowPoStGasFee: types.MustParseFIL("5"), - CollateralFromMinerBalance: false, - DisableCollateralFallback: false, - MaximizeFeeCap: true, + MaxWindowPoStGasFee: NewDynamic(types.MustParseFIL("5")), + CollateralFromMinerBalance: NewDynamic(false), + DisableCollateralFallback: NewDynamic(false), + MaximizeFeeCap: NewDynamic(true), }, Addresses: NewDynamic([]CurioAddresses{{ PreCommitControl: []string{}, 
@@ -56,6 +56,9 @@ func DefaultCurioConfig() *CurioConfig { BatchSealBatchSize: 32, BatchSealSectorSize: "32GiB", }, + Apis: ApisConfig{ + ChainApiInfo: NewDynamic([]string{}), + }, Ingest: CurioIngestConfig{ MaxMarketRunningPipelines: NewDynamic(64), MaxQueueDownload: NewDynamic(8), @@ -72,34 +75,32 @@ func DefaultCurioConfig() *CurioConfig { MaxDealWaitTime: NewDynamic(time.Hour), }, Alerting: CurioAlertingConfig{ - MinimumWalletBalance: types.MustParseFIL("5"), + MinimumWalletBalance: NewDynamic(types.MustParseFIL("5")), PagerDuty: PagerDutyConfig{ - PagerDutyEventURL: "https://events.pagerduty.com/v2/enqueue", + PagerDutyEventURL: NewDynamic("https://events.pagerduty.com/v2/enqueue"), }, PrometheusAlertManager: PrometheusAlertManagerConfig{ - AlertManagerURL: "http://localhost:9093/api/v2/alerts", + AlertManagerURL: NewDynamic("http://localhost:9093/api/v2/alerts"), }, }, Batching: CurioBatchingConfig{ PreCommit: PreCommitBatchingConfig{ - BaseFeeThreshold: types.MustParseFIL("0.005"), - Timeout: 4 * time.Hour, - Slack: 6 * time.Hour, + Timeout: NewDynamic(4 * time.Hour), + Slack: NewDynamic(6 * time.Hour), }, Commit: CommitBatchingConfig{ - BaseFeeThreshold: types.MustParseFIL("0.005"), - Timeout: time.Hour, - Slack: time.Hour, + Timeout: NewDynamic(time.Hour), + Slack: NewDynamic(time.Hour), }, Update: UpdateBatchingConfig{ - BaseFeeThreshold: types.MustParseFIL("0.005"), - Timeout: time.Hour, - Slack: time.Hour, + BaseFeeThreshold: NewDynamic(types.MustParseFIL("0.005")), + Timeout: NewDynamic(time.Hour), + Slack: NewDynamic(time.Hour), }, }, Market: MarketConfig{ StorageMarketConfig: StorageMarketConfig{ - PieceLocator: []PieceLocatorConfig{}, + PieceLocator: NewDynamic([]PieceLocatorConfig{}), Indexing: IndexingConfig{ InsertConcurrency: 10, InsertBatchSize: 1000, @@ -191,14 +192,14 @@ type CurioConfig struct { type BatchFeeConfig struct { // Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. 
- Base types.FIL + Base *Dynamic[types.FIL] // Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. - PerSector types.FIL + PerSector *Dynamic[types.FIL] } func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount { - return big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector))) + return big.Add(big.Int(b.Base.Get()), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector.Get()))) } type CurioSubsystemsConfig struct { @@ -437,18 +438,18 @@ type CurioFees struct { // WindowPoSt is a high-value operation, so the default fee should be high. // Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. (Default: "5 fil") - MaxWindowPoStGasFee types.FIL + MaxWindowPoStGasFee *Dynamic[types.FIL] // Whether to use available miner balance for sector collateral instead of sending it with each message (Default: false) - CollateralFromMinerBalance bool + CollateralFromMinerBalance *Dynamic[bool] // Don't send collateral with messages even if there is no available balance in the miner actor (Default: false) - DisableCollateralFallback bool + DisableCollateralFallback *Dynamic[bool] // MaximizeFeeCap makes the sender set maximum allowed FeeCap on all sent messages. // This generally doesn't increase message cost, but in highly congested network messages // are much less likely to get stuck in mempool. (Default: true) - MaximizeFeeCap bool + MaximizeFeeCap *Dynamic[bool] } type CurioAddresses struct { @@ -596,7 +597,7 @@ type CurioAlertingConfig struct { // MinimumWalletBalance is the minimum balance all active wallets. If the balance is below this value, an // alerts will be triggered for the wallet // Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "5 FIL") - MinimumWalletBalance types.FIL + MinimumWalletBalance *Dynamic[types.FIL] // PagerDutyConfig is the configuration for the PagerDuty alerting integration. 
PagerDuty PagerDutyConfig @@ -639,38 +640,38 @@ type CurioSealConfig struct { type PagerDutyConfig struct { // Enable is a flag to enable or disable the PagerDuty integration. - Enable bool + Enable *Dynamic[bool] // PagerDutyEventURL is URL for PagerDuty.com Events API v2 URL. Events sent to this API URL are ultimately // routed to a PagerDuty.com service and processed. // The default is sufficient for integration with the stock commercial PagerDuty.com company's service. - PagerDutyEventURL string + PagerDutyEventURL *Dynamic[string] // PageDutyIntegrationKey is the integration key for a PagerDuty.com service. You can find this unique service // identifier in the integration page for the service. - PageDutyIntegrationKey string + PageDutyIntegrationKey *Dynamic[string] } type PrometheusAlertManagerConfig struct { // Enable is a flag to enable or disable the Prometheus AlertManager integration. - Enable bool + Enable *Dynamic[bool] // AlertManagerURL is the URL for the Prometheus AlertManager API v2 URL. - AlertManagerURL string + AlertManagerURL *Dynamic[string] } type SlackWebhookConfig struct { // Enable is a flag to enable or disable the Slack Webhook integration. - Enable bool + Enable *Dynamic[bool] // WebHookURL is the URL for the Slack Webhook. // Example: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX - WebHookURL string + WebHookURL *Dynamic[string] } type ApisConfig struct { // ChainApiInfo is the API endpoint for the Lotus daemon. - ChainApiInfo []string + ChainApiInfo *Dynamic[[]string] // API auth secret for the Curio nodes to use. This value should only be set on the base layer. StorageRPCSecret string @@ -688,45 +689,37 @@ type CurioBatchingConfig struct { } type PreCommitBatchingConfig struct { - // Base fee value below which we should try to send Precommit messages immediately - // Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix.
(Default: "0.005 FIL") - BaseFeeThreshold types.FIL - // Maximum amount of time any given sector in the batch can wait for the batch to accumulate // Time duration string (e.g., "1h2m3s") in TOML format. (Default: "4h0m0s") - Timeout time.Duration + Timeout *Dynamic[time.Duration] // Time buffer for forceful batch submission before sectors/deal in batch would start expiring // Time duration string (e.g., "1h2m3s") in TOML format. (Default: "6h0m0s") - Slack time.Duration + Slack *Dynamic[time.Duration] } type CommitBatchingConfig struct { - // Base fee value below which we should try to send Commit messages immediately - // Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "0.005 FIL") - BaseFeeThreshold types.FIL - // Maximum amount of time any given sector in the batch can wait for the batch to accumulate // Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") - Timeout time.Duration + Timeout *Dynamic[time.Duration] // Time buffer for forceful batch submission before sectors/deals in batch would start expiring // Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") - Slack time.Duration + Slack *Dynamic[time.Duration] } type UpdateBatchingConfig struct { // Base fee value below which we should try to send Commit messages immediately // Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "0.005 FIL") - BaseFeeThreshold types.FIL + BaseFeeThreshold *Dynamic[types.FIL] // Maximum amount of time any given sector in the batch can wait for the batch to accumulate // Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") - Timeout time.Duration + Timeout *Dynamic[time.Duration] // Time buffer for forceful batch submission before sectors/deals in batch would start expiring // Time duration string (e.g., "1h2m3s") in TOML format. 
(Default: "1h0m0s") - Slack time.Duration + Slack *Dynamic[time.Duration] } type MarketConfig struct { @@ -752,7 +745,7 @@ type StorageMarketConfig struct { // The server must support "HEAD" request and "GET" request. // 1. ?id=pieceCID with "HEAD" request responds with 200 if found or 404 if not. Must send header "Content-Length" with file size as value // 2. ?id=pieceCID must provide a reader for the requested piece along with header "Content-Length" with file size as value - PieceLocator []PieceLocatorConfig + PieceLocator *Dynamic[[]PieceLocatorConfig] } type MK12Config struct { diff --git a/deps/deps.go b/deps/deps.go index 256c68f00..c1a21a2c2 100644 --- a/deps/deps.go +++ b/deps/deps.go @@ -17,7 +17,6 @@ import ( "strings" "github.com/BurntSushi/toml" - "github.com/ethereum/go-ethereum/ethclient" "github.com/gbrlsnchs/jwt/v3" logging "github.com/ipfs/go-log/v2" "github.com/kr/pretty" @@ -175,7 +174,7 @@ type Deps struct { SectorReader *pieceprovider.SectorReader CachedPieceReader *cachedreader.CachedPieceReader ServeChunker *chunker.ServeChunker - EthClient *lazy.Lazy[*ethclient.Client] + EthClient *lazy.Lazy[api.EthClientInterface] Sender *message.Sender } @@ -248,7 +247,7 @@ func (deps *Deps) PopulateRemainingDeps(ctx context.Context, cctx *cli.Context, var fullCloser func() cfgApiInfo := deps.Cfg.Apis.ChainApiInfo if v := os.Getenv("FULLNODE_API_INFO"); v != "" { - cfgApiInfo = []string{v} + cfgApiInfo = config.NewDynamic([]string{v}) } deps.Chain, fullCloser, err = GetFullNodeAPIV1Curio(cctx, cfgApiInfo) if err != nil { @@ -262,10 +261,10 @@ func (deps *Deps) PopulateRemainingDeps(ctx context.Context, cctx *cli.Context, } if deps.EthClient == nil { - deps.EthClient = lazy.MakeLazy(func() (*ethclient.Client, error) { + deps.EthClient = lazy.MakeLazy(func() (api.EthClientInterface, error) { cfgApiInfo := deps.Cfg.Apis.ChainApiInfo if v := os.Getenv("FULLNODE_API_INFO"); v != "" { - cfgApiInfo = []string{v} + cfgApiInfo = config.NewDynamic([]string{v}) } 
return GetEthClient(cctx, cfgApiInfo) }) @@ -598,7 +597,7 @@ func GetDefaultConfig(comment bool) (string, error) { return string(cb), nil } -func GetAPI(ctx context.Context, cctx *cli.Context) (*harmonydb.DB, *config.CurioConfig, api.Chain, jsonrpc.ClientCloser, *lazy.Lazy[*ethclient.Client], error) { +func GetAPI(ctx context.Context, cctx *cli.Context) (*harmonydb.DB, *config.CurioConfig, api.Chain, jsonrpc.ClientCloser, *lazy.Lazy[api.EthClientInterface], error) { db, err := MakeDB(cctx) if err != nil { return nil, nil, nil, nil, nil, err @@ -613,7 +612,7 @@ func GetAPI(ctx context.Context, cctx *cli.Context) (*harmonydb.DB, *config.Curi cfgApiInfo := cfg.Apis.ChainApiInfo if v := os.Getenv("FULLNODE_API_INFO"); v != "" { - cfgApiInfo = []string{v} + cfgApiInfo = config.NewDynamic([]string{v}) } full, fullCloser, err := GetFullNodeAPIV1Curio(cctx, cfgApiInfo) @@ -621,7 +620,7 @@ func GetAPI(ctx context.Context, cctx *cli.Context) (*harmonydb.DB, *config.Curi return nil, nil, nil, nil, nil, err } - ethClient := lazy.MakeLazy(func() (*ethclient.Client, error) { + ethClient := lazy.MakeLazy(func() (api.EthClientInterface, error) { return GetEthClient(cctx, cfgApiInfo) }) @@ -707,7 +706,7 @@ func CreateMinerConfig(ctx context.Context, full CreateMinerConfigChainAPI, db * } { - curioConfig.Apis.ChainApiInfo = append(curioConfig.Apis.ChainApiInfo, info) + curioConfig.Apis.ChainApiInfo.Set(append(curioConfig.Apis.ChainApiInfo.Get(), info)) } curioConfig.Addresses.Set(lo.Filter(curioConfig.Addresses.Get(), func(a config.CurioAddresses, _ int) bool { diff --git a/documentation/en/configuration/default-curio-configuration.md b/documentation/en/configuration/default-curio-configuration.md index 02d9af04d..1af58717d 100644 --- a/documentation/en/configuration/default-curio-configuration.md +++ b/documentation/en/configuration/default-curio-configuration.md @@ -323,16 +323,19 @@ description: The default curio configuration # WindowPoSt is a high-value operation, so the 
default fee should be high. # Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. (Default: "5 fil") + # Updates will affect running instances. # # type: types.FIL #MaxWindowPoStGasFee = "5 FIL" # Whether to use available miner balance for sector collateral instead of sending it with each message (Default: false) + # Updates will affect running instances. # # type: bool #CollateralFromMinerBalance = false # Don't send collateral with messages even if there is no available balance in the miner actor (Default: false) + # Updates will affect running instances. # # type: bool #DisableCollateralFallback = false @@ -340,6 +343,7 @@ description: The default curio configuration # MaximizeFeeCap makes the sender set maximum allowed FeeCap on all sent messages. # This generally doesn't increase message cost, but in highly congested network messages # are much less likely to get stuck in mempool. (Default: true) + # Updates will affect running instances. # # type: bool #MaximizeFeeCap = true @@ -351,11 +355,13 @@ description: The default curio configuration [Fees.MaxPreCommitBatchGasFee] # Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. + # Updates will affect running instances. # # type: types.FIL #Base = "0 FIL" # Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. + # Updates will affect running instances. # # type: types.FIL #PerSector = "0.02 FIL" @@ -367,11 +373,13 @@ description: The default curio configuration [Fees.MaxCommitBatchGasFee] # Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. + # Updates will affect running instances. # # type: types.FIL #Base = "0 FIL" # Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. + # Updates will affect running instances. 
# # type: types.FIL #PerSector = "0.03 FIL" @@ -383,11 +391,13 @@ description: The default curio configuration [Fees.MaxUpdateBatchGasFee] # Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. + # Updates will affect running instances. # # type: types.FIL #Base = "0 FIL" # Accepts a decimal string (e.g., "123.45") with optional "fil" or "attofil" suffix. + # Updates will affect running instances. # # type: types.FIL #PerSector = "0.03 FIL" @@ -615,6 +625,7 @@ description: The default curio configuration # The server must support "HEAD" request and "GET" request. # 1. ?id=pieceCID with "HEAD" request responds with 200 if found or 404 if not. Must send header "Content-Length" with file size as value # 2. ?id=pieceCID must provide a reader for the requested piece along with header "Content-Length" with file size as value + # Updates will affect running instances. # # type: []PieceLocatorConfig #PieceLocator = [] @@ -934,6 +945,12 @@ description: The default curio configuration # type: ApisConfig [Apis] + # ChainApiInfo is the API endpoint for the Lotus daemon. + # Updates will affect running instances. + # + # type: []string + #ChainApiInfo = [] + # API auth secret for the Curio nodes to use. This value should only be set on the base layer. # # type: string @@ -948,6 +965,7 @@ description: The default curio configuration # MinimumWalletBalance is the minimum balance all active wallets. If the balance is below this value, an # alerts will be triggered for the wallet # Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "5 FIL") + # Updates will affect running instances. # # type: types.FIL #MinimumWalletBalance = "5 FIL" @@ -958,6 +976,7 @@ description: The default curio configuration [Alerting.PagerDuty] # Enable is a flag to enable or disable the PagerDuty integration. + # Updates will affect running instances.
# # type: bool #Enable = false @@ -965,12 +984,14 @@ description: The default curio configuration # PagerDutyEventURL is URL for PagerDuty.com Events API v2 URL. Events sent to this API URL are ultimately # routed to a PagerDuty.com service and processed. # The default is sufficient for integration with the stock commercial PagerDuty.com company's service. + # Updates will affect running instances. # # type: string #PagerDutyEventURL = "https://events.pagerduty.com/v2/enqueue" # PageDutyIntegrationKey is the integration key for a PagerDuty.com service. You can find this unique service # identifier in the integration page for the service. + # Updates will affect running instances. # # type: string #PageDutyIntegrationKey = "" @@ -981,11 +1002,13 @@ description: The default curio configuration [Alerting.PrometheusAlertManager] # Enable is a flag to enable or disable the Prometheus AlertManager integration. + # Updates will affect running instances. # # type: bool #Enable = false # AlertManagerURL is the URL for the Prometheus AlertManager API v2 URL. + # Updates will affect running instances. # # type: string #AlertManagerURL = "http://localhost:9093/api/v2/alerts" @@ -996,12 +1019,14 @@ description: The default curio configuration [Alerting.SlackWebhook] # Enable is a flag to enable or disable the Slack Webhook integration. + # Updates will affect running instances. # # type: bool #Enable = false # WebHookURL is the URL for the Slack Webhook. # Example: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX + # Updates will affect running instances. # # type: string #WebHookURL = "" @@ -1017,20 +1042,16 @@ description: The default curio configuration # type: PreCommitBatchingConfig [Batching.PreCommit] - # Base fee value below which we should try to send Precommit messages immediately - # Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix.
(Default: "0.005 FIL") - # - # type: types.FIL - #BaseFeeThreshold = "0.005 FIL" - # Maximum amount of time any given sector in the batch can wait for the batch to accumulate # Time duration string (e.g., "1h2m3s") in TOML format. (Default: "4h0m0s") + # Updates will affect running instances. # # type: time.Duration #Timeout = "4h0m0s" # Time buffer for forceful batch submission before sectors/deal in batch would start expiring # Time duration string (e.g., "1h2m3s") in TOML format. (Default: "6h0m0s") + # Updates will affect running instances. # # type: time.Duration #Slack = "6h0m0s" @@ -1040,20 +1061,16 @@ description: The default curio configuration # type: CommitBatchingConfig [Batching.Commit] - # Base fee value below which we should try to send Commit messages immediately - # Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "0.005 FIL") - # - # type: types.FIL - #BaseFeeThreshold = "0.005 FIL" - # Maximum amount of time any given sector in the batch can wait for the batch to accumulate # Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") + # Updates will affect running instances. # # type: time.Duration #Timeout = "1h0m0s" # Time buffer for forceful batch submission before sectors/deals in batch would start expiring # Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") + # Updates will affect running instances. # # type: time.Duration #Slack = "1h0m0s" @@ -1065,18 +1082,21 @@ description: The default curio configuration # Base fee value below which we should try to send Commit messages immediately # Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "attofil" suffix. (Default: "0.005 FIL") + # Updates will affect running instances. # # type: types.FIL #BaseFeeThreshold = "0.005 FIL" # Maximum amount of time any given sector in the batch can wait for the batch to accumulate # Time duration string (e.g., "1h2m3s") in TOML format. 
(Default: "1h0m0s") + # Updates will affect running instances. # # type: time.Duration #Timeout = "1h0m0s" # Time buffer for forceful batch submission before sectors/deals in batch would start expiring # Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") + # Updates will affect running instances. # # type: time.Duration #Slack = "1h0m0s" diff --git a/harmony/harmonydb/harmonydb.go b/harmony/harmonydb/harmonydb.go index 3e3d0ff35..c7cbc0931 100644 --- a/harmony/harmonydb/harmonydb.go +++ b/harmony/harmonydb/harmonydb.go @@ -4,7 +4,6 @@ import ( "context" "embed" "fmt" - "math/rand" "net" "os" "regexp" @@ -25,9 +24,26 @@ import ( type ITestID string -// ITestNewID see ITestWithID doc +var itestCounter atomic.Uint64 + +// ITestNewID generates a unique ID for integration tests based on the test name. +// This ensures each test gets its own isolated database schema. func ITestNewID() ITestID { - return ITestID(strconv.Itoa(rand.Intn(99999))) + // Use a combination of timestamp and counter to ensure uniqueness + // even if tests run in parallel or are re-run quickly + counter := itestCounter.Add(1) + timestamp := time.Now().UnixNano() / 1000000 // milliseconds + return ITestID(fmt.Sprintf("%d_%d", timestamp, counter)) +} + +// ITestNewIDForTest generates a unique ID for a specific test using its name. +// This makes it easier to identify which test created which schema. +func ITestNewIDForTest(t *testing.T) ITestID { + // Sanitize test name to be schema-safe (alphanumeric and underscores only) + safeName := schemaRE.ReplaceAllString(t.Name(), "_") + counter := itestCounter.Add(1) + // Keep it reasonably short but unique + return ITestID(fmt.Sprintf("%s_%d", safeName, counter)) } type DB struct { @@ -105,6 +121,12 @@ func NewFromConfigWithITestID(t *testing.T, id ITestID) (*DB, error) { return db, nil } +// NewFromConfigWithTest is a convenience function that automatically generates +// a unique schema for each test based on the test name. 
+func NewFromConfigWithTest(t *testing.T) (*DB, error) { + return NewFromConfigWithITestID(t, ITestNewIDForTest(t)) +} + // New is to be called once per binary to establish the pool. // log() is for errors. It returns an upgraded database's connection. // This entry point serves both production and integration tests, so it's more DI. diff --git a/harmony/harmonydb/sql/20250505-market-mk20.sql b/harmony/harmonydb/sql/20250505-market-mk20.sql index 9489cfe33..b68e1e782 100644 --- a/harmony/harmonydb/sql/20250505-market-mk20.sql +++ b/harmony/harmonydb/sql/20250505-market-mk20.sql @@ -3,7 +3,8 @@ DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.columns - WHERE table_name = 'market_mk12_deals' + WHERE table_schema = current_schema() + AND table_name = 'market_mk12_deals' AND column_name = 'raw_size' ) THEN ALTER TABLE market_mk12_deals ADD COLUMN IF NOT EXISTS raw_size BIGINT; @@ -15,7 +16,8 @@ DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.columns - WHERE table_name = 'market_direct_deals' + WHERE table_schema = current_schema() + AND table_name = 'market_direct_deals' AND column_name = 'raw_size' ) THEN ALTER TABLE market_direct_deals ADD COLUMN IF NOT EXISTS raw_size BIGINT; @@ -35,7 +37,8 @@ DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.table_constraints - WHERE table_name = 'market_piece_metadata' + WHERE table_schema = current_schema() + AND table_name = 'market_piece_metadata' AND constraint_type = 'PRIMARY KEY' ) THEN ALTER TABLE market_piece_metadata ADD PRIMARY KEY (piece_cid, piece_size); @@ -55,7 +58,8 @@ DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.table_constraints - WHERE table_name = 'market_piece_deal' + WHERE table_schema = current_schema() + AND table_name = 'market_piece_deal' AND constraint_type = 'PRIMARY KEY' ) THEN ALTER TABLE market_piece_deal ADD PRIMARY KEY (id, sp_id, piece_cid, piece_length); @@ -67,7 +71,8 @@ DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.columns - 
WHERE table_name = 'market_piece_deal' + WHERE table_schema = current_schema() + AND table_name = 'market_piece_deal' AND column_name = 'piece_ref' ) THEN ALTER TABLE market_piece_deal ADD COLUMN IF NOT EXISTS piece_ref BIGINT; @@ -83,7 +88,8 @@ DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.columns - WHERE table_name = 'parked_pieces' + WHERE table_schema = current_schema() + AND table_name = 'parked_pieces' AND column_name = 'skip' ) THEN ALTER TABLE parked_pieces ADD COLUMN IF NOT EXISTS skip BOOLEAN NOT NULL DEFAULT FALSE; @@ -95,7 +101,8 @@ DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.columns - WHERE table_name = 'ipni' + WHERE table_schema = current_schema() + AND table_name = 'ipni' AND column_name = 'piece_cid_v2' ) THEN ALTER TABLE ipni ADD COLUMN IF NOT EXISTS piece_cid_v2 TEXT; @@ -107,7 +114,8 @@ DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.columns - WHERE table_name = 'ipni' + WHERE table_schema = current_schema() + AND table_name = 'ipni' AND column_name = 'metadata' ) THEN ALTER TABLE ipni ADD COLUMN IF NOT EXISTS metadata BYTEA NOT NULL DEFAULT '\xa01200'; @@ -120,7 +128,8 @@ DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.columns - WHERE table_name = 'ipni_chunks' + WHERE table_schema = current_schema() + AND table_name = 'ipni_chunks' AND column_name = 'is_pdp' ) THEN ALTER TABLE ipni_chunks ADD COLUMN IF NOT EXISTS is_pdp BOOLEAN NOT NULL DEFAULT FALSE; @@ -135,7 +144,8 @@ DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.table_constraints - WHERE table_name = 'ipni_chunks' + WHERE table_schema = current_schema() + AND table_name = 'ipni_chunks' AND constraint_name = 'ipni_chunks_piece_cid_is_pdp_chunk_num_key' ) THEN ALTER TABLE ipni_chunks ADD CONSTRAINT ipni_chunks_piece_cid_is_pdp_chunk_num_key @@ -190,7 +200,8 @@ DO $$ BEGIN IF NOT EXISTS ( SELECT 1 FROM information_schema.columns - WHERE table_name = 'ipni_task' + WHERE table_schema = current_schema() + AND table_name = 
'ipni_task' AND column_name = 'id' ) THEN ALTER TABLE ipni_task ADD COLUMN IF NOT EXISTS id TEXT; @@ -248,29 +259,45 @@ $$ LANGUAGE plpgsql; -- Update raw_size for existing deals (One time backfill migration) DO $$ BEGIN - UPDATE market_mk12_deals d - SET raw_size = mpd.raw_size - FROM market_piece_deal mpd - WHERE d.uuid = mpd.id; - - UPDATE market_direct_deals d - SET raw_size = mpd.raw_size - FROM market_piece_deal mpd - WHERE d.uuid = mpd.id; - - UPDATE market_mk12_deals d - SET raw_size = p.raw_size - FROM market_mk12_deal_pipeline p - WHERE d.uuid = p.uuid - AND d.raw_size IS NULL - AND p.raw_size IS NOT NULL; - - UPDATE market_direct_deals d - SET raw_size = p.raw_size - FROM market_mk12_deal_pipeline p - WHERE d.uuid = p.uuid - AND d.raw_size IS NULL - AND p.raw_size IS NOT NULL; + -- Only backfill if market_mk12_deals table and raw_size column exist + IF EXISTS ( + SELECT 1 FROM information_schema.columns + WHERE table_schema = current_schema() + AND table_name = 'market_mk12_deals' + AND column_name = 'raw_size' + ) THEN + UPDATE market_mk12_deals d + SET raw_size = mpd.raw_size + FROM market_piece_deal mpd + WHERE d.uuid = mpd.id; + + UPDATE market_mk12_deals d + SET raw_size = p.raw_size + FROM market_mk12_deal_pipeline p + WHERE d.uuid = p.uuid + AND d.raw_size IS NULL + AND p.raw_size IS NOT NULL; + END IF; + + -- Only backfill if market_direct_deals table and raw_size column exist + IF EXISTS ( + SELECT 1 FROM information_schema.columns + WHERE table_schema = current_schema() + AND table_name = 'market_direct_deals' + AND column_name = 'raw_size' + ) THEN + UPDATE market_direct_deals d + SET raw_size = mpd.raw_size + FROM market_piece_deal mpd + WHERE d.uuid = mpd.id; + + UPDATE market_direct_deals d + SET raw_size = p.raw_size + FROM market_mk12_deal_pipeline p + WHERE d.uuid = p.uuid + AND d.raw_size IS NULL + AND p.raw_size IS NOT NULL; + END IF; END $$; -- This is main MK20 Deal table. 
Rows are added per deal and some diff --git a/harmony/harmonydb/sql/20251107-fix-ready-at-triggers.sql b/harmony/harmonydb/sql/20251107-fix-ready-at-triggers.sql new file mode 100644 index 000000000..c723e2a45 --- /dev/null +++ b/harmony/harmonydb/sql/20251107-fix-ready-at-triggers.sql @@ -0,0 +1,65 @@ +-- Drop the old triggers +DROP TRIGGER IF EXISTS update_precommit_ready_at ON sectors_sdr_pipeline; +DROP TRIGGER IF EXISTS update_commit_ready_at ON sectors_sdr_pipeline; +DROP TRIGGER IF EXISTS update_update_ready_at ON sectors_snap_pipeline; + +-- Recreate the functions to use BEFORE triggers and directly modify NEW +CREATE OR REPLACE FUNCTION set_precommit_ready_at() +RETURNS TRIGGER AS $$ +BEGIN + -- Check if after_tree_r column is changing from FALSE to TRUE + IF (TG_OP = 'INSERT' OR OLD.after_tree_r = FALSE) AND NEW.after_tree_r = TRUE AND NEW.precommit_ready_at IS NULL THEN + NEW.precommit_ready_at := CURRENT_TIMESTAMP AT TIME ZONE 'UTC'; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION set_commit_ready_at() +RETURNS TRIGGER AS $$ +BEGIN + -- Check if after_porep column is changing from FALSE to TRUE + IF (TG_OP = 'INSERT' OR OLD.after_porep = FALSE) AND NEW.after_porep = TRUE AND NEW.commit_ready_at IS NULL THEN + NEW.commit_ready_at := CURRENT_TIMESTAMP AT TIME ZONE 'UTC'; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION set_update_ready_at() +RETURNS TRIGGER AS $$ +BEGIN + -- Check if after_prove column is changing from FALSE to TRUE + IF (TG_OP = 'INSERT' OR OLD.after_prove = FALSE) AND NEW.after_prove = TRUE AND NEW.update_ready_at IS NULL THEN + NEW.update_ready_at := CURRENT_TIMESTAMP AT TIME ZONE 'UTC'; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Create BEFORE triggers instead of AFTER triggers +CREATE TRIGGER update_precommit_ready_at + BEFORE INSERT OR UPDATE ON sectors_sdr_pipeline + FOR EACH ROW EXECUTE FUNCTION set_precommit_ready_at(); + +CREATE TRIGGER 
update_commit_ready_at + BEFORE INSERT OR UPDATE ON sectors_sdr_pipeline + FOR EACH ROW EXECUTE FUNCTION set_commit_ready_at(); + +CREATE TRIGGER update_update_ready_at + BEFORE INSERT OR UPDATE ON sectors_snap_pipeline + FOR EACH ROW EXECUTE FUNCTION set_update_ready_at(); + +-- Backfill existing rows that are missing these timestamps +UPDATE sectors_sdr_pipeline +SET precommit_ready_at = CURRENT_TIMESTAMP AT TIME ZONE 'UTC' +WHERE after_tree_r = TRUE AND precommit_ready_at IS NULL; + +UPDATE sectors_sdr_pipeline +SET commit_ready_at = CURRENT_TIMESTAMP AT TIME ZONE 'UTC' +WHERE after_porep = TRUE AND commit_ready_at IS NULL; + +UPDATE sectors_snap_pipeline +SET update_ready_at = CURRENT_TIMESTAMP AT TIME ZONE 'UTC' +WHERE after_prove = TRUE AND update_ready_at IS NULL; + diff --git a/itests/alertnow_test.go b/itests/alertnow_test.go index 0eef39730..84797570c 100644 --- a/itests/alertnow_test.go +++ b/itests/alertnow_test.go @@ -20,8 +20,7 @@ func TestAlertNow(t *testing.T) { tp, } // Create dependencies - sharedITestID := harmonydb.ITestNewID() - db, err := harmonydb.NewFromConfigWithITestID(t, sharedITestID) + db, err := harmonydb.NewFromConfigWithTest(t) require.NoError(t, err) an := alertmanager.NewAlertNow(db, "alertNowMachine") diff --git a/itests/curio_test.go b/itests/curio_test.go index 10f8f47ec..414b61f8d 100644 --- a/itests/curio_test.go +++ b/itests/curio_test.go @@ -8,6 +8,8 @@ import ( "fmt" "net" "os" + "runtime" + "runtime/debug" "testing" "time" @@ -46,7 +48,32 @@ import ( "github.com/filecoin-project/lotus/node" ) +// printAllGoroutines prints all goroutine stack traces for debugging deadlocks +func printAllGoroutines(t *testing.T, label string) { + t.Logf("=== %s: All Goroutines ===", label) + + // Get all goroutine stack traces + buf := make([]byte, 1024*1024) // 1MB buffer + for { + n := runtime.Stack(buf, true) + if n < len(buf) { + buf = buf[:n] + break + } + buf = make([]byte, len(buf)*2) + } + + t.Logf("Goroutine stack traces:\n%s", 
string(buf)) + + // Also print goroutine count + t.Logf("Total goroutines: %d", runtime.NumGoroutine()) +} + func TestCurioHappyPath(t *testing.T) { + // Enable Go's built-in mutex profiling for deadlock detection + runtime.SetMutexProfileFraction(1) + debug.SetTraceback("all") + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -73,10 +100,7 @@ func TestCurioHappyPath(t *testing.T) { fapi := fmt.Sprintf("%s:%s", string(token), full.ListenAddr) - sharedITestID := harmonydb.ITestNewID() - t.Logf("sharedITestID: %s", sharedITestID) - - db, err := harmonydb.NewFromConfigWithITestID(t, sharedITestID) + db, err := harmonydb.NewFromConfigWithTest(t) require.NoError(t, err) defer db.ITestDeleteAll() @@ -118,8 +142,8 @@ func TestCurioHappyPath(t *testing.T) { require.Contains(t, baseCfg.Addresses.Get()[0].MinerAddresses, maddr.String()) - baseCfg.Batching.PreCommit.Timeout = time.Second - baseCfg.Batching.Commit.Timeout = time.Second + baseCfg.Batching.PreCommit.Timeout.Set(time.Second) + baseCfg.Batching.Commit.Timeout.Set(time.Second) cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) require.NoError(t, err) @@ -248,6 +272,14 @@ func TestCurioHappyPath(t *testing.T) { StartEpoch sql.NullInt64 `db:"start_epoch"` } + // Set up panic recovery to print goroutines on failure + defer func() { + if r := recover(); r != nil { + printAllGoroutines(t, "PANIC RECOVERY") + panic(r) // Re-panic to maintain original behavior + } + }() + require.Eventuallyf(t, func() bool { h, err := full.ChainHead(ctx) require.NoError(t, err) diff --git a/itests/dyncfg_test.go b/itests/dyncfg_test.go index 8b70249f1..91b1d4cb8 100644 --- a/itests/dyncfg_test.go +++ b/itests/dyncfg_test.go @@ -16,8 +16,7 @@ func TestDynamicConfig(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - sharedITestID := harmonydb.ITestNewID() - cdb, err := 
harmonydb.NewFromConfigWithITestID(t, sharedITestID) + cdb, err := harmonydb.NewFromConfigWithTest(t) require.NoError(t, err) databaseContents := &config.CurioConfig{ diff --git a/itests/eth_test.go b/itests/eth_test.go new file mode 100644 index 000000000..f50275ed3 --- /dev/null +++ b/itests/eth_test.go @@ -0,0 +1,140 @@ +package itests + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/curio/deps" + "github.com/filecoin-project/curio/deps/config" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/itests/kit" +) + +func TestEthClientFailover(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create ensemble with first node and miner + full1, _, ensemble1 := kit.EnsembleMinimal(t, + kit.LatestActorsAt(-1), + kit.PresealSectors(32), + kit.ThroughRPC(), + ) + + ensemble1.Start() + blockTime := 100 * time.Millisecond + ensemble1.BeginMining(blockTime) + + // Wait for chain to advance + full1.WaitTillChain(ctx, kit.HeightAtLeast(15)) + + // Create second ensemble - nodes will have different genesis blocks (different chains) + full2, _, ensemble2 := kit.EnsembleMinimal(t, + kit.LatestActorsAt(-1), + kit.PresealSectors(32), + kit.ThroughRPC(), + ) + + ensemble2.Start() + ensemble2.BeginMining(blockTime) + + // Wait for node2's chain to advance + full2.WaitTillChain(ctx, kit.HeightAtLeast(15)) + + // Connect the nodes (even though they're on different chains, connection allows failover) + addrs1, err := full1.NetAddrsListen(ctx) + require.NoError(t, err, "should be able to get node1 peer address") + + err = full2.NetConnect(ctx, addrs1) + require.NoError(t, err, "should be able to connect node2 to node1") + + // Create eth client connection with BOTH nodes for failover testing + token1, err := full1.AuthNew(ctx, lapi.AllPermissions) + require.NoError(t, err) + token2, err := 
full2.AuthNew(ctx, lapi.AllPermissions) + require.NoError(t, err) + + apiInfo1 := fmt.Sprintf("%s:%s", string(token1), full1.ListenAddr) + apiInfo2 := fmt.Sprintf("%s:%s", string(token2), full2.ListenAddr) + // Configure with both nodes - first node will be tried first + apiInfoCfg := config.NewDynamic([]string{apiInfo1, apiInfo2}) + + // Create CLI context for eth client + app := cli.NewApp() + cctx := cli.NewContext(app, nil, nil) + cctx.Context = ctx + + ethClient, err := deps.GetEthClient(cctx, apiInfoCfg) + require.NoError(t, err) + + // Verify eth client works initially (connected to node1) + chainIDFromNode1, err := ethClient.ChainID(ctx) + require.NoError(t, err, "should be able to query node1 initially") + t.Logf("Chain ID from node1: %s", chainIDFromNode1.String()) + + blockNumberFromNode1, err := ethClient.BlockNumber(ctx) + require.NoError(t, err, "should be able to query block number from node1") + t.Logf("Block number from node1: %d", blockNumberFromNode1) + + // Get node2's chain info directly to compare after failover + token2Only, err := full2.AuthNew(ctx, lapi.AllPermissions) + require.NoError(t, err) + apiInfo2Only := fmt.Sprintf("%s:%s", string(token2Only), full2.ListenAddr) + apiInfoCfg2Only := config.NewDynamic([]string{apiInfo2Only}) + + { + ethClient2, err := deps.GetEthClient(cctx, apiInfoCfg2Only) + require.NoError(t, err) + + chainIDFromNode2, err := ethClient2.ChainID(ctx) + require.NoError(t, err, "should be able to query node2 directly") + t.Logf("Chain ID from node2: %s", chainIDFromNode2.String()) + + blockNumberFromNode2, err := ethClient2.BlockNumber(ctx) + require.NoError(t, err, "should be able to query block number from node2") + t.Logf("Block number from node2: %d", blockNumberFromNode2) + + // Verify nodes are on different chains (different genesis blocks) + require.Equal(t, chainIDFromNode1.String(), chainIDFromNode2.String(), "chain IDs should match (same network)") + // Block numbers may differ since they're mining 
independently + t.Logf("Nodes are mining independently - node1 at height %d, node2 at height %d", blockNumberFromNode1, blockNumberFromNode2) + } + // Test failover: Shutdown the first node and verify automatic failover to node2 + t.Logf("Testing automatic failover - shutting down first node...") + + // Shutdown the first node to simulate failure + err = full1.Shutdown(ctx) + require.NoError(t, err, "should be able to shutdown first node") + + // Wait a moment for connections to detect the failure and close + time.Sleep(3 * time.Second) + + // The eth client should automatically failover to node2 on the next call + t.Logf("Verifying automatic failover - eth client should automatically retry with node2...") + + // The proxy will automatically retry with the next provider (node2) if node1 fails + chainIDAfterFailover, err := ethClient.ChainID(ctx) + require.NoError(t, err, "eth client should automatically failover to node2 when node1 is shutdown") + require.Equal(t, chainIDFromNode1.String(), chainIDAfterFailover.String(), "chain IDs should match") + + // Verify we can query block number after failover (proves failover is working) + blockNumberAfterFailover, err := ethClient.BlockNumber(ctx) + require.NoError(t, err, "should be able to query block number via failover node") + t.Logf("Block number via failover node (node2): %d", blockNumberAfterFailover) + + // Verify we're now querying node2's chain (which is different from node1's chain) + // Since nodes are mining independently, the block numbers will differ + t.Logf("After failover, we're querying node2's chain (block %d), which is different from node1's chain (was at block %d)", + blockNumberAfterFailover, blockNumberFromNode1) + + // The failover test is successful - we've verified that: + // 1. The client automatically fails over to node2 when node1 is shutdown + // 2. We can still make queries (ChainID, BlockNumber) using the failover node + // 3. 
After failover, we're querying node2's independent chain +} diff --git a/itests/harmonydb_test.go b/itests/harmonydb_test.go index b62b17883..fa7068d59 100644 --- a/itests/harmonydb_test.go +++ b/itests/harmonydb_test.go @@ -17,8 +17,7 @@ func TestCrud(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - sharedITestID := harmonydb.ITestNewID() - cdb, err := harmonydb.NewFromConfigWithITestID(t, sharedITestID) + cdb, err := harmonydb.NewFromConfigWithTest(t) require.NoError(t, err) //cdb := miner.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB @@ -49,8 +48,7 @@ func TestTransaction(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - testID := harmonydb.ITestNewID() - cdb, err := harmonydb.NewFromConfigWithITestID(t, testID) + cdb, err := harmonydb.NewFromConfigWithTest(t) require.NoError(t, err) _, err = cdb.Exec(ctx, "INSERT INTO itest_scratch (some_int) VALUES (4), (5), (6)") require.NoError(t, err) @@ -99,8 +97,7 @@ func TestPartialWalk(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - testID := harmonydb.ITestNewID() - cdb, err := harmonydb.NewFromConfigWithITestID(t, testID) + cdb, err := harmonydb.NewFromConfigWithTest(t) require.NoError(t, err) _, err = cdb.Exec(ctx, ` INSERT INTO diff --git a/itests/sql_idempotent_test.go b/itests/sql_idempotent_test.go index 2d11aa450..b2514da33 100644 --- a/itests/sql_idempotent_test.go +++ b/itests/sql_idempotent_test.go @@ -20,8 +20,7 @@ func TestSQLIdempotent(t *testing.T) { require.NoError(t, fmt.Errorf("SQL DDL file failed idempotent check: %s, %w", name, err)) } - testID := harmonydb.ITestNewID() - cdb, err := harmonydb.NewFromConfigWithITestID(t, testID) + cdb, err := harmonydb.NewFromConfigWithTest(t) require.NoError(t, err) ctx := context.Background() diff --git a/lib/paths/local_test.go b/lib/paths/local_test.go index 24282c4df..c4e887707 100644 --- a/lib/paths/local_test.go +++ b/lib/paths/local_test.go @@ 
-82,9 +82,7 @@ func TestLocalStorage(t *testing.T) { root: root, } - sharedITestID := harmonydb.ITestNewID() - - db, err := harmonydb.NewFromConfigWithITestID(t, sharedITestID) + db, err := harmonydb.NewFromConfigWithTest(t) require.NoError(t, err) index := NewDBIndex(nil, db) diff --git a/lib/paths/remote_test.go b/lib/paths/remote_test.go index 682ded8da..32c3c6384 100644 --- a/lib/paths/remote_test.go +++ b/lib/paths/remote_test.go @@ -59,9 +59,7 @@ func createTestStorage(t *testing.T, p string, seal bool, att ...*paths.Local) s func TestMoveShared(t *testing.T) { logging.SetAllLoggers(logging.LevelDebug) - sharedITestID := harmonydb.ITestNewID() - - db, err := harmonydb.NewFromConfigWithITestID(t, sharedITestID) + db, err := harmonydb.NewFromConfigWithTest(t) require.NoError(t, err) index := paths.NewDBIndex(nil, db) diff --git a/market/http/http.go b/market/http/http.go index 886cca1dd..5b782be08 100644 --- a/market/http/http.go +++ b/market/http/http.go @@ -1,9 +1,9 @@ package http import ( - "github.com/ethereum/go-ethereum/ethclient" "github.com/go-chi/chi/v5" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/lib/paths" @@ -23,7 +23,7 @@ type MarketHandler struct { // NewMarketHandler is used to prepare all the required market handlers. Currently, it supports mk12 deal market. 
// This function should be used to expand the functionality under "/market" path -func NewMarketHandler(db *harmonydb.DB, cfg *config.CurioConfig, dm *storage_market.CurioStorageDealMarket, eth *ethclient.Client, fc pdp.PDPServiceNodeApi, sn *message.SenderETH, stor paths.StashStore) (*MarketHandler, error) { +func NewMarketHandler(db *harmonydb.DB, cfg *config.CurioConfig, dm *storage_market.CurioStorageDealMarket, eth api.EthClientInterface, fc pdp.PDPServiceNodeApi, sn *message.SenderETH, stor paths.StashStore) (*MarketHandler, error) { mdh12, err := mk12http.NewMK12DealHandler(db, cfg, dm) if err != nil { return nil, err diff --git a/market/mk20/ddo_v1.go b/market/mk20/ddo_v1.go index 65b9893ee..578e8872a 100644 --- a/market/mk20/ddo_v1.go +++ b/market/mk20/ddo_v1.go @@ -11,7 +11,6 @@ import ( "github.com/ethereum/go-ethereum" eabi "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" "github.com/samber/lo" "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" @@ -20,10 +19,18 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin/v16/verifreg" + curioapi "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" + + "github.com/filecoin-project/lotus/lib/lazy" ) +// EthClientInterface defines the minimal interface needed for GetDealID +type EthClientInterface interface { + CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) +} + var ErrUnknowContract = errors.New("provider does not work with this market") // DDOV1 defines a structure for handling provider, client, and piece manager information with associated contract and notification details @@ -117,7 +124,7 @@ func (d *DDOV1) Validate(db *harmonydb.DB, cfg *config.MK20Config) (DealCode, er return Ok, nil } -func (d *DDOV1) GetDealID(ctx 
context.Context, db *harmonydb.DB, eth *ethclient.Client) (int64, DealCode, error) { +func (d *DDOV1) GetDealID(ctx context.Context, db *harmonydb.DB, eth *lazy.Lazy[curioapi.EthClientInterface]) (int64, DealCode, error) { if d.ContractAddress == "0xtest" { v, err := rand.Int(rand.Reader, big.NewInt(10000000)) if err != nil { @@ -166,7 +173,11 @@ func (d *DDOV1) GetDealID(ctx context.Context, db *harmonydb.DB, eth *ethclient. } // Call contract - output, err := eth.CallContract(ctx, msg, nil) + ethClient, err := eth.Val() + if err != nil { + return -1, ErrServerInternalError, fmt.Errorf("failed to get eth client: %w", err) + } + output, err := ethClient.CallContract(ctx, msg, nil) if err != nil { return -1, ErrServerInternalError, fmt.Errorf("eth_call failed: %w", err) } diff --git a/market/mk20/mk20.go b/market/mk20/mk20.go index 04ef59a12..e22c16224 100644 --- a/market/mk20/mk20.go +++ b/market/mk20/mk20.go @@ -10,7 +10,6 @@ import ( "sync/atomic" "time" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/oklog/ulid" @@ -24,6 +23,7 @@ import ( "github.com/filecoin-project/go-state-types/builtin/v16/verifreg" verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + curioapi "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/build" "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" @@ -34,6 +34,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/lazy" ) var log = logging.Logger("mk20") @@ -47,7 +48,7 @@ type MK20 struct { miners *config.Dynamic[[]address.Address] DB *harmonydb.DB api MK20API - ethClient *ethclient.Client + ethClient *lazy.Lazy[curioapi.EthClientInterface] si paths.SectorIndex cfg *config.CurioConfig sm 
*config.Dynamic[map[address.Address]abi.SectorSize] @@ -57,7 +58,7 @@ type MK20 struct { unknowClient bool } -func NewMK20Handler(miners *config.Dynamic[[]address.Address], db *harmonydb.DB, si paths.SectorIndex, mapi MK20API, ethClient *ethclient.Client, cfg *config.CurioConfig, as *multictladdr.MultiAddressSelector, sc *ffi.SealCalls) (*MK20, error) { +func NewMK20Handler(miners *config.Dynamic[[]address.Address], db *harmonydb.DB, si paths.SectorIndex, mapi MK20API, ethClient *lazy.Lazy[curioapi.EthClientInterface], cfg *config.CurioConfig, as *multictladdr.MultiAddressSelector, sc *ffi.SealCalls) (*MK20, error) { ctx := context.Background() // Ensure MinChunk size and max chunkSize is a power of 2 diff --git a/pdp/contract/utils.go b/pdp/contract/utils.go index 658ee6a41..d63ec355d 100644 --- a/pdp/contract/utils.go +++ b/pdp/contract/utils.go @@ -14,7 +14,6 @@ import ( "github.com/ethereum/go-ethereum/common" etypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient" logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" @@ -23,10 +22,11 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/build" "github.com/filecoin-project/curio/harmony/harmonydb" - "github.com/filecoin-project/lotus/api" + lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" ) @@ -36,7 +36,7 @@ var log = logging.Logger("pdp-contract") // GetProvingScheduleFromListener checks if a listener has a view contract and returns // an IPDPProvingSchedule instance bound to the appropriate address. // It uses the view contract address if available, otherwise uses the listener address directly. 
-func GetProvingScheduleFromListener(listenerAddr common.Address, ethClient *ethclient.Client) (*IPDPProvingSchedule, error) { +func GetProvingScheduleFromListener(listenerAddr common.Address, ethClient bind.ContractBackend) (*IPDPProvingSchedule, error) { // Try to get the view contract address from the listener provingScheduleAddr := listenerAddr @@ -77,7 +77,7 @@ func ServiceRegistryAddress() (common.Address, error) { } } -func FSRegister(ctx context.Context, db *harmonydb.DB, full api.FullNode, ethClient *ethclient.Client, name, description string, pdpOffering ServiceProviderRegistryStoragePDPOffering, capabilities map[string]string) (uint64, error) { +func FSRegister(ctx context.Context, db *harmonydb.DB, full lapi.FullNode, ethClient api.EthClientInterface, name, description string, pdpOffering ServiceProviderRegistryStoragePDPOffering, capabilities map[string]string) (uint64, error) { if len(name) > 128 { return 0, xerrors.Errorf("name is too long, max 128 characters allowed") } @@ -246,7 +246,7 @@ func getSender(ctx context.Context, db *harmonydb.DB) (common.Address, address.A return sender, fSender, privateKey, nil } -func createSignedTransaction(ctx context.Context, ethClient *ethclient.Client, privateKey *ecdsa.PrivateKey, from, to common.Address, amount *mbig.Int, data []byte) (*etypes.Transaction, error) { +func createSignedTransaction(ctx context.Context, ethClient api.EthClientInterface, privateKey *ecdsa.PrivateKey, from, to common.Address, amount *mbig.Int, data []byte) (*etypes.Transaction, error) { msg := ethereum.CallMsg{ From: from, To: &to, @@ -280,7 +280,7 @@ func createSignedTransaction(ctx context.Context, ethClient *ethclient.Client, p } // Calculate GasFeeCap (maxFeePerGas) - gasFeeCap := big.NewInt(0).Add(baseFee, gasTipCap) + gasFeeCap := mbig.NewInt(0).Add(baseFee, gasTipCap) chainID, err := ethClient.NetworkID(ctx) if err != nil { @@ -314,7 +314,7 @@ func createSignedTransaction(ctx context.Context, ethClient *ethclient.Client, p 
return signedTx, nil } -func FSUpdateProvider(ctx context.Context, name, description string, db *harmonydb.DB, ethClient *ethclient.Client) (string, error) { +func FSUpdateProvider(ctx context.Context, name, description string, db *harmonydb.DB, ethClient api.EthClientInterface) (string, error) { if len(name) > 128 { return "", xerrors.Errorf("name is too long, max 128 characters allowed") } @@ -360,7 +360,7 @@ func FSUpdateProvider(ctx context.Context, name, description string, db *harmony return signedTx.Hash().String(), nil } -func FSUpdatePDPService(ctx context.Context, db *harmonydb.DB, ethClient *ethclient.Client, pdpOffering ServiceProviderRegistryStoragePDPOffering, capabilities map[string]string) (string, error) { +func FSUpdatePDPService(ctx context.Context, db *harmonydb.DB, ethClient api.EthClientInterface, pdpOffering ServiceProviderRegistryStoragePDPOffering, capabilities map[string]string) (string, error) { var keys, values []string for k, v := range capabilities { keys = append(keys, k) diff --git a/pdp/handlers.go b/pdp/handlers.go index 1ae8229c6..8bd5655ac 100644 --- a/pdp/handlers.go +++ b/pdp/handlers.go @@ -17,7 +17,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/go-chi/chi/v5" "github.com/ipfs/go-cid" "github.com/yugabyte/pgx/v5" @@ -25,6 +24,7 @@ import ( "github.com/filecoin-project/go-commp-utils/nonffi" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/pdp/contract" @@ -42,7 +42,7 @@ type PDPService struct { storage paths.StashStore sender *message.SenderETH - ethClient *ethclient.Client + ethClient api.EthClientInterface filClient PDPServiceNodeApi } @@ -51,7 +51,7 @@ type PDPServiceNodeApi interface { } // NewPDPService creates a new instance of PDPService 
with the provided stores -func NewPDPService(db *harmonydb.DB, stor paths.StashStore, ec *ethclient.Client, fc PDPServiceNodeApi, sn *message.SenderETH) *PDPService { +func NewPDPService(db *harmonydb.DB, stor paths.StashStore, ec api.EthClientInterface, fc PDPServiceNodeApi, sn *message.SenderETH) *PDPService { return &PDPService{ Auth: &NullAuth{}, db: db, diff --git a/tasks/message/sender.go b/tasks/message/sender.go index 8d5e77611..e548c8187 100644 --- a/tasks/message/sender.go +++ b/tasks/message/sender.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -47,7 +48,7 @@ type SignerAPI interface { type Sender struct { api SenderAPI - maximizeFeeCap bool + maximizeFeeCap *config.Dynamic[bool] sendTask *SendTask @@ -262,7 +263,7 @@ var _ harmonytask.TaskInterface = &SendTask{} var _ = harmonytask.Reg(&SendTask{}) // NewSender creates a new Sender. 
-func NewSender(api SenderAPI, signer SignerAPI, db *harmonydb.DB, maximizeFeeCap bool) (*Sender, *SendTask) { +func NewSender(api SenderAPI, signer SignerAPI, db *harmonydb.DB, maximizeFeeCap *config.Dynamic[bool]) (*Sender, *SendTask) { st := &SendTask{ api: api, signer: signer, @@ -298,7 +299,7 @@ func (s *Sender) Send(ctx context.Context, msg *types.Message, mss *api.MessageS return cid.Undef, xerrors.Errorf("MessageSendSpec.MsgUuid must be zero") } - if s.maximizeFeeCap { + if s.maximizeFeeCap.Get() { mss.MaximizeFeeCap = true } diff --git a/tasks/message/sender_eth.go b/tasks/message/sender_eth.go index 2575c7ebd..49b574c14 100644 --- a/tasks/message/sender_eth.go +++ b/tasks/message/sender_eth.go @@ -11,10 +11,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient" "go.uber.org/multierr" "golang.org/x/xerrors" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -23,7 +23,7 @@ import ( ) type SenderETH struct { - client *ethclient.Client + client api.EthClientInterface sendTask *SendTaskETH @@ -33,7 +33,7 @@ type SenderETH struct { type SendTaskETH struct { sendTF promise.Promise[harmonytask.AddTaskFunc] - client *ethclient.Client + client api.EthClientInterface db *harmonydb.DB } @@ -251,7 +251,7 @@ var _ harmonytask.TaskInterface = &SendTaskETH{} var _ = harmonytask.Reg(&SendTaskETH{}) // NewSenderETH creates a new SenderETH. 
-func NewSenderETH(client *ethclient.Client, db *harmonydb.DB) (*SenderETH, *SendTaskETH) { +func NewSenderETH(client api.EthClientInterface, db *harmonydb.DB) (*SenderETH, *SendTaskETH) { st := &SendTaskETH{ client: client, db: db, diff --git a/tasks/message/watch_eth.go b/tasks/message/watch_eth.go index 4ac851a4b..3a6702607 100644 --- a/tasks/message/watch_eth.go +++ b/tasks/message/watch_eth.go @@ -12,8 +12,8 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -54,7 +54,7 @@ type MessageWatcherEth struct { ethCallTimeout time.Duration } -func NewMessageWatcherEth(db *harmonydb.DB, ht *harmonytask.TaskEngine, pcs *chainsched.CurioChainSched, api *ethclient.Client) (*MessageWatcherEth, error) { +func NewMessageWatcherEth(db *harmonydb.DB, ht *harmonytask.TaskEngine, pcs *chainsched.CurioChainSched, api api.EthClientInterface) (*MessageWatcherEth, error) { mw := &MessageWatcherEth{ txMgr: NewHarmonyEthTxManager(db), ht: ht, diff --git a/tasks/pdp/data_set_create_watch.go b/tasks/pdp/data_set_create_watch.go index b6184d5d6..42719818e 100644 --- a/tasks/pdp/data_set_create_watch.go +++ b/tasks/pdp/data_set_create_watch.go @@ -9,10 +9,10 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/lib/chainsched" "github.com/filecoin-project/curio/pdp/contract" @@ -26,7 +26,7 @@ type DataSetCreate struct { Client string 
`db:"client"` } -func NewWatcherDataSetCreate(db *harmonydb.DB, ethClient *ethclient.Client, pcs *chainsched.CurioChainSched) { +func NewWatcherDataSetCreate(db *harmonydb.DB, ethClient api.EthClientInterface, pcs *chainsched.CurioChainSched) { if err := pcs.AddHandler(func(ctx context.Context, revert, apply *chainTypes.TipSet) error { err := processPendingDataSetCreates(ctx, db, ethClient) if err != nil { @@ -38,7 +38,7 @@ func NewWatcherDataSetCreate(db *harmonydb.DB, ethClient *ethclient.Client, pcs } } -func processPendingDataSetCreates(ctx context.Context, db *harmonydb.DB, ethClient *ethclient.Client) error { +func processPendingDataSetCreates(ctx context.Context, db *harmonydb.DB, ethClient api.EthClientInterface) error { // Query for pdp_data_set_create entries tx_hash is NOT NULL var dataSetCreates []DataSetCreate @@ -67,7 +67,7 @@ func processPendingDataSetCreates(ctx context.Context, db *harmonydb.DB, ethClie return nil } -func processDataSetCreate(ctx context.Context, db *harmonydb.DB, dsc DataSetCreate, ethClient *ethclient.Client) error { +func processDataSetCreate(ctx context.Context, db *harmonydb.DB, dsc DataSetCreate, ethClient api.EthClientInterface) error { // Retrieve the tx_receipt from message_waits_eth var txReceiptJSON []byte var txSuccess bool @@ -207,7 +207,7 @@ func extractDataSetIdFromReceipt(receipt *types.Receipt) (uint64, error) { return 0, xerrors.Errorf("DataSetCreated event not found in receipt") } -func getProvingPeriodChallengeWindow(ctx context.Context, ethClient *ethclient.Client, listenerAddr common.Address) (uint64, uint64, error) { +func getProvingPeriodChallengeWindow(ctx context.Context, ethClient bind.ContractBackend, listenerAddr common.Address) (uint64, uint64, error) { // Get the proving schedule from the listener (handles view contract indirection) schedule, err := contract.GetProvingScheduleFromListener(listenerAddr, ethClient) if err != nil { diff --git a/tasks/pdp/dataset_add_piece_watch.go 
b/tasks/pdp/dataset_add_piece_watch.go index 8b625488b..639e28ba5 100644 --- a/tasks/pdp/dataset_add_piece_watch.go +++ b/tasks/pdp/dataset_add_piece_watch.go @@ -7,11 +7,11 @@ import ( "fmt" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ipfs/go-cid" "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/lib/chainsched" "github.com/filecoin-project/curio/pdp/contract" @@ -31,7 +31,7 @@ type DataSetPieceAdd struct { } // NewWatcherPieceAdd sets up the watcher for data set piece additions -func NewWatcherPieceAdd(db *harmonydb.DB, pcs *chainsched.CurioChainSched, ethClient *ethclient.Client) { +func NewWatcherPieceAdd(db *harmonydb.DB, pcs *chainsched.CurioChainSched, ethClient api.EthClientInterface) { if err := pcs.AddHandler(func(ctx context.Context, revert, apply *chainTypes.TipSet) error { err := processPendingDataSetPieceAdds(ctx, db, ethClient) if err != nil { @@ -45,7 +45,7 @@ func NewWatcherPieceAdd(db *harmonydb.DB, pcs *chainsched.CurioChainSched, ethCl } // processPendingDataSetPieceAdds processes piece additions that have been confirmed on-chain -func processPendingDataSetPieceAdds(ctx context.Context, db *harmonydb.DB, ethClient *ethclient.Client) error { +func processPendingDataSetPieceAdds(ctx context.Context, db *harmonydb.DB, ethClient api.EthClientInterface) error { // Query for pdp_dataset_piece_adds entries where add_message_ok = TRUE var pieceAdds []DataSetPieceAdd @@ -75,7 +75,7 @@ func processPendingDataSetPieceAdds(ctx context.Context, db *harmonydb.DB, ethCl return nil } -func processDataSetPieceAdd(ctx context.Context, db *harmonydb.DB, pieceAdd DataSetPieceAdd, ethClient *ethclient.Client) error { +func processDataSetPieceAdd(ctx context.Context, db *harmonydb.DB, pieceAdd DataSetPieceAdd, ethClient api.EthClientInterface) error { // Retrieve the tx_receipt 
from message_waits_eth var txReceiptJSON []byte var txSuccess bool diff --git a/tasks/pdp/task_add_data_set.go b/tasks/pdp/task_add_data_set.go index 226841b08..97b2c28f1 100644 --- a/tasks/pdp/task_add_data_set.go +++ b/tasks/pdp/task_add_data_set.go @@ -8,10 +8,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -24,11 +24,11 @@ import ( type PDPTaskAddDataSet struct { db *harmonydb.DB sender *message.SenderETH - ethClient *ethclient.Client + ethClient api.EthClientInterface filClient PDPServiceNodeApi } -func NewPDPTaskAddDataSet(db *harmonydb.DB, sender *message.SenderETH, ethClient *ethclient.Client, filClient PDPServiceNodeApi) *PDPTaskAddDataSet { +func NewPDPTaskAddDataSet(db *harmonydb.DB, sender *message.SenderETH, ethClient api.EthClientInterface, filClient PDPServiceNodeApi) *PDPTaskAddDataSet { return &PDPTaskAddDataSet{ db: db, sender: sender, diff --git a/tasks/pdp/task_add_piece.go b/tasks/pdp/task_add_piece.go index b22b4b153..507043823 100644 --- a/tasks/pdp/task_add_piece.go +++ b/tasks/pdp/task_add_piece.go @@ -9,11 +9,11 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ipfs/go-cid" "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -32,10 +32,10 @@ type PDPServiceNodeApi interface { type PDPTaskAddPiece struct { db *harmonydb.DB sender *message.SenderETH - ethClient 
*ethclient.Client + ethClient api.EthClientInterface } -func NewPDPTaskAddPiece(db *harmonydb.DB, sender *message.SenderETH, ethClient *ethclient.Client) *PDPTaskAddPiece { +func NewPDPTaskAddPiece(db *harmonydb.DB, sender *message.SenderETH, ethClient api.EthClientInterface) *PDPTaskAddPiece { return &PDPTaskAddPiece{ db: db, sender: sender, diff --git a/tasks/pdp/task_delete_data_set.go b/tasks/pdp/task_delete_data_set.go index e0ed526ab..715892d4b 100644 --- a/tasks/pdp/task_delete_data_set.go +++ b/tasks/pdp/task_delete_data_set.go @@ -9,10 +9,10 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -25,11 +25,11 @@ import ( type PDPTaskDeleteDataSet struct { db *harmonydb.DB sender *message.SenderETH - ethClient *ethclient.Client + ethClient api.EthClientInterface filClient PDPServiceNodeApi } -func NewPDPTaskDeleteDataSet(db *harmonydb.DB, sender *message.SenderETH, ethClient *ethclient.Client, filClient PDPServiceNodeApi) *PDPTaskDeleteDataSet { +func NewPDPTaskDeleteDataSet(db *harmonydb.DB, sender *message.SenderETH, ethClient api.EthClientInterface, filClient PDPServiceNodeApi) *PDPTaskDeleteDataSet { return &PDPTaskDeleteDataSet{ db: db, sender: sender, diff --git a/tasks/pdp/task_delete_piece.go b/tasks/pdp/task_delete_piece.go index e4ed7805c..4cc84527e 100644 --- a/tasks/pdp/task_delete_piece.go +++ b/tasks/pdp/task_delete_piece.go @@ -9,10 +9,10 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" + "github.com/filecoin-project/curio/api" 
"github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -25,7 +25,7 @@ import ( type PDPTaskDeletePiece struct { db *harmonydb.DB sender *message.SenderETH - ethClient *ethclient.Client + ethClient api.EthClientInterface } func (p *PDPTaskDeletePiece) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { @@ -196,7 +196,7 @@ func (p *PDPTaskDeletePiece) schedule(ctx context.Context, taskFunc harmonytask. func (p *PDPTaskDeletePiece) Adder(taskFunc harmonytask.AddTaskFunc) {} -func NewPDPTaskDeletePiece(db *harmonydb.DB, sender *message.SenderETH, ethClient *ethclient.Client) *PDPTaskDeletePiece { +func NewPDPTaskDeletePiece(db *harmonydb.DB, sender *message.SenderETH, ethClient api.EthClientInterface) *PDPTaskDeletePiece { return &PDPTaskDeletePiece{ db: db, sender: sender, diff --git a/tasks/pdp/task_init_pp.go b/tasks/pdp/task_init_pp.go index c75c4d9b4..c5afd69e4 100644 --- a/tasks/pdp/task_init_pp.go +++ b/tasks/pdp/task_init_pp.go @@ -8,10 +8,10 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -25,7 +25,7 @@ import ( type InitProvingPeriodTask struct { db *harmonydb.DB - ethClient *ethclient.Client + ethClient api.EthClientInterface sender *message.SenderETH fil NextProvingPeriodTaskChainApi @@ -37,7 +37,7 @@ type InitProvingPeriodTaskChainApi interface { ChainHead(context.Context) (*chainTypes.TipSet, error) } -func NewInitProvingPeriodTask(db *harmonydb.DB, ethClient *ethclient.Client, fil NextProvingPeriodTaskChainApi, chainSched *chainsched.CurioChainSched, sender 
*message.SenderETH) *InitProvingPeriodTask { +func NewInitProvingPeriodTask(db *harmonydb.DB, ethClient api.EthClientInterface, fil NextProvingPeriodTaskChainApi, chainSched *chainsched.CurioChainSched, sender *message.SenderETH) *InitProvingPeriodTask { ipp := &InitProvingPeriodTask{ db: db, ethClient: ethClient, diff --git a/tasks/pdp/task_next_pp.go b/tasks/pdp/task_next_pp.go index 708fa3b8e..7fcf67f5a 100644 --- a/tasks/pdp/task_next_pp.go +++ b/tasks/pdp/task_next_pp.go @@ -7,10 +7,10 @@ import ( "strings" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/yugabyte/pgx/v5" "golang.org/x/xerrors" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -24,7 +24,7 @@ import ( type NextProvingPeriodTask struct { db *harmonydb.DB - ethClient *ethclient.Client + ethClient api.EthClientInterface sender *message.SenderETH fil NextProvingPeriodTaskChainApi @@ -36,7 +36,7 @@ type NextProvingPeriodTaskChainApi interface { ChainHead(context.Context) (*chainTypes.TipSet, error) } -func NewNextProvingPeriodTask(db *harmonydb.DB, ethClient *ethclient.Client, fil NextProvingPeriodTaskChainApi, chainSched *chainsched.CurioChainSched, sender *message.SenderETH) *NextProvingPeriodTask { +func NewNextProvingPeriodTask(db *harmonydb.DB, ethClient api.EthClientInterface, fil NextProvingPeriodTaskChainApi, chainSched *chainsched.CurioChainSched, sender *message.SenderETH) *NextProvingPeriodTask { n := &NextProvingPeriodTask{ db: db, ethClient: ethClient, diff --git a/tasks/pdp/task_prove.go b/tasks/pdp/task_prove.go index b5772c831..33fac9263 100644 --- a/tasks/pdp/task_prove.go +++ b/tasks/pdp/task_prove.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - 
"github.com/ethereum/go-ethereum/ethclient" "github.com/ipfs/go-cid" "github.com/minio/sha256-simd" "github.com/oklog/ulid" @@ -26,6 +25,7 @@ import ( "github.com/filecoin-project/go-padreader" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -46,7 +46,7 @@ const LeafSize = proof.NODE_SIZE type ProveTask struct { db *harmonydb.DB - ethClient *ethclient.Client + ethClient api.EthClientInterface sender *message.SenderETH cpr *cachedreader.CachedPieceReader fil ProveTaskChainApi @@ -62,7 +62,7 @@ type ProveTaskChainApi interface { ChainHead(context.Context) (*chainTypes.TipSet, error) //perm:read } -func NewProveTask(chainSched *chainsched.CurioChainSched, db *harmonydb.DB, ethClient *ethclient.Client, fil ProveTaskChainApi, sender *message.SenderETH, cpr *cachedreader.CachedPieceReader, idx *indexstore.IndexStore) *ProveTask { +func NewProveTask(chainSched *chainsched.CurioChainSched, db *harmonydb.DB, ethClient api.EthClientInterface, fil ProveTaskChainApi, sender *message.SenderETH, cpr *cachedreader.CachedPieceReader, idx *indexstore.IndexStore) *ProveTask { pt := &ProveTask{ db: db, ethClient: ethClient, diff --git a/tasks/seal/poller.go b/tasks/seal/poller.go index 3bbcce1d7..22d526faf 100644 --- a/tasks/seal/poller.go +++ b/tasks/seal/poller.go @@ -51,15 +51,15 @@ type SealPollerAPI interface { type preCommitBatchingConfig struct { MaxPreCommitBatch int - Slack time.Duration - Timeout time.Duration + Slack *config.Dynamic[time.Duration] + Timeout *config.Dynamic[time.Duration] } type commitBatchingConfig struct { MinCommitBatch int MaxCommitBatch int - Slack time.Duration - Timeout time.Duration + Slack *config.Dynamic[time.Duration] + Timeout *config.Dynamic[time.Duration] } type pollerConfig struct { diff --git a/tasks/seal/poller_commit_msg.go 
b/tasks/seal/poller_commit_msg.go index 68ddc65ae..37b439ebd 100644 --- a/tasks/seal/poller_commit_msg.go +++ b/tasks/seal/poller_commit_msg.go @@ -79,7 +79,7 @@ func (s *SealPoller) pollStartBatchCommitMsg(ctx context.Context) { return } - slackEpoch := int64(math.Ceil(s.cfg.commit.Slack.Seconds() / float64(build.BlockDelaySecs))) + slackEpoch := int64(math.Ceil(s.cfg.commit.Slack.Get().Seconds() / float64(build.BlockDelaySecs))) s.pollers[pollerCommitMsg].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { var updatedCount int64 @@ -90,14 +90,14 @@ func (s *SealPoller) pollStartBatchCommitMsg(ctx context.Context) { "current_height", ts.Height(), "max_batch", s.cfg.commit.MaxCommitBatch, "new_task_id", id, - "timeout_secs", s.cfg.commit.Timeout.Seconds()) + "timeout_secs", s.cfg.commit.Timeout.Get().Seconds()) err = tx.QueryRow(`SELECT updated_count, reason FROM poll_start_batch_commit_msgs($1, $2, $3, $4, $5)`, - slackEpoch, // p_slack_epoch - ts.Height(), // p_current_height - s.cfg.commit.MaxCommitBatch, // p_max_batch - id, // p_new_task_id - int(s.cfg.commit.Timeout.Seconds()), // p_timeout_secs + slackEpoch, // p_slack_epoch + ts.Height(), // p_current_height + s.cfg.commit.MaxCommitBatch, // p_max_batch + id, // p_new_task_id + int(s.cfg.commit.Timeout.Get().Seconds()), // p_timeout_secs ).Scan(&updatedCount, &reason) if err != nil { return false, err diff --git a/tasks/seal/poller_precommit_msg.go b/tasks/seal/poller_precommit_msg.go index 34e1d1c20..121b99516 100644 --- a/tasks/seal/poller_precommit_msg.go +++ b/tasks/seal/poller_precommit_msg.go @@ -32,7 +32,7 @@ func (s *SealPoller) pollStartBatchPrecommitMsg(ctx context.Context) { return } - slackEpoch := int64(math.Ceil(s.cfg.preCommit.Slack.Seconds() / float64(build.BlockDelaySecs))) + slackEpoch := int64(math.Ceil(s.cfg.preCommit.Slack.Get().Seconds() / float64(build.BlockDelaySecs))) s.pollers[pollerPrecommitMsg].Val(ctx)(func(id harmonytask.TaskID, tx 
*harmonydb.Tx) (shouldCommit bool, seriousError error) { var updatedCount int64 @@ -43,18 +43,18 @@ func (s *SealPoller) pollStartBatchPrecommitMsg(ctx context.Context) { "current_height", ts.Height(), "max_batch", s.cfg.preCommit.MaxPreCommitBatch, "new_task_id", id, - "timeout_secs", s.cfg.preCommit.Timeout.Seconds(), - "timeout_at", time.Now().Add(s.cfg.preCommit.Timeout).UTC().Format(time.RFC3339), + "timeout_secs", s.cfg.preCommit.Timeout.Get().Seconds(), + "timeout_at", time.Now().Add(s.cfg.preCommit.Timeout.Get()).UTC().Format(time.RFC3339), "randomness_lookback", policy.MaxPreCommitRandomnessLookback, ) err = tx.QueryRow(`SELECT updated_count, reason FROM poll_start_batch_precommit_msgs($1, $2, $3, $4, $5, $6)`, - policy.MaxPreCommitRandomnessLookback, // p_randomnessLookBack BIGINT, -- policy.MaxPreCommitRandomnessLookback - slackEpoch, // p_slack_epoch BIGINT, -- "Slack" epoch to compare against a sector's start_epoch - ts.Height(), // p_current_height BIGINT, -- Current on-chain height - s.cfg.preCommit.MaxPreCommitBatch, // p_max_batch INT, -- Max number of sectors per batch - id, // p_new_task_id BIGINT, -- Task ID to assign if a batch is chosen - int(s.cfg.preCommit.Timeout.Seconds()), // p_timeout_secs INT -- Timeout in seconds for earliest_ready_at check + policy.MaxPreCommitRandomnessLookback, // p_randomnessLookBack BIGINT, -- policy.MaxPreCommitRandomnessLookback + slackEpoch, // p_slack_epoch BIGINT, -- "Slack" epoch to compare against a sector's start_epoch + ts.Height(), // p_current_height BIGINT, -- Current on-chain height + s.cfg.preCommit.MaxPreCommitBatch, // p_max_batch INT, -- Max number of sectors per batch + id, // p_new_task_id BIGINT, -- Task ID to assign if a batch is chosen + int(s.cfg.preCommit.Timeout.Get().Seconds()), // p_timeout_secs INT -- Timeout in seconds for earliest_ready_at check ).Scan(&updatedCount, &reason) if err != nil { return false, err diff --git a/tasks/seal/task_submit_commit.go 
b/tasks/seal/task_submit_commit.go index 6dafb048f..5ce31229c 100644 --- a/tasks/seal/task_submit_commit.go +++ b/tasks/seal/task_submit_commit.go @@ -512,8 +512,8 @@ func (s *SubmitCommitTask) gasEstimateCommit(ctx context.Context, maddr address. } func (s *SubmitCommitTask) calculateCollateral(minerBalance abi.TokenAmount, collateral abi.TokenAmount) abi.TokenAmount { - if s.cfg.feeCfg.CollateralFromMinerBalance { - if s.cfg.feeCfg.DisableCollateralFallback { + if s.cfg.feeCfg.CollateralFromMinerBalance.Get() { + if s.cfg.feeCfg.DisableCollateralFallback.Get() { collateral = big.Zero() } diff --git a/tasks/seal/task_submit_precommit.go b/tasks/seal/task_submit_precommit.go index 1c4a861f7..2f38aa719 100644 --- a/tasks/seal/task_submit_precommit.go +++ b/tasks/seal/task_submit_precommit.go @@ -280,8 +280,8 @@ func (s *SubmitPrecommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bo aggFee := big.Div(big.Mul(aggFeeRaw, big.NewInt(110)), big.NewInt(100)) needFunds := big.Add(collateral, aggFee) - if s.feeCfg.CollateralFromMinerBalance { - if s.feeCfg.DisableCollateralFallback { + if s.feeCfg.CollateralFromMinerBalance.Get() { + if s.feeCfg.DisableCollateralFallback.Get() { needFunds = big.Zero() } balance, err := s.api.StateMinerAvailableBalance(ctx, maddr, types.EmptyTSK) diff --git a/tasks/snap/task_submit.go b/tasks/snap/task_submit.go index 47027334a..120dee380 100644 --- a/tasks/snap/task_submit.go +++ b/tasks/snap/task_submit.go @@ -66,9 +66,9 @@ type SubmitTaskNodeAPI interface { type updateBatchingConfig struct { MaxUpdateBatch int - Slack time.Duration - Timeout time.Duration - BaseFeeThreshold abi.TokenAmount + Slack *config.Dynamic[time.Duration] + Timeout *config.Dynamic[time.Duration] + BaseFeeThreshold *config.Dynamic[types.FIL] } type submitConfig struct { @@ -76,8 +76,8 @@ type submitConfig struct { feeCfg *config.CurioFees RequireActivationSuccess bool RequireNotificationSuccess bool - CollateralFromMinerBalance bool - 
DisableCollateralFallback bool + CollateralFromMinerBalance *config.Dynamic[bool] + DisableCollateralFallback *config.Dynamic[bool] } type SubmitTask struct { @@ -109,7 +109,7 @@ func NewSubmitTask(db *harmonydb.DB, api SubmitTaskNodeAPI, bstore curiochain.Cu MaxUpdateBatch: 16, Slack: cfg.Batching.Update.Slack, Timeout: cfg.Batching.Update.Timeout, - BaseFeeThreshold: abi.TokenAmount(cfg.Batching.Update.BaseFeeThreshold), + BaseFeeThreshold: cfg.Batching.Update.BaseFeeThreshold, }, feeCfg: &cfg.Fees, RequireActivationSuccess: cfg.Subsystems.RequireActivationSuccess, @@ -367,8 +367,8 @@ func (s *SubmitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done return false, xerrors.Errorf("could not serialize commit params: %w", err) } - if s.cfg.CollateralFromMinerBalance { - if s.cfg.DisableCollateralFallback { + if s.cfg.CollateralFromMinerBalance.Get() { + if s.cfg.DisableCollateralFallback.Get() { collateral = big.Zero() } balance, err := s.api.StateMinerAvailableBalance(ctx, maddr, types.EmptyTSK) @@ -641,20 +641,20 @@ func (s *SubmitTask) schedule(ctx context.Context, addTaskFunc harmonytask.AddTa scheduleNow := false // Slack - if timeUntil < s.cfg.batch.Slack { + if timeUntil < s.cfg.batch.Slack.Get() { scheduleNow = true } // Base fee check if !scheduleNow { - if ts.MinTicketBlock().ParentBaseFee.LessThan(s.cfg.batch.BaseFeeThreshold) { + if ts.MinTicketBlock().ParentBaseFee.LessThan(abi.TokenAmount(s.cfg.batch.BaseFeeThreshold.Get())) { scheduleNow = true } } // Timeout since earliestTime if !scheduleNow && !earliestTime.IsZero() { - if time.Since(earliestTime) > s.cfg.batch.Timeout { + if time.Since(earliestTime) > s.cfg.batch.Timeout.Get() { scheduleNow = true } } diff --git a/tasks/storage-market/mk20.go b/tasks/storage-market/mk20.go index a1440f07e..2ed3f7377 100644 --- a/tasks/storage-market/mk20.go +++ b/tasks/storage-market/mk20.go @@ -631,7 +631,7 @@ func (d *CurioStorageDealMarket) processMk20Pieces(ctx context.Context, piece MK // 
downloadMk20Deal handles the downloading process of an MK20 pipeline piece by scheduling it in the database and updating its status. // If the pieces are part of an aggregation deal then we download for short term otherwise, // we download for long term to avoid the need to have unsealed copy -func (d *CurioStorageDealMarket) downloadMk20Deal(ctx context.Context, piece MK20PipelinePiece) error { +func (d *CurioStorageDealMarket) downloadMk20Deal(ctx context.Context, _ MK20PipelinePiece) error { n, err := d.db.Exec(ctx, `SELECT mk20_ddo_mark_downloaded($1)`, mk20.ProductNameDDOV1) if err != nil { log.Errorf("failed to mark PDP downloaded piece: %v", err) @@ -717,7 +717,7 @@ func (d *CurioStorageDealMarket) findOfflineURLMk20Deal(ctx context.Context, pie } // Check if We can find the URL for this piece on remote servers - for rUrl, headers := range d.urls { + for rUrl, headers := range d.urls.Get() { // Create a new HTTP request urlString := fmt.Sprintf("%s?id=%s", rUrl, piece.PieceCIDV2) req, err := http.NewRequest(http.MethodHead, urlString, nil) diff --git a/tasks/storage-market/storage_market.go b/tasks/storage-market/storage_market.go index ca14de12f..a5fc338b4 100644 --- a/tasks/storage-market/storage_market.go +++ b/tasks/storage-market/storage_market.go @@ -14,7 +14,6 @@ import ( "strings" "time" - "github.com/ethereum/go-ethereum/ethclient" "github.com/google/go-cmp/cmp" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" @@ -26,6 +25,7 @@ import ( "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/curio/api" "github.com/filecoin-project/curio/build" "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" @@ -41,6 +41,7 @@ import ( lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/proofs" + "github.com/filecoin-project/lotus/lib/lazy" 
"github.com/filecoin-project/lotus/storage/pipeline/piece" ) @@ -70,9 +71,9 @@ type CurioStorageDealMarket struct { api storageMarketAPI MK12Handler *mk12.MK12 MK20Handler *mk20.MK20 - ethClient *ethclient.Client + ethClient *lazy.Lazy[api.EthClientInterface] si paths.SectorIndex - urls map[string]http.Header + urls *config.Dynamic[map[string]http.Header] adders [numPollers]promise.Promise[harmonytask.AddTaskFunc] as *multictladdr.MultiAddressSelector sc *ffi.SealCalls @@ -112,12 +113,19 @@ type MK12Pipeline struct { Offset sql.NullInt64 `db:"sector_offset"` } -func NewCurioStorageDealMarket(miners *config.Dynamic[[]address.Address], db *harmonydb.DB, cfg *config.CurioConfig, ethClient *ethclient.Client, si paths.SectorIndex, mapi storageMarketAPI, as *multictladdr.MultiAddressSelector, sc *ffi.SealCalls) *CurioStorageDealMarket { +func NewCurioStorageDealMarket(miners *config.Dynamic[[]address.Address], db *harmonydb.DB, cfg *config.CurioConfig, ethClient *lazy.Lazy[api.EthClientInterface], si paths.SectorIndex, mapi storageMarketAPI, as *multictladdr.MultiAddressSelector, sc *ffi.SealCalls) *CurioStorageDealMarket { - urls := make(map[string]http.Header) - for _, curl := range cfg.Market.StorageMarketConfig.PieceLocator { - urls[curl.URL] = curl.Headers + urlsDynamic := config.NewDynamic(make(map[string]http.Header)) + + makeUrls := func() { + urls := make(map[string]http.Header) + for _, curl := range cfg.Market.StorageMarketConfig.PieceLocator.Get() { + urls[curl.URL] = curl.Headers + } + urlsDynamic.Set(urls) } + makeUrls() + cfg.Market.StorageMarketConfig.PieceLocator.OnChange(makeUrls) return &CurioStorageDealMarket{ cfg: cfg, @@ -125,7 +133,7 @@ func NewCurioStorageDealMarket(miners *config.Dynamic[[]address.Address], db *ha api: mapi, miners: miners, si: si, - urls: urls, + urls: urlsDynamic, as: as, ethClient: ethClient, sc: sc, @@ -550,7 +558,7 @@ func (d *CurioStorageDealMarket) findURLForOfflineDeals(ctx context.Context, dea } // Check if We can find 
the URL for this piece on remote servers - for rUrl, headers := range d.urls { + for rUrl, headers := range d.urls.Get() { // Create a new HTTP request urlString := fmt.Sprintf("%s?id=%s", rUrl, pcid) req, err := http.NewRequest(http.MethodHead, urlString, nil) diff --git a/web/api/webrpc/ipfs_content.go b/web/api/webrpc/ipfs_content.go index dbbc4b999..8677c606c 100644 --- a/web/api/webrpc/ipfs_content.go +++ b/web/api/webrpc/ipfs_content.go @@ -61,7 +61,7 @@ func (a *WebRPC) FindContentByCID(ctx context.Context, cs string) ([]ContentInfo PieceCID: pcid2.String(), Offset: off, Size: offset.BlockSize, - Err: err.Error(), + Err: "", }) continue } diff --git a/web/api/webrpc/ipni.go b/web/api/webrpc/ipni.go index 3bf08321f..5bbd86401 100644 --- a/web/api/webrpc/ipni.go +++ b/web/api/webrpc/ipni.go @@ -267,7 +267,7 @@ func (a *WebRPC) IPNISummary(ctx context.Context) ([]*IPNI, error) { var services []string - err = forEachConfig[minimalIpniInfo](a, func(name string, info minimalIpniInfo) error { + err = forEachConfig(a, func(name string, info minimalIpniInfo) error { services = append(services, info.Market.StorageMarketConfig.IPNI.ServiceURL...) return nil }) diff --git a/web/api/webrpc/sync_state.go b/web/api/webrpc/sync_state.go index 70d2486f0..1c5ecb56c 100644 --- a/web/api/webrpc/sync_state.go +++ b/web/api/webrpc/sync_state.go @@ -129,7 +129,7 @@ func (a *WebRPC) SyncerState(ctx context.Context) ([]RpcInfo, error) { var rpcInfos []string confNameToAddr := make(map[string][]string) // config name -> api addresses - err := forEachConfig[minimalApiInfo](a, func(name string, info minimalApiInfo) error { + err := forEachConfig(a, func(name string, info minimalApiInfo) error { if len(info.Apis.ChainApiInfo) == 0 { return nil }