diff --git a/build/devenv/cmd/ccv/ccv.go b/build/devenv/cmd/ccv/ccv.go index e602279e..bafde312 100644 --- a/build/devenv/cmd/ccv/ccv.go +++ b/build/devenv/cmd/ccv/ccv.go @@ -20,7 +20,6 @@ import ( chainsel "github.com/smartcontractkit/chain-selectors" "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/committee_verifier" "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/mock_receiver" - "github.com/smartcontractkit/chainlink-ccv/devenv/gencfg" "github.com/smartcontractkit/chainlink-ccv/devenv/services" "github.com/smartcontractkit/chainlink-ccv/protocol" hmacutil "github.com/smartcontractkit/chainlink-ccv/protocol/common/hmac" @@ -469,35 +468,6 @@ var printAddressesCmd = &cobra.Command{ }, } -var generateConfigsCmd = &cobra.Command{ - Use: "generate-configs", - Short: "Generate the verifier and executor jobspecs (CL deployment only), and the aggregator and indexer TOML configuration files for the environment. Requires gh tool to authenticate to CLD repo.", - RunE: func(cmd *cobra.Command, args []string) error { - cldDomain, err := cmd.Flags().GetString("cld-domain") - if err != nil { - return err - } - verifierPubKeys, err := cmd.Flags().GetStringSlice("verifier-pubkeys") - if err != nil { - return err - } - numExecutors, err := cmd.Flags().GetInt("num-executors") - if err != nil { - return err - } - createPR, err := cmd.Flags().GetBool("create-pr") - if err != nil { - return err - } - - _, err = gencfg.GenerateConfigs(cldDomain, verifierPubKeys, numExecutors, createPR) - if err != nil { - return fmt.Errorf("failed to generate configs: %w", err) - } - return nil - }, -} - var generateHMACSecretCmd = &cobra.Command{ Use: "generate-hmac-secret", Short: "Generate cryptographically secure HMAC credentials (API key and secret) for aggregator authentication", @@ -723,17 +693,6 @@ func init() { rootCmd.AddCommand(deployCommitVerifierCmd) rootCmd.AddCommand(deployReceiverCmd) - // config generation - rootCmd.AddCommand(generateConfigsCmd) - generateConfigsCmd.Flags().String("cld-domain", "", "CLD Domain to target for config generation. 
Current options: staging_testnet") - generateConfigsCmd.Flags().StringSlice("verifier-pubkeys", []string{}, "List of verifier public keys (hex encoded) to include in the generated configs") - generateConfigsCmd.Flags().Int("num-executors", 1, "Number of executor jobspecs to generate") - generateConfigsCmd.Flags().Bool("create-pr", false, "Create a pull request with the generated configs") - - _ = generateConfigsCmd.MarkFlagRequired("cld-domain") - _ = generateConfigsCmd.MarkFlagRequired("verifier-pubkeys") - _ = generateConfigsCmd.MarkFlagRequired("num-executors") - // HMAC secret generation rootCmd.AddCommand(generateHMACSecretCmd) generateHMACSecretCmd.Flags().Int("count", 1, "Number of HMAC credential pairs to generate") diff --git a/build/devenv/environment.go b/build/devenv/environment.go index 6a15475e..a62cd1b9 100644 --- a/build/devenv/environment.go +++ b/build/devenv/environment.go @@ -5,23 +5,29 @@ import ( "encoding/json" "errors" "fmt" + "io" "math/big" "net/http" "os" "path/filepath" "strconv" "strings" + "time" "github.com/BurntSushi/toml" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/rs/zerolog" + chainsel "github.com/smartcontractkit/chain-selectors" + "github.com/smartcontractkit/chainlink-ccv/deployments" "github.com/smartcontractkit/chainlink-ccv/deployments/changesets" "github.com/smartcontractkit/chainlink-ccv/devenv/cciptestinterfaces" + "github.com/smartcontractkit/chainlink-ccv/devenv/evm" "github.com/smartcontractkit/chainlink-ccv/devenv/internal/util" "github.com/smartcontractkit/chainlink-ccv/devenv/services" + "github.com/smartcontractkit/chainlink-ccv/executor" "github.com/smartcontractkit/chainlink-ccv/indexer/pkg/config" "github.com/smartcontractkit/chainlink-ccv/protocol" "github.com/smartcontractkit/chainlink-ccv/verifier/commit" @@ -32,9 +38,6 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/framework/components/blockchain" "github.com/smartcontractkit/chainlink-testing-framework/framework/components/clnode" "github.com/smartcontractkit/chainlink-testing-framework/framework/components/jd" - - chainsel "github.com/smartcontractkit/chain-selectors" - "github.com/smartcontractkit/chainlink-ccv/devenv/evm" ns "github.com/smartcontractkit/chainlink-testing-framework/framework/components/simple_node_set" ) @@ -98,7 +101,10 @@ type Cfg struct { AggregatorEndpoints map[string]string `toml:"aggregator_endpoints"` // AggregatorCACertFiles map the verifier qualifier to the CA cert file path for TLS verification. AggregatorCACertFiles map[string]string `toml:"aggregator_ca_cert_files"` - IndexerEndpoint string `toml:"indexer_endpoint"` + // IndexerEndpoint is the external URL (localhost:port) for host access. + IndexerEndpoint string `toml:"indexer_endpoint"` + // IndexerInternalEndpoint is the internal Docker network URL for container-to-container access. + IndexerInternalEndpoint string `toml:"indexer_internal_endpoint"` } // NewAggregatorClientForCommittee creates an AggregatorClient for the specified committee. @@ -138,6 +144,329 @@ func NewProductConfigurationFromNetwork(typ string) (cciptestinterfaces.CCIP17Co } } +// buildEnvConfig constructs an EnvConfig from devenv inputs. +// This creates the NOP topology, committee configurations, and executor pools +// based on the verifier and executor inputs. 
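As a quick illustration of what the function below produces, here is a hedged, package-internal test sketch. Package and import boilerplate are omitted, and the container names, the 1337 selector, and all values are invented; only the field and type names come from this diff.

```go
func TestBuildEnvConfig_TwoVerifierCommittee(t *testing.T) {
	verifiers := []*services.VerifierInput{
		{ContainerName: "verifier-0", CommitteeName: "default", SigningKeyPublic: "0xaaa"},
		{ContainerName: "verifier-1", CommitteeName: "default", SigningKeyPublic: "0xbbb"},
	}
	aggregators := []*services.AggregatorInput{{CommitteeName: "default"}}

	cfg := buildEnvConfig(verifiers, nil, aggregators, "http://indexer:8100", []uint64{1337})

	// Each verifier container becomes a NOP keyed by its container name.
	if len(cfg.NOPTopology.NOPs) != 2 {
		t.Fatalf("want 2 NOPs, got %d", len(cfg.NOPTopology.NOPs))
	}
	// With agg.Out unset and no insecure verifiers, the committee targets the
	// nginx TLS proxy and the threshold equals the full member count.
	c := cfg.NOPTopology.Committees["default"]
	if c.Aggregators[0].Address != "default-aggregator-nginx:443" {
		t.Fatalf("unexpected aggregator address %q", c.Aggregators[0].Address)
	}
	if c.ChainConfigs["1337"].Threshold != 2 {
		t.Fatalf("want threshold 2, got %d", c.ChainConfigs["1337"].Threshold)
	}
}
```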
+func buildEnvConfig( + verifiers []*services.VerifierInput, + executors []*services.ExecutorInput, + aggregators []*services.AggregatorInput, + indexerEndpoint string, + selectors []uint64, +) deployments.EnvConfig { + nops := make(map[string]deployments.NOPConfig) + committees := make(map[string]deployments.CommitteeConfig) + executorPools := make(map[string]deployments.ExecutorPoolConfig) + + // Build NOPs from verifiers (each verifier container is a NOP) + for _, ver := range verifiers { + nops[ver.ContainerName] = deployments.NOPConfig{ + Alias: ver.ContainerName, + Name: ver.ContainerName, + SignerAddress: ver.SigningKeyPublic, + } + } + + // Build set of committee names that have verifiers + committeeNames := make(map[string]struct{}) + for _, ver := range verifiers { + committeeNames[ver.CommitteeName] = struct{}{} + } + + // Build committees only for those that have verifiers + for _, agg := range aggregators { + // Skip committees that don't have verifiers in the input + if _, ok := committeeNames[agg.CommitteeName]; !ok { + continue + } + + // Find all verifiers for this committee + var memberAliases []string + for _, ver := range verifiers { + if ver.CommitteeName == agg.CommitteeName { + memberAliases = append(memberAliases, ver.ContainerName) + } + } + + // Build chain configs (same members for all chains in devenv) + chainConfigs := make(map[string]deployments.ChainCommitteeConfig) + for _, sel := range selectors { + chainConfigs[strconv.FormatUint(sel, 10)] = deployments.ChainCommitteeConfig{ + NOPAliases: memberAliases, + Threshold: uint8(len(memberAliases)), + } + } + + // Determine aggregator address + aggAddress := "" + insecure := false + if agg.Out != nil { + // Use the nginx TLS proxy address by default + aggAddress = agg.Out.Address + } else { + aggAddress = fmt.Sprintf("%s-aggregator-nginx:443", agg.CommitteeName) + } + + // Check if any verifier wants insecure connection + for _, ver := range verifiers { + if ver.CommitteeName == agg.CommitteeName && ver.InsecureAggregatorConnection { + insecure = true + if agg.Out != nil { + aggAddress = agg.Out.ExternalHTTPUrl + } else { + aggAddress = fmt.Sprintf("%s-aggregator:50051", agg.CommitteeName) + } + break + } + } + + committees[agg.CommitteeName] = deployments.CommitteeConfig{ + Qualifier: agg.CommitteeName, + VerifierVersion: "1.7.0", + ChainConfigs: chainConfigs, + Aggregators: []deployments.AggregatorConfig{ + { + Name: "default", + Address: aggAddress, + InsecureAggregatorConnection: insecure, + }, + }, + } + } + + // Build executor pools from executors, grouped by qualifier + if len(executors) > 0 { + poolsByQualifier := make(map[string][]string) + for _, exec := range executors { + // Add executor as NOP + nops[exec.ContainerName] = deployments.NOPConfig{ + Alias: exec.ContainerName, + Name: exec.ContainerName, + } + + // Group by qualifier + qualifier := exec.ExecutorQualifier + if qualifier == "" { + qualifier = evm.DefaultExecutorQualifier + } + poolsByQualifier[qualifier] = append(poolsByQualifier[qualifier], exec.ContainerName) + } + + // Create executor pool for each qualifier + for qualifier, poolAliases := range poolsByQualifier { + executorPools[qualifier] = deployments.ExecutorPoolConfig{ + NOPAliases: poolAliases, + ExecutionInterval: 15 * time.Second, + } + } + } + + return deployments.EnvConfig{ + IndexerAddress: indexerEndpoint, + PyroscopeURL: "http://host.docker.internal:4040", + Monitoring: deployments.MonitoringConfig{ + Enabled: true, + Type: "beholder", + Beholder: 
deployments.BeholderConfig{ + InsecureConnection: true, + OtelExporterHTTPEndpoint: "host.docker.internal:4318", + MetricReaderInterval: 5, + TraceSampleRatio: 1.0, + TraceBatchTimeout: 10, + }, + }, + NOPTopology: deployments.NOPTopology{ + NOPs: nops, + Committees: committees, + }, + ExecutorPools: executorPools, + } +} + +// buildAndWriteEnvConfig builds the shared EnvConfig from all devenv inputs and writes it to a file. +// This is used by both executor and verifier changesets as the single source of truth. +// Uses the internal Docker network endpoint for container-to-container communication. +func buildAndWriteEnvConfig(in *Cfg, selectors []uint64) (string, error) { + envCfg := buildEnvConfig(in.Verifier, in.Executor, in.Aggregator, in.IndexerInternalEndpoint, selectors) + + configDir := filepath.Join(util.CCVConfigDir(), "env-config") + if err := os.MkdirAll(configDir, 0o755); err != nil { + return "", fmt.Errorf("failed to create env config directory: %w", err) + } + + configPath := filepath.Join(configDir, "env.toml") + if err := deployments.WriteEnvConfig(configPath, envCfg); err != nil { + return "", fmt.Errorf("failed to write env config: %w", err) + } + + Plog.Info().Str("path", configPath).Msg("Wrote shared EnvConfig") + return configPath, nil +} + +// generateExecutorJobSpecs generates job specs for all executors using the EnvConfig-based changeset. +// It returns a map of container name -> job spec for use in CL mode. +// For standalone mode, it also sets GeneratedConfig on each executor. +func generateExecutorJobSpecs( + ctx context.Context, + e *deployment.Environment, + in *Cfg, + selectors []uint64, + impls []cciptestinterfaces.CCIP17Configuration, + envConfigPath string, +) (map[string]string, error) { + executorJobSpecs := make(map[string]string) + + if len(in.Executor) == 0 { + return executorJobSpecs, nil + } + + // Group executors by qualifier + executorsByQualifier := make(map[string][]*services.ExecutorInput) + for _, exec := range in.Executor { + qualifier := exec.ExecutorQualifier + if qualifier == "" { + qualifier = evm.DefaultExecutorQualifier + } + executorsByQualifier[qualifier] = append(executorsByQualifier[qualifier], exec) + } + + // Generate configs for each qualifier group + for qualifier, qualifierExecutors := range executorsByQualifier { + execNOPAliases := make([]string, 0, len(qualifierExecutors)) + for _, exec := range qualifierExecutors { + execNOPAliases = append(execNOPAliases, exec.ContainerName) + } + + cs := changesets.GenerateExecutorConfig() + output, err := cs.Apply(*e, changesets.GenerateExecutorConfigCfg{ + EnvConfigPath: envConfigPath, + ExecutorQualifier: qualifier, + ChainSelectors: selectors, + NOPAliases: execNOPAliases, + }) + if err != nil { + return nil, fmt.Errorf("failed to generate executor configs for qualifier %s: %w", qualifier, err) + } + + for _, exec := range qualifierExecutors { + jobSpecID := fmt.Sprintf("%s-%s-executor", exec.ContainerName, qualifier) + jobSpec, err := deployments.GetNOPJobSpec(output.DataStore.Seal(), exec.ContainerName, jobSpecID) + if err != nil { + return nil, fmt.Errorf("failed to get executor job spec for %s: %w", exec.ContainerName, err) + } + executorJobSpecs[exec.ContainerName] = jobSpec + + // Extract inner config from job spec for standalone mode + execCfg, err := ParseExecutorConfigFromJobSpec(jobSpec) + if err != nil { + return nil, fmt.Errorf("failed to parse executor config from job spec: %w", err) + } + + // Marshal the inner config back to TOML for standalone mode + configBytes, err 
:= toml.Marshal(execCfg) + if err != nil { + return nil, fmt.Errorf("failed to marshal executor config: %w", err) + } + exec.GeneratedConfig = string(configBytes) + } + } + + // Set transmitter keys for standalone mode + _, err := services.SetTransmitterPrivateKey(in.Executor) + if err != nil { + return nil, fmt.Errorf("failed to set transmitter private key: %w", err) + } + + // Fund executor addresses for standalone mode + addresses := make([]protocol.UnknownAddress, 0, len(in.Executor)) + for _, exec := range in.Executor { + addresses = append(addresses, exec.GetTransmitterAddress()) + } + Plog.Info().Any("Addresses", addresses).Int("ImplsLen", len(impls)).Msg("Funding executors") + for i, impl := range impls { + Plog.Info().Int("ImplIndex", i).Msg("Funding executor") + err = impl.FundAddresses(ctx, in.Blockchains[i], addresses, big.NewInt(5)) + if err != nil { + return nil, fmt.Errorf("failed to fund addresses for executors: %w", err) + } + Plog.Info().Int("ImplIndex", i).Msg("Funded executors") + } + + return executorJobSpecs, nil +} + +// generateVerifierJobSpecs generates job specs for all verifiers using the EnvConfig-based changeset. +// It returns a map of container name -> job spec for use in CL mode. +// For standalone mode, it also sets GeneratedConfig on each verifier. +func generateVerifierJobSpecs( + e *deployment.Environment, + in *Cfg, + selectors []uint64, + envConfigPath string, + sharedTLSCerts *services.TLSCertPaths, +) (map[string]string, error) { + verifierJobSpecs := make(map[string]string) + + if len(in.Verifier) == 0 { + return verifierJobSpecs, nil + } + + // Group verifiers by committee for batch generation + verifiersByCommittee := make(map[string][]*services.VerifierInput) + for _, ver := range in.Verifier { + verifiersByCommittee[ver.CommitteeName] = append(verifiersByCommittee[ver.CommitteeName], ver) + } + + // Generate verifier configs per committee + for committeeName, committeeVerifiers := range verifiersByCommittee { + verNOPAliases := make([]string, 0, len(committeeVerifiers)) + for _, ver := range committeeVerifiers { + verNOPAliases = append(verNOPAliases, ver.ContainerName) + } + + cs := changesets.GenerateVerifierConfig() + output, err := cs.Apply(*e, changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: envConfigPath, + CommitteeQualifier: committeeName, + ExecutorQualifier: evm.DefaultExecutorQualifier, + ChainSelectors: selectors, + NOPAliases: verNOPAliases, + }) + if err != nil { + return nil, fmt.Errorf("failed to generate verifier configs for committee %s: %w", committeeName, err) + } + + for _, ver := range committeeVerifiers { + // Aggregator name is "default" as configured in the env config + jobSpecID := fmt.Sprintf("default-%s-verifier", committeeName) + jobSpec, err := deployments.GetNOPJobSpec(output.DataStore.Seal(), ver.ContainerName, jobSpecID) + if err != nil { + return nil, fmt.Errorf("failed to get verifier job spec for %s: %w", ver.ContainerName, err) + } + verifierJobSpecs[ver.ContainerName] = jobSpec + + // Extract inner config from job spec for standalone mode + verCfg, err := ParseVerifierConfigFromJobSpec(jobSpec) + if err != nil { + return nil, fmt.Errorf("failed to parse verifier config from job spec: %w", err) + } + + // Marshal the inner config back to TOML for standalone mode + configBytes, err := toml.Marshal(verCfg) + if err != nil { + return nil, fmt.Errorf("failed to marshal verifier config: %w", err) + } + ver.GeneratedConfig = string(configBytes) + + if sharedTLSCerts != nil && !ver.InsecureAggregatorConnection 
{ + ver.TLSCACertFile = sharedTLSCerts.CACertFile + } + } + } + + return verifierJobSpecs, nil +} + // NewEnvironment creates a new CCIP CCV environment locally in Docker. func NewEnvironment() (in *Cfg, err error) { ctx := context.Background() @@ -217,9 +546,17 @@ func NewEnvironment() (in *Cfg, err error) { // CL nodes, so they can receive the credentials via secrets. /////////////////////////////////////////// for _, agg := range in.Aggregator { - if _, err := agg.EnsureClientCredentials(); err != nil { + creds, err := agg.EnsureClientCredentials() + if err != nil { return nil, fmt.Errorf("failed to ensure client credentials for aggregator %s: %w", agg.CommitteeName, err) } + for clientID, c := range creds { + Plog.Debug(). + Str("aggregator", agg.CommitteeName). + Str("clientID", clientID). + Str("apiKey", c.APIKey[:8]+"..."). + Msg("Generated aggregator credentials") + } } ///////////////////////////////////////// // END: Generate Aggregator Credentials // @@ -477,13 +814,29 @@ func NewEnvironment() (in *Cfg, err error) { /////////////////////////// // Generate indexer config using changeset (on-chain state as source of truth) if len(in.Aggregator) > 0 && in.Indexer != nil { + // Build indexer verifier configs FIRST from aggregator topology + // Each aggregator becomes a separate verifier entry for HA + verifierConfigs := make([]config.VerifierConfig, 0, len(in.Aggregator)) verifierNameToQualifier := make(map[string]string, len(in.Aggregator)) - for idx, agg := range in.Aggregator { - if idx < len(in.Indexer.IndexerConfig.Verifiers) { - verifierName := in.Indexer.IndexerConfig.Verifiers[idx].Name - verifierNameToQualifier[verifierName] = agg.CommitteeName + + for _, agg := range in.Aggregator { + if agg.Out == nil { + continue } + verifierName := fmt.Sprintf("CommitteeVerifier (%s)", agg.CommitteeName) + verifierConfigs = append(verifierConfigs, config.VerifierConfig{ + Type: config.ReaderTypeAggregator, + AggregatorReaderConfig: config.AggregatorReaderConfig{ + Address: agg.Out.Address, + Since: 0, + }, + Name: verifierName, + BatchSize: 100, + MaxBatchWaitTime: 50, + }) + verifierNameToQualifier[verifierName] = agg.CommitteeName } + in.Indexer.IndexerConfig.Verifiers = verifierConfigs cs := changesets.GenerateIndexerConfig() output, err := cs.Apply(*e, changesets.GenerateIndexerConfigCfg{ @@ -500,13 +853,6 @@ func NewEnvironment() (in *Cfg, err error) { return nil, fmt.Errorf("failed to get indexer config from output: %w", err) } in.Indexer.GeneratedCfg = idxCfg - - // Update verifier addresses to use nginx TLS proxy (Later will be pulled from env metadata and added to job spec) - for idx, agg := range in.Aggregator { - if agg.Out != nil && idx < len(in.Indexer.IndexerConfig.Verifiers) { - in.Indexer.IndexerConfig.Verifiers[idx].Address = agg.Out.Address - } - } } // Set TLS CA cert for indexer (all aggregators share the same CA) @@ -552,46 +898,32 @@ func NewEnvironment() (in *Cfg, err error) { } in.IndexerEndpoint = indexerOut.ExternalHTTPURL + in.IndexerInternalEndpoint = indexerOut.InternalHTTPURL ///////////////////////// // END: Launch indexer // ///////////////////////// + ///////////////////////////////////////// + // START: Build shared EnvConfig // + // Used by both executor and verifier // + // changesets as single source of truth // + ///////////////////////////////////////// + + envConfigPath, err := buildAndWriteEnvConfig(in, selectors) + if err != nil { + return nil, err + } + ///////////////////////////// // START: Launch executors // ///////////////////////////// - 
if len(in.Executor) > 0 { - execs, err := services.ResolveContractsForExecutor(e.DataStore, in.Blockchains, in.Executor) - if err != nil { - return nil, fmt.Errorf("failed to lookup contracts for executor: %w", err) - } - execs, err = services.SetExecutorPoolAndID(execs) - if err != nil { - return nil, fmt.Errorf("failed to set executor pool and ID: %w", err) - } - execs, err = services.SetTransmitterPrivateKey(execs) - if err != nil { - return nil, fmt.Errorf("failed to set transmitter private key: %w", err) - } - - // fund the keys used by the executors to send transactions in standalone mode. - addresses := make([]protocol.UnknownAddress, 0, len(execs)) - for _, exec := range execs { - addresses = append(addresses, exec.GetTransmitterAddress()) - } - Plog.Info().Any("Addresses", addresses).Int("ImplsLen", len(impls)).Msg("Funding executors") - for i, impl := range impls { - Plog.Info().Int("ImplIndex", i).Msg("Funding executor") - err = impl.FundAddresses(ctx, in.Blockchains[i], addresses, big.NewInt(5)) - if err != nil { - return nil, fmt.Errorf("failed to fund addresses for executors: %w", err) - } - Plog.Info().Int("ImplIndex", i).Msg("Funded executors") - } - - in.Executor = execs + executorJobSpecs, err := generateExecutorJobSpecs(ctx, e, in, selectors, impls, envConfigPath) + if err != nil { + return nil, err } + _, err = launchStandaloneExecutors(in.Executor) if err != nil { return nil, fmt.Errorf("failed to create standalone executor: %w", err) @@ -604,40 +936,10 @@ func NewEnvironment() (in *Cfg, err error) { ///////////////////////////// // START: Launch verifiers // ///////////////////////////// - // Populate verifier input with contract addresses from the CLDF datastore. - for i := range in.Verifier { - ver, err := services.ResolveContractsForVerifier(e.DataStore, in.Blockchains, *in.Verifier[i]) - if err != nil { - return nil, fmt.Errorf("failed to lookup contracts for verifier %s: %w", in.Verifier[i].CommitteeName, err) - } - // Find aggregator output for this verifier's committee - for _, agg := range in.Aggregator { - if agg.CommitteeName == ver.CommitteeName && agg.Out != nil { - if ver.InsecureAggregatorConnection { - // CL node tests can't inject certs, use direct insecure connection - ver.AggregatorAddress = agg.Out.ExternalHTTPUrl - } else { - ver.AggregatorAddress = agg.Out.Address - } - break - } - } - if ver.AggregatorAddress == "" { - if ver.InsecureAggregatorConnection { - ver.AggregatorAddress = fmt.Sprintf("%s-aggregator:50051", ver.CommitteeName) - } else { - ver.AggregatorAddress = fmt.Sprintf("%s-aggregator-nginx:443", ver.CommitteeName) - } - } - - // Use shared TLS CA cert for all verifiers (not needed for insecure connections) - if sharedTLSCerts != nil && !ver.InsecureAggregatorConnection { - ver.TLSCACertFile = sharedTLSCerts.CACertFile - } - - // Apply changes back to input. - in.Verifier[i] = &ver + verifierJobSpecs, err := generateVerifierJobSpecs(e, in, selectors, envConfigPath, sharedTLSCerts) + if err != nil { + return nil, err } _, err = launchStandaloneVerifiers(in) @@ -677,7 +979,7 @@ func NewEnvironment() (in *Cfg, err error) { // there would be no CL nodes and this would be a no-op. 
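Before the hunk below rewires the call, a sketch of the new contract. `verifierSpecTOML` is a hypothetical placeholder for a spec produced by `generateVerifierJobSpecs`; the envelope format comes from the `GenerateJobSpec` helpers this diff deletes.

```go
verifierJobSpecs := map[string]string{
	"verifier-0": verifierSpecTOML, // keyed by container name
}
executorJobSpecs := map[string]string{} // no executors in this sketch

// createJobs now fails fast on a missing entry instead of generating specs on the fly.
if err := createJobs(in, in.Verifier, in.Executor, verifierJobSpecs, executorJobSpecs); err != nil {
	return nil, fmt.Errorf("failed to create jobs: %w", err)
}
```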
//////////////////////////////////////////////////// - err = createJobs(in, in.Verifier, in.Executor) + err = createJobs(in, in.Verifier, in.Executor, verifierJobSpecs, executorJobSpecs) if err != nil { return nil, fmt.Errorf("failed to create jobs: %w", err) } @@ -695,7 +997,14 @@ func NewEnvironment() (in *Cfg, err error) { } // createJobs creates the jobs for the verifiers and executors on the CL nodes if they're in CL mode. -func createJobs(in *Cfg, vIn []*services.VerifierInput, executorIn []*services.ExecutorInput) error { +// Uses pre-generated job specs from changesets instead of generating them on the fly. +func createJobs( + in *Cfg, + vIn []*services.VerifierInput, + executorIn []*services.ExecutorInput, + verifierJobSpecs map[string]string, + executorJobSpecs map[string]string, +) error { // Exit early, there are no nodes configured. if len(in.NodeSets) == 0 { return nil @@ -715,16 +1024,32 @@ func createJobs(in *Cfg, vIn []*services.VerifierInput, executorIn []*services.E case services.CL: index, clClient := roundRobin.GetNext() - jobSpec, err := ver.GenerateJobSpec() - if err != nil { - return fmt.Errorf("failed to generate verifier config: %w", err) + jobSpec, ok := verifierJobSpecs[ver.ContainerName] + if !ok { + return fmt.Errorf("job spec not found for verifier %s", ver.ContainerName) } + Plog.Debug(). + Str("ContainerName", ver.ContainerName). + Str("JobSpec", jobSpec). + Msg("Submitting verifier job spec to CL node") jb, resp, err := clClient.CreateJobRaw(jobSpec) if err != nil { + Plog.Error(). + Err(err). + Str("ContainerName", ver.ContainerName). + Str("JobSpec", jobSpec). + Msg("Failed to create committee verifier job") return fmt.Errorf("failed to create committee verifier job: %w", err) } if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { - return fmt.Errorf("failed to create committee verifier job: %s", resp.Status) + bodyBytes, _ := io.ReadAll(resp.Body) + Plog.Error(). + Str("Status", resp.Status). + Str("ContainerName", ver.ContainerName). + Str("JobSpec", jobSpec). + Str("ResponseBody", string(bodyBytes)). + Msg("Failed to create committee verifier job - bad status") + return fmt.Errorf("failed to create committee verifier job: %s - %s", resp.Status, string(bodyBytes)) } Plog.Info(). Int("CurrentIndex", index). @@ -740,9 +1065,9 @@ func createJobs(in *Cfg, vIn []*services.VerifierInput, executorIn []*services.E case services.CL: index, clClient := roundRobin.GetNext() - jobSpec, err := exec.GenerateJobSpec() - if err != nil { - return fmt.Errorf("failed to generate executor config: %w", err) + jobSpec, ok := executorJobSpecs[exec.ContainerName] + if !ok { + return fmt.Errorf("job spec not found for executor %s", exec.ContainerName) } jb, resp, err := clClient.CreateJobRaw(jobSpec) @@ -843,6 +1168,12 @@ func launchCLNodes( return nil, fmt.Errorf("no API key pairs found for client %s", apiClient.ClientID) } apiKeyPair := apiClient.APIKeyPairs[0] + Plog.Debug(). + Int("nodeIndex", index). + Str("verifier", ver.ContainerName). + Str("committee", ver.CommitteeName). + Str("apiKey", apiKeyPair.APIKey[:8]+"..."). + Msg("Passing aggregator credentials to CL node") aggSecretsPerNode[index] = append(aggSecretsPerNode[index], AggregatorSecret{ VerifierID: ver.ContainerName, APIKey: apiKeyPair.APIKey, @@ -1049,3 +1380,47 @@ type IndexerSecret struct { APIKey string `toml:",omitempty"` APISecret string `toml:",omitempty"` } + +// VerifierJobSpec represents the structure of a verifier job spec TOML. 
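The envelope types declared below mirror the job spec TOML submitted to CL nodes. A decode sketch follows; the inner `verifier_id` key is an assumption, which is harmless here because BurntSushi/toml ignores keys it cannot match.

```go
const sampleSpec = `
schemaVersion = 1
type = "ccvcommitteeverifier"
committeeVerifierConfig = """
verifier_id = "verifier-0"  # assumed inner commit.Config key
"""
`

cfg, err := ParseVerifierConfigFromJobSpec(sampleSpec)
if err != nil {
	return fmt.Errorf("bad job spec: %w", err)
}
_ = cfg // *commit.Config decoded from the inner TOML document
```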
+type VerifierJobSpec struct { + SchemaVersion int `toml:"schemaVersion"` + Type string `toml:"type"` + CommitteeVerifierConfig string `toml:"committeeVerifierConfig"` +} + +// ParseVerifierConfigFromJobSpec extracts the inner commit.Config from a verifier job spec. +func ParseVerifierConfigFromJobSpec(jobSpec string) (*commit.Config, error) { + var spec VerifierJobSpec + if err := toml.Unmarshal([]byte(jobSpec), &spec); err != nil { + return nil, fmt.Errorf("failed to parse job spec: %w", err) + } + + var cfg commit.Config + if err := toml.Unmarshal([]byte(spec.CommitteeVerifierConfig), &cfg); err != nil { + return nil, fmt.Errorf("failed to parse verifier config from job spec: %w", err) + } + + return &cfg, nil +} + +// ExecutorJobSpec represents the structure of an executor job spec TOML. +type ExecutorJobSpec struct { + SchemaVersion int `toml:"schemaVersion"` + Type string `toml:"type"` + ExecutorConfig string `toml:"executorConfig"` +} + +// ParseExecutorConfigFromJobSpec extracts the inner executor.Configuration from an executor job spec. +func ParseExecutorConfigFromJobSpec(jobSpec string) (*executor.Configuration, error) { + var spec ExecutorJobSpec + if err := toml.Unmarshal([]byte(jobSpec), &spec); err != nil { + return nil, fmt.Errorf("failed to parse job spec: %w", err) + } + + var cfg executor.Configuration + if err := toml.Unmarshal([]byte(spec.ExecutorConfig), &cfg); err != nil { + return nil, fmt.Errorf("failed to parse executor config from job spec: %w", err) + } + + return &cfg, nil +} diff --git a/build/devenv/gencfg/gencfg.go b/build/devenv/gencfg/gencfg.go deleted file mode 100644 index 93bccc3f..00000000 --- a/build/devenv/gencfg/gencfg.go +++ /dev/null @@ -1,337 +0,0 @@ -package gencfg - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "os" - "path/filepath" - "strconv" - "time" - - "github.com/google/go-github/v68/github" - "golang.org/x/oauth2" - "gopkg.in/yaml.v2" - - "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/committee_verifier" - executor_operations "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/executor" - offrampoperations "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/offramp" - onrampoperations "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/onramp" - "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/rmn_remote" - "github.com/smartcontractkit/chainlink-ccv/aggregator/pkg/model" - ccv "github.com/smartcontractkit/chainlink-ccv/devenv" - "github.com/smartcontractkit/chainlink-ccv/devenv/services" - "github.com/smartcontractkit/chainlink-deployments-framework/datastore" -) - -func ocrThreshold(n int) uint8 { - f := (n - 1) / 3 - return uint8(f + 1) -} - -func GenerateConfigs(cldDomain string, verifierPubKeys []string, numExecutors int, createPR bool) (string, error) { - // Validate environment - ctx := context.Background() - - ccv.Plog.Info(). - Str("cldDomain", cldDomain). - Strs("verifierPubKeys", verifierPubKeys). - Int("numExecutors", numExecutors). - Bool("createPR", createPR). - Msg("Generating configs") - - token := os.Getenv("GITHUB_TOKEN") - if token == "" { - return "", fmt.Errorf("GITHUB_TOKEN environment variable is not set. 
Run `export GITHUB_TOKEN=$(gh auth token)`") - } - // Create GH client - ts := oauth2.StaticTokenSource( - &oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")}, - ) - tc := oauth2.NewClient(ctx, ts) - gh := github.NewClient(tc) - - // Fetch address refs file from github - addressRefsGh, _, _, err := gh.Repositories.GetContents(ctx, "smartcontractkit", "chainlink-deployments", fmt.Sprintf("domains/ccv/%s/datastore/address_refs.json", cldDomain), &github.RepositoryContentGetOptions{}) - if err != nil { - return "", fmt.Errorf("failed to get address refs JSON from GitHub: %w", err) - } - f, _ := base64.StdEncoding.DecodeString(*addressRefsGh.Content) - - var addressRefs []datastore.AddressRef - if err := json.Unmarshal(f, &addressRefs); err != nil { - return "", fmt.Errorf("failed to decode address refs JSON: %w", err) - } - - const ( - verifierIDPrefix = "default-verifier-" - executorIDPrefix = "default-executor-" - committeeName = "default" - monitoringOtelExporterHTTPEndpoint = "staging.telemetry.chain.link:443" - aggregatorAddress = "chainlink-ccv-aggregator.ccip.stage.external.griddle.sh/instance-1:50051" - indexerAddress = "http://chainlink-ccv-indexer.ccip.stage.external.griddle.sh/all" - ) - - var ( - onRampAddresses = make(map[string]string) - committeeVerifierAddresses = make(map[string]string) - committeeVerifierResolverAddresses = make(map[uint64]string) - defaultExecutorOnRampAddresses = make(map[string]string) - defaultExecutorOnRampAddressesUint64 = make(map[uint64]string) - rmnRemoteAddresses = make(map[string]string) - rmnRemoteAddressesUint64 = make(map[uint64]string) - offRampAddresses = make(map[uint64]string) - thresholdPerSource = make(map[uint64]uint8) - ) - - for _, ref := range addressRefs { - chainSelectorStr := strconv.FormatUint(ref.ChainSelector, 10) - switch ref.Type { - case datastore.ContractType(onrampoperations.ContractType): - onRampAddresses[chainSelectorStr] = ref.Address - case datastore.ContractType(committee_verifier.ResolverType): - committeeVerifierAddresses[chainSelectorStr] = ref.Address - committeeVerifierResolverAddresses[ref.ChainSelector] = ref.Address - case datastore.ContractType(executor_operations.ContractType): - defaultExecutorOnRampAddresses[chainSelectorStr] = ref.Address - defaultExecutorOnRampAddressesUint64[ref.ChainSelector] = ref.Address - case datastore.ContractType(rmn_remote.ContractType): - rmnRemoteAddresses[chainSelectorStr] = ref.Address - rmnRemoteAddressesUint64[ref.ChainSelector] = ref.Address - case datastore.ContractType(offrampoperations.ContractType): - offRampAddresses[ref.ChainSelector] = ref.Address - } - thresholdPerSource[ref.ChainSelector] = ocrThreshold(len(verifierPubKeys)) - } - - tempDir, err := os.MkdirTemp("", "ccv-configs") - if err != nil { - return "", fmt.Errorf("failed to create temporary directory: %w", err) - } - ccv.Plog.Info().Str("temp-dir", tempDir).Msg("Created temporary directory for configs") - - // Verifier inputs - verifierInputs := make([]*services.VerifierInput, 0, len(verifierPubKeys)) - for i, pubKey := range verifierPubKeys { - verifierInputs = append(verifierInputs, &services.VerifierInput{ - ContainerName: fmt.Sprintf("%s%d", verifierIDPrefix, i), - AggregatorAddress: aggregatorAddress, - SigningKeyPublic: pubKey, - CommitteeVerifierAddresses: committeeVerifierAddresses, - OnRampAddresses: onRampAddresses, - DefaultExecutorOnRampAddresses: defaultExecutorOnRampAddresses, - RMNRemoteAddresses: rmnRemoteAddresses, - CommitteeName: committeeName, - MonitoringOtelExporterHTTPEndpoint: 
monitoringOtelExporterHTTPEndpoint, - }) - } - - for _, verifierInput := range verifierInputs { - verifierJobSpec, err := verifierInput.GenerateJobSpec() - if err != nil { - return "", fmt.Errorf("failed to generate verifier job spec: %w", err) - } - ccv.Plog.Info().Msg("Generated verifier job spec, writing to temporary directory as a separate file") - filePath := filepath.Join(tempDir, fmt.Sprintf("verifier-%s-job-spec.toml", verifierInput.ContainerName)) - if err := os.WriteFile(filePath, []byte(verifierJobSpec), 0o644); err != nil { - return "", fmt.Errorf("failed to write verifier job spec to file: %w", err) - } - ccv.Plog.Info().Str("file-path", filePath).Msg("Wrote verifier job spec to file") - } - - // Executor inputs - executorInputs := make([]services.ExecutorInput, 0, numExecutors) - executorPool := make([]string, 0, numExecutors) - for i := range numExecutors { - executorPool = append(executorPool, fmt.Sprintf("%s%d", executorIDPrefix, i)) - } - for i := range numExecutors { - executorInputs = append(executorInputs, services.ExecutorInput{ - ExecutorID: fmt.Sprintf("%s%d", executorIDPrefix, i), - ExecutorPool: executorPool, - OfframpAddresses: offRampAddresses, - IndexerAddress: indexerAddress, - ExecutorAddresses: defaultExecutorOnRampAddressesUint64, - RmnAddresses: rmnRemoteAddressesUint64, - MonitoringOtelExporterHTTPEndpoint: monitoringOtelExporterHTTPEndpoint, - }) - } - for _, executorInput := range executorInputs { - executorJobSpec, err := executorInput.GenerateJobSpec() - if err != nil { - return "", fmt.Errorf("failed to generate executor job spec: %w", err) - } - ccv.Plog.Info().Msg("Generated executor job spec, writing to temporary directory as a separate file") - filePath := filepath.Join(tempDir, fmt.Sprintf("executor-%s-job-spec.toml", executorInput.ExecutorID)) - if err := os.WriteFile(filePath, []byte(executorJobSpec), 0o644); err != nil { - return "", fmt.Errorf("failed to write executor job spec to file: %w", err) - } - ccv.Plog.Info().Str("file-path", filePath).Msg("Wrote executor job spec to file") - } - - // Build committee config from address refs and verifier public keys - committeeConfig := buildCommitteeConfig( - verifierPubKeys, - committeeVerifierResolverAddresses, - thresholdPerSource, - ) - - // Aggregator config - generatedConfigFileName := "aggregator-generated.toml" - aggregatorInput := services.AggregatorInput{ - CommitteeName: committeeName, - MonitoringOtelExporterHTTPEndpoint: monitoringOtelExporterHTTPEndpoint, - GeneratedCommittee: committeeConfig, - } - configResult, err := aggregatorInput.GenerateConfigs(generatedConfigFileName) - if err != nil { - return "", fmt.Errorf("failed to generate aggregator configs: %w", err) - } - ccv.Plog.Info().Msg("Generated aggregator configs") - - // Write main config - filePath := filepath.Join(tempDir, "aggregator-config.toml") - if err := os.WriteFile(filePath, configResult.MainConfig, 0o644); err != nil { - return "", fmt.Errorf("failed to write aggregator config to file: %w", err) - } - ccv.Plog.Info().Str("file-path", filePath).Msg("Wrote aggregator config to file") - - // Write generated config (committee) - generatedFilePath := filepath.Join(tempDir, generatedConfigFileName) - if err := os.WriteFile(generatedFilePath, configResult.GeneratedConfig, 0o644); err != nil { - return "", fmt.Errorf("failed to write aggregator generated config to file: %w", err) - } - ccv.Plog.Info().Str("file-path", generatedFilePath).Msg("Wrote aggregator generated config to file") - - if createPR { - prURL, err := 
createConfigPR(gh, ctx, cldDomain, configResult.MainConfig, configResult.GeneratedConfig) - if err != nil { - return "", fmt.Errorf("failed to create config PR: %w", err) - } - ccv.Plog.Info().Str("pr-url", prURL).Msg("Created PR with aggregator config") - } - - return tempDir, nil -} - -func createConfigPR(gh *github.Client, ctx context.Context, cldDomain string, aggregatorConfig, generatedConfig []byte) (string, error) { - // Create a new branch, add the aggregator config file and open a PR - owner := "smartcontractkit" - repo := "infra-k8s" - - // Get repository to find default branch - repoInfo, _, err := gh.Repositories.Get(ctx, owner, repo) - if err != nil { - return "", fmt.Errorf("failed to fetch repository info: %w", err) - } - defaultBranch := repoInfo.GetDefaultBranch() - - // Create a unique branch name - branchName := fmt.Sprintf("ccv_config_%d", time.Now().Unix()) - - // Get the reference for the default branch - baseRef, _, err := gh.Git.GetRef(ctx, owner, repo, "refs/heads/"+defaultBranch) - if err != nil { - return "", fmt.Errorf("failed to get base ref for branch %s: %w", defaultBranch, err) - } - - // Create new branch ref pointing to the same commit as default - newRef := &github.Reference{ - Ref: github.Ptr("refs/heads/" + branchName), - Object: &github.GitObject{ - SHA: baseRef.Object.SHA, - }, - } - _, _, err = gh.Git.CreateRef(ctx, owner, repo, newRef) - if err != nil { - return "", fmt.Errorf("failed to create branch %s: %w", branchName, err) - } - - // Path where to add the aggregator config in the repo - aggPath := "projects/chainlink-ccv/files/aggregator/aggregator-config.yaml" - - // Create file on the new branch - commitMsg := "Update ccv configuration" - - // Marshal aggregator config into YAML under configMap with both main and generated configs - aggYaml := map[string]any{ - "main": map[string]any{ - "stage": map[string]any{ - "configMap": map[string]string{ - "aggregator.toml": string(aggregatorConfig), - "aggregator-generated.toml": string(generatedConfig), - }, - }, - }, - } - aggFileContent, err := yaml.Marshal(aggYaml) - if err != nil { - return "", fmt.Errorf("failed to marshal aggregator config to YAML: %w", err) - } - - aggFile, _, _, _ := gh.Repositories.GetContents(ctx, "smartcontractkit", "infra-k8s", "projects/chainlink-ccv/files/aggregator/aggregator-config.yaml", &github.RepositoryContentGetOptions{}) - aggSHA := aggFile.GetSHA() - - opts := &github.RepositoryContentFileOptions{ - Message: github.Ptr(commitMsg), - Content: aggFileContent, - Branch: github.Ptr(branchName), - SHA: github.Ptr(aggSHA), - } - _, _, err = gh.Repositories.CreateFile(ctx, owner, repo, aggPath, opts) - if err != nil { - return "", fmt.Errorf("failed to create file %s on branch %s: %w", aggPath, branchName, err) - } - - // Open a PR from the new branch into default - prTitle := "CCV config update" - prBody := fmt.Sprintf("CCV CLI auto-generated PR to update configuration from CLD for %s environment", cldDomain) - newPR := &github.NewPullRequest{ - Title: github.Ptr(prTitle), - Head: github.Ptr(branchName), - Base: github.Ptr(defaultBranch), - Body: github.Ptr(prBody), - } - pr, _, err := gh.PullRequests.Create(ctx, owner, repo, newPR) - if err != nil { - return "", fmt.Errorf("failed to create pull request: %w", err) - } - return pr.GetHTMLURL(), nil -} - -func buildCommitteeConfig( - verifierPubKeys []string, - resolverAddresses map[uint64]string, - thresholdPerSource map[uint64]uint8, -) *model.Committee { - signers := make([]model.Signer, 0, len(verifierPubKeys)) - 
for _, pubKey := range verifierPubKeys { - signers = append(signers, model.Signer{Address: pubKey}) - } - - quorumConfigs := make(map[model.SourceSelector]*model.QuorumConfig) - destVerifiers := make(map[model.DestinationSelector]string) - - for chainSelector, resolverAddr := range resolverAddresses { - chainSelStr := strconv.FormatUint(chainSelector, 10) - - destVerifiers[chainSelStr] = resolverAddr - - threshold := thresholdPerSource[chainSelector] - if threshold == 0 { - threshold = uint8(len(signers)) - } - - quorumConfigs[chainSelStr] = &model.QuorumConfig{ - SourceVerifierAddress: resolverAddr, - Signers: signers, - Threshold: threshold, - } - } - - return &model.Committee{ - QuorumConfigs: quorumConfigs, - DestinationVerifiers: destVerifiers, - } -} diff --git a/build/devenv/go.mod b/build/devenv/go.mod index da9c8915..1ceb15cc 100644 --- a/build/devenv/go.mod +++ b/build/devenv/go.mod @@ -189,8 +189,6 @@ require ( github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/go-github/v68 v68.0.0 - github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e // indirect github.com/google/s2a-go v0.1.9 // indirect @@ -425,7 +423,7 @@ require ( golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc // indirect golang.org/x/mod v0.30.0 // indirect golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.32.0 + golang.org/x/oauth2 v0.32.0 // indirect golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.39.0 // indirect golang.org/x/term v0.38.0 // indirect @@ -440,7 +438,7 @@ require ( gopkg.in/guregu/null.v4 v4.0.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect - gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.32.2 // indirect k8s.io/apimachinery v0.32.2 // indirect diff --git a/build/devenv/go.sum b/build/devenv/go.sum index 801a8b7a..46fd06fa 100644 --- a/build/devenv/go.sum +++ b/build/devenv/go.sum @@ -487,15 +487,12 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-github/v68 v68.0.0 h1:ZW57zeNZiXTdQ16qrDiZ0k6XucrxZ2CGmoTvcCyQG6s= -github.com/google/go-github/v68 v68.0.0/go.mod h1:K9HAUBovM2sLwM408A18h+wd9vqdLOEqTUCbnRIcx68= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= diff --git a/build/devenv/services/committeeVerifier.go b/build/devenv/services/committeeVerifier.go index 3a6a25bd..ffbfba63 100644 --- a/build/devenv/services/committeeVerifier.go +++ b/build/devenv/services/committeeVerifier.go @@ -2,7 +2,6 @@ package services import ( "context" - _ "embed" "fmt" "os" "path/filepath" @@ -10,31 +9,20 @@ import ( "time" "github.com/BurntSushi/toml" - "github.com/Masterminds/semver/v3" "github.com/docker/docker/api/types/container" "github.com/docker/go-connections/nat" "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/modules/postgres" "github.com/testcontainers/testcontainers-go/wait" - chainsel "github.com/smartcontractkit/chain-selectors" - "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/committee_verifier" - "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/executor" - onrampoperations "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/onramp" - "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/rmn_remote" aggregator "github.com/smartcontractkit/chainlink-ccv/aggregator/pkg" - "github.com/smartcontractkit/chainlink-ccv/devenv/evm" "github.com/smartcontractkit/chainlink-ccv/devenv/internal/util" "github.com/smartcontractkit/chainlink-ccv/protocol" "github.com/smartcontractkit/chainlink-ccv/verifier/commit" - "github.com/smartcontractkit/chainlink-deployments-framework/datastore" "github.com/smartcontractkit/chainlink-testing-framework/framework" "github.com/smartcontractkit/chainlink-testing-framework/framework/components/blockchain" ) -//go:embed committeeVerifier.template.toml -var committeeVerifierConfigTemplate string - const ( DefaultVerifierName = "verifier" DefaultVerifierDBName = "verifier-db" @@ -92,102 +80,53 @@ type VerifierEnvConfig struct { } type VerifierInput struct { - Mode Mode `toml:"mode"` - DB *VerifierDBInput `toml:"db"` - Out *VerifierOutput `toml:"out"` - Image string `toml:"image"` - SourceCodePath string `toml:"source_code_path"` - RootPath string `toml:"root_path"` - // TODO: Rename to VerifierID -- maps to this value in verifier.Config - ContainerName string `toml:"container_name"` - Port int `toml:"port"` - UseCache bool `toml:"use_cache"` - AggregatorAddress string `toml:"aggregator_address"` - Env *VerifierEnvConfig `toml:"env"` - CommitteeName string `toml:"committee_name"` - NodeIndex int `toml:"node_index"` - - // SigningKey is generated during the deploy step. + Mode Mode `toml:"mode"` + DB *VerifierDBInput `toml:"db"` + Out *VerifierOutput `toml:"out"` + Image string `toml:"image"` + SourceCodePath string `toml:"source_code_path"` + RootPath string `toml:"root_path"` + ContainerName string `toml:"container_name"` + Port int `toml:"port"` + UseCache bool `toml:"use_cache"` + Env *VerifierEnvConfig `toml:"env"` + CommitteeName string `toml:"committee_name"` + NodeIndex int `toml:"node_index"` + + // SigningKey is the private key for standalone mode signing. SigningKey string `toml:"signing_key"` - // SigningKeyPublic is generated during the deploy step. - // Maps to signer_address in the verifier config toml. - SigningKeyPublic string `toml:"signing_key_public"` - // Contract addresses used to generate configs - // Maps to on_ramp_addresses in the verifier config toml. 
- OnRampAddresses map[string]string `toml:"on_ramp_addresses"` - // Maps to committee_verifier_addresses in the verifier config toml. - CommitteeVerifierAddresses map[string]string `toml:"committee_verifier_addresses"` - // Maps to default_executor_on_ramp_addresses in the verifier config toml. - DefaultExecutorOnRampAddresses map[string]string `toml:"default_executor_on_ramp_addresses"` - // Maps to rmn_remote_addresses in the verifier config toml. - RMNRemoteAddresses map[string]string `toml:"rmn_remote_addresses"` - // Maps to Monitoring.Beholder.OtelExporterHTTPEndpoint in the verifier config toml. - MonitoringOtelExporterHTTPEndpoint string `toml:"monitoring_otel_exporter_http_endpoint"` - // Maps to blockchain_infos in the verifier config toml. - // NOTE: this should be removed from the verifier app config toml and into another config file - // that is specifically for standalone mode verifiers. - BlockchainInfos map[string]*protocol.BlockchainInfo `toml:"blockchain_infos"` + // SigningKeyPublic is the public key used for on-chain committee configuration. + SigningKeyPublic string `toml:"signing_key_public"` // TLSCACertFile is the path to the CA certificate file for TLS verification. - // This is set by the aggregator service and used to trust the self-signed CA. TLSCACertFile string `toml:"-"` // InsecureAggregatorConnection disables TLS for the aggregator gRPC connection. - // Only use for CL node tests where certificates cannot be injected. InsecureAggregatorConnection bool `toml:"insecure_aggregator_connection"` // AggregatorOutput is optionally set to automatically obtain credentials. - // If Env is nil or has empty credentials, credentials will be looked up from here. AggregatorOutput *AggregatorOutput `toml:"-"` -} -func (v *VerifierInput) GenerateJobSpec() (verifierJobSpec string, err error) { - tomlConfigBytes, err := v.GenerateConfig() - if err != nil { - return "", fmt.Errorf("failed to generate verifier config: %w", err) - } - return fmt.Sprintf( - ` -schemaVersion = 1 -type = "ccvcommitteeverifier" -committeeVerifierConfig = """ -%s -""" -`, string(tomlConfigBytes), - ), nil + // GeneratedConfig is the verifier configuration TOML generated by the changeset. + // This is used in standalone mode. For CL mode, job specs are submitted directly. + GeneratedConfig string `toml:"-"` } -func (v *VerifierInput) buildVerifierConfiguration(config *commit.Config) error { - if _, err := toml.Decode(committeeVerifierConfigTemplate, &config); err != nil { - return fmt.Errorf("failed to decode verifier config template: %w", err) - } - - config.VerifierID = v.ContainerName - config.AggregatorAddress = v.AggregatorAddress - config.SignerAddress = v.SigningKeyPublic - config.CommitteeVerifierAddresses = v.CommitteeVerifierAddresses - config.OnRampAddresses = v.OnRampAddresses - config.DefaultExecutorOnRampAddresses = v.DefaultExecutorOnRampAddresses - config.RMNRemoteAddresses = v.RMNRemoteAddresses - config.InsecureAggregatorConnection = v.InsecureAggregatorConnection - - // The value in the template should be usable for devenv setups, only override if a different value is provided. - if v.MonitoringOtelExporterHTTPEndpoint != "" { - config.Monitoring.Beholder.OtelExporterHTTPEndpoint = v.MonitoringOtelExporterHTTPEndpoint +// GenerateConfigWithBlockchainInfos combines the pre-generated config with blockchain infos +// for standalone mode deployment. 
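A calling sketch for the method defined next. `generatedTOML` stands in for the changeset output; the map key format and the empty `BlockchainInfo` literal are placeholders.

```go
ver := &services.VerifierInput{GeneratedConfig: generatedTOML}
infos := map[string]*protocol.BlockchainInfo{
	"1337": {}, // per-chain RPC details elided; key format assumed
}
cfgBytes, err := ver.GenerateConfigWithBlockchainInfos(infos)
if err != nil {
	return fmt.Errorf("failed to build standalone verifier config: %w", err)
}
_ = cfgBytes // changeset config plus blockchain_infos, ready to mount into the container
```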
+func (v *VerifierInput) GenerateConfigWithBlockchainInfos(blockchainInfos map[string]*protocol.BlockchainInfo) ([]byte, error) { + if v.GeneratedConfig == "" { + return nil, fmt.Errorf("GeneratedConfig is empty - must be set from changeset output") } - return nil -} - -func (v *VerifierInput) GenerateConfigWithBlockchainInfos(blockchainInfos map[string]*protocol.BlockchainInfo) (verifierTomlConfig []byte, err error) { - // Build base configuration + // Parse the generated config var baseConfig commit.Config - if err := v.buildVerifierConfiguration(&baseConfig); err != nil { - return nil, err + if _, err := toml.Decode(v.GeneratedConfig, &baseConfig); err != nil { + return nil, fmt.Errorf("failed to parse generated config: %w", err) } - // Wrap in ConfigWithBlockchainInfo and add blockchain infos + // Wrap with blockchain infos for standalone mode config := commit.ConfigWithBlockchainInfos{ Config: baseConfig, BlockchainInfos: blockchainInfos, @@ -201,21 +140,6 @@ func (v *VerifierInput) GenerateConfigWithBlockchainInfos(blockchainInfos map[st return cfg, nil } -func (v *VerifierInput) GenerateConfig() (verifierTomlConfig []byte, err error) { - var config commit.Config - err = v.buildVerifierConfiguration(&config) - if err != nil { - return nil, fmt.Errorf("failed to build verifier configuration: %w", err) - } - - cfg, err := toml.Marshal(config) - if err != nil { - return nil, fmt.Errorf("failed to marshal verifier config to TOML: %w", err) - } - - return cfg, nil -} - type VerifierOutput struct { ContainerName string `toml:"container_name"` ExternalHTTPURL string `toml:"http_url"` @@ -438,64 +362,3 @@ func NewVerifier(in *VerifierInput) (*VerifierOutput, error) { in.ContainerName, in.ContainerName, in.DB.Port, in.ContainerName), }, nil } - -func ResolveContractsForVerifier(ds datastore.DataStore, blockchains []*blockchain.Input, ver VerifierInput) (VerifierInput, error) { - ver.OnRampAddresses = make(map[string]string) - ver.CommitteeVerifierAddresses = make(map[string]string) - ver.DefaultExecutorOnRampAddresses = make(map[string]string) - ver.RMNRemoteAddresses = make(map[string]string) - - for _, chain := range blockchains { - networkInfo, err := chainsel.GetChainDetailsByChainIDAndFamily(chain.ChainID, chainsel.FamilyEVM) - if err != nil { - return VerifierInput{}, err - } - selectorStr := strconv.FormatUint(networkInfo.ChainSelector, 10) - - onRampAddressRef, err := ds.Addresses().Get(datastore.NewAddressRefKey( - networkInfo.ChainSelector, - datastore.ContractType(onrampoperations.ContractType), - semver.MustParse(onrampoperations.Deploy.Version()), - "", - )) - if err != nil { - return VerifierInput{}, fmt.Errorf("failed to get on ramp address for chain %s: %w", chain.ChainID, err) - } - ver.OnRampAddresses[selectorStr] = onRampAddressRef.Address - - committeeVerifierAddressRef, err := ds.Addresses().Get(datastore.NewAddressRefKey( - networkInfo.ChainSelector, - datastore.ContractType(committee_verifier.ResolverType), - semver.MustParse(committee_verifier.Deploy.Version()), - ver.CommitteeName, - )) - if err != nil { - return VerifierInput{}, fmt.Errorf("failed to get committee verifier address for chain %s: %w", chain.ChainID, err) - } - ver.CommitteeVerifierAddresses[selectorStr] = committeeVerifierAddressRef.Address - - defaultExecutorOnRampAddressRef, err := ds.Addresses().Get(datastore.NewAddressRefKey( - networkInfo.ChainSelector, - datastore.ContractType(executor.ProxyType), - semver.MustParse(executor.DeployProxy.Version()), - evm.DefaultExecutorQualifier, - )) - if err != 
nil { - return VerifierInput{}, fmt.Errorf("failed to get default executor on ramp address for chain %s: %w", chain.ChainID, err) - } - ver.DefaultExecutorOnRampAddresses[selectorStr] = defaultExecutorOnRampAddressRef.Address - - rmnRemoteAddressRef, err := ds.Addresses().Get(datastore.NewAddressRefKey( - networkInfo.ChainSelector, - datastore.ContractType(rmn_remote.ContractType), - semver.MustParse(rmn_remote.Deploy.Version()), - "", - )) - if err != nil { - return VerifierInput{}, fmt.Errorf("failed to get rmn remote address for chain %s: %w", chain.ChainID, err) - } - ver.RMNRemoteAddresses[selectorStr] = rmnRemoteAddressRef.Address - } - - return ver, nil -} diff --git a/build/devenv/services/executor.go b/build/devenv/services/executor.go index 49efbb5e..a902ebdb 100644 --- a/build/devenv/services/executor.go +++ b/build/devenv/services/executor.go @@ -2,33 +2,22 @@ package services import ( "context" - _ "embed" "encoding/hex" - "errors" "fmt" "os" "path/filepath" - "slices" "strconv" - "time" "github.com/BurntSushi/toml" - "github.com/Masterminds/semver/v3" "github.com/docker/docker/api/types/container" "github.com/docker/go-connections/nat" "github.com/ethereum/go-ethereum/crypto" "github.com/testcontainers/testcontainers-go" - chainsel "github.com/smartcontractkit/chain-selectors" - execcontract "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/executor" - offrampoperations "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/offramp" - "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/rmn_remote" "github.com/smartcontractkit/chainlink-ccv/devenv/internal/util" "github.com/smartcontractkit/chainlink-ccv/executor" "github.com/smartcontractkit/chainlink-ccv/protocol" - "github.com/smartcontractkit/chainlink-deployments-framework/datastore" "github.com/smartcontractkit/chainlink-testing-framework/framework" - "github.com/smartcontractkit/chainlink-testing-framework/framework/components/blockchain" ) const ( @@ -38,32 +27,25 @@ const ( DefaultExecutorMode = Standalone ) -//go:embed executor.template.toml -var executorConfigTemplate string - type ExecutorInput struct { - Mode Mode `toml:"mode"` - Out *ExecutorOutput `toml:"out"` - Image string `toml:"image"` - SourceCodePath string `toml:"source_code_path"` - RootPath string `toml:"root_path"` - ContainerName string `toml:"container_name"` - Port int `toml:"port"` - UseCache bool `toml:"use_cache"` - OfframpAddresses map[uint64]string `toml:"offramp_addresses"` - ExecutorPool []string `toml:"executor_pool"` - ExecutorID string `toml:"executor_id"` - RmnAddresses map[uint64]string `toml:"rmn_addresses"` - ExecutorAddresses map[uint64]string `toml:"executor_addresses"` - IndexerAddress string `toml:"indexer_address"` - // ExecutorQualifier is the qualifier for the executor contract to have the offchain executor - // check against. + Mode Mode `toml:"mode"` + Out *ExecutorOutput `toml:"out"` + Image string `toml:"image"` + SourceCodePath string `toml:"source_code_path"` + RootPath string `toml:"root_path"` + ContainerName string `toml:"container_name"` + Port int `toml:"port"` + UseCache bool `toml:"use_cache"` + + // ExecutorQualifier is the qualifier for the executor contract. ExecutorQualifier string `toml:"executor_qualifier"` - // Maps to Monitoring.Beholder.OtelExporterHTTPEndpoint in the executor config toml. 
-	MonitoringOtelExporterHTTPEndpoint string            `toml:"monitoring_otel_exporter_http_endpoint"`
-	// Only used in standalone mode.
+	// TransmitterPrivateKey is used in standalone mode for transaction signing.
 	TransmitterPrivateKey string `toml:"transmitter_private_key"`
+
+	// GeneratedConfig is the executor configuration TOML generated by the changeset.
+	// This is used in standalone mode. For CL mode, job specs are submitted directly.
+	GeneratedConfig string `toml:"-"`
 }
 
 type ExecutorOutput struct {
@@ -73,96 +55,20 @@ type ExecutorOutput struct {
 	UseCache        bool   `toml:"use_cache"`
 }
 
-// GenerateJobSpec generates a Chainlink job spec for the executor.
-// This is used for Chainlink node deployments and does NOT include blockchain infos.
-func (v *ExecutorInput) GenerateJobSpec() (executorJobSpec string, err error) {
-	tomlConfigBytes, err := v.GenerateConfig()
-	if err != nil {
-		return "", fmt.Errorf("failed to generate executor config: %w", err)
-	}
-	return fmt.Sprintf(
-		`
-schemaVersion = 1
-type = "ccvexecutor"
-executorConfig = """
-%s
-"""
-`, string(tomlConfigBytes),
-	), nil
-}
-
-// buildExecutorConfiguration builds the core executor configuration from ExecutorInput.
-// This is shared logic used by both GenerateConfig and GenerateConfigWithBlockchainInfos.
-func (v *ExecutorInput) buildExecutorConfiguration(config *executor.Configuration) error {
-	// Decode template into base config
-	if _, err := toml.Decode(executorConfigTemplate, config); err != nil {
-		return fmt.Errorf("failed to decode executor config template: %w", err)
-	}
-
-	// Validate inputs
-	if v.ExecutorID == "" {
-		return errors.New("invalid ExecutorID, should be non-empty")
-	}
-	if len(v.ExecutorPool) == 0 {
-		return errors.New("invalid ExecutorPool, should be non-empty")
-	}
-	if !slices.Contains(v.ExecutorPool, v.ExecutorID) {
-		return fmt.Errorf("invalid ExecutorID %s, should be in ExecutorPool %+v", v.ExecutorID, v.ExecutorPool)
-	}
-
-	// Build chain configuration
-	config.ChainConfiguration = make(map[string]executor.ChainConfiguration, len(v.OfframpAddresses))
-	for chainSelector, address := range v.OfframpAddresses {
-		config.ChainConfiguration[strconv.FormatUint(chainSelector, 10)] = executor.ChainConfiguration{
-			OffRampAddress:         address,
-			RmnAddress:             v.RmnAddresses[chainSelector],
-			ExecutionInterval:      15 * time.Second,
-			ExecutorPool:           v.ExecutorPool,
-			DefaultExecutorAddress: v.ExecutorAddresses[chainSelector],
-		}
-	}
-
-	// Set executor ID
-	config.ExecutorID = v.ExecutorID
-	config.NtpServer = "time.google.com"
-
-	// Apply optional overrides from input
-	if v.IndexerAddress != "" {
-		config.IndexerAddress = v.IndexerAddress
-	}
-	if v.MonitoringOtelExporterHTTPEndpoint != "" {
-		config.Monitoring.Beholder.OtelExporterHTTPEndpoint = v.MonitoringOtelExporterHTTPEndpoint
-	}
-
-	return nil
-}
-
-// GenerateConfig generates the executor TOML configuration for Chainlink node deployments.
-// This does NOT include blockchain infos - use GenerateConfigWithBlockchainInfos for standalone mode.
-func (v *ExecutorInput) GenerateConfig() (executorTomlConfig []byte, err error) {
-	var config executor.Configuration
-	if err := v.buildExecutorConfiguration(&config); err != nil {
-		return nil, err
+// GenerateConfigWithBlockchainInfos combines the pre-generated config with blockchain infos
+// for standalone mode deployment.
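+//
+// A minimal sketch of the intended flow (generatedTOML and blockchainInfos are
+// assumed to come from the changeset output and the devenv wiring, respectively):
+//
+//	in := &ExecutorInput{GeneratedConfig: generatedTOML}
+//	cfgBytes, err := in.GenerateConfigWithBlockchainInfos(blockchainInfos)
+//	// cfgBytes is the standalone executor TOML, including per-chain RPC info.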
+func (v *ExecutorInput) GenerateConfigWithBlockchainInfos(blockchainInfos map[string]*protocol.BlockchainInfo) ([]byte, error) { + if v.GeneratedConfig == "" { + return nil, fmt.Errorf("GeneratedConfig is empty - must be set from changeset output") } - cfg, err := toml.Marshal(config) - if err != nil { - return nil, fmt.Errorf("failed to marshal executor config to TOML: %w", err) - } - - return cfg, nil -} - -// GenerateConfigWithBlockchainInfos generates the executor TOML configuration for standalone deployments. -// This includes blockchain infos which contain RPC node information. -func (v *ExecutorInput) GenerateConfigWithBlockchainInfos(blockchainInfos map[string]*protocol.BlockchainInfo) (executorTomlConfig []byte, err error) { - // Build base configuration + // Parse the generated config var baseConfig executor.Configuration - if err := v.buildExecutorConfiguration(&baseConfig); err != nil { - return nil, err + if _, err := toml.Decode(v.GeneratedConfig, &baseConfig); err != nil { + return nil, fmt.Errorf("failed to parse generated config: %w", err) } - // Wrap in ConfigWithBlockchainInfo and add blockchain infos + // Wrap with blockchain infos for standalone mode config := executor.ConfigWithBlockchainInfo{ Configuration: baseConfig, BlockchainInfos: blockchainInfos, @@ -226,7 +132,7 @@ func NewExecutor(in *ExecutorInput) (*ExecutorOutput, error) { return nil, fmt.Errorf("failed to generate config for executor: %w", err) } confDir := util.CCVConfigDir() - configFilePath := filepath.Join(confDir, fmt.Sprintf("executor-%s-config.toml", in.ExecutorID)) + configFilePath := filepath.Join(confDir, fmt.Sprintf("executor-%s-config.toml", in.ContainerName)) if err := os.WriteFile(configFilePath, config, 0o644); err != nil { return nil, fmt.Errorf("failed to write executor config to file: %w", err) } @@ -307,81 +213,3 @@ func SetTransmitterPrivateKey(execs []*ExecutorInput) ([]*ExecutorInput, error) } return execs, nil } - -// SetExecutorPoolAndID sets the executor pool and ID for the provided execs array. -// The executor ID is set to the executor qualifier followed by the index of the executor. -// The executor pool is set to the executor IDs. -func SetExecutorPoolAndID(execs []*ExecutorInput) ([]*ExecutorInput, error) { - executorPoolByQualifier := make(map[string][]string) - indexByQualifier := make(map[string]int) - executorIDs := make([]string, 0, len(execs)) - for _, exec := range execs { - executorID := fmt.Sprintf("%s_%d", exec.ExecutorQualifier, indexByQualifier[exec.ExecutorQualifier]) - executorIDs = append(executorIDs, executorID) - executorPoolByQualifier[exec.ExecutorQualifier] = append(executorPoolByQualifier[exec.ExecutorQualifier], executorID) - indexByQualifier[exec.ExecutorQualifier]++ - } - - for i, exec := range execs { - exec.ExecutorID = executorIDs[i] - exec.ExecutorPool = executorPoolByQualifier[exec.ExecutorQualifier] - } - - return execs, nil -} - -// ResolveContractsForExecutor determines the offramp addresses for the executor and mutates the -// provided execs array to have the offramp addresses set. -func ResolveContractsForExecutor(ds datastore.DataStore, blockchains []*blockchain.Input, execs []*ExecutorInput) ([]*ExecutorInput, error) { - for _, exec := range execs { - exec.OfframpAddresses = make(map[uint64]string) - exec.RmnAddresses = make(map[uint64]string) - exec.ExecutorAddresses = make(map[uint64]string) - } - - for _, exec := range execs { - for _, chain := range blockchains { - // TODO: Not chain agnostic. 
- networkInfo, err := chainsel.GetChainDetailsByChainIDAndFamily(chain.ChainID, chainsel.FamilyEVM) - if err != nil { - return nil, err - } - - offRampAddressRef, err := ds.Addresses().Get(datastore.NewAddressRefKey( - networkInfo.ChainSelector, - datastore.ContractType(offrampoperations.ContractType), - semver.MustParse(offrampoperations.Deploy.Version()), - "", - )) - if err != nil { - return nil, fmt.Errorf("failed to get off ramp address for chain %s: %w", chain.ChainID, err) - } - - rmnRemoteAddressRef, err := ds.Addresses().Get(datastore.NewAddressRefKey( - networkInfo.ChainSelector, - datastore.ContractType(rmn_remote.ContractType), - semver.MustParse(rmn_remote.Deploy.Version()), - "", - )) - if err != nil { - return nil, fmt.Errorf("failed to get rmn remote address for chain %s: %w", chain.ChainID, err) - } - - defaultExecutorAddressRef, err := ds.Addresses().Get(datastore.NewAddressRefKey( - networkInfo.ChainSelector, - datastore.ContractType(execcontract.ProxyType), - semver.MustParse(execcontract.DeployProxy.Version()), - exec.ExecutorQualifier, - )) - if err != nil { - return nil, fmt.Errorf("failed to get executor address for chain %s: %w", chain.ChainID, err) - } - - exec.OfframpAddresses[networkInfo.ChainSelector] = offRampAddressRef.Address - exec.RmnAddresses[networkInfo.ChainSelector] = rmnRemoteAddressRef.Address - exec.ExecutorAddresses[networkInfo.ChainSelector] = defaultExecutorAddressRef.Address - } - } - - return execs, nil -} diff --git a/build/devenv/services/indexer.go b/build/devenv/services/indexer.go index cf7d329a..bd6ebd7a 100644 --- a/build/devenv/services/indexer.go +++ b/build/devenv/services/indexer.go @@ -19,11 +19,12 @@ import ( ) const ( - DefaultIndexerName = "indexer" - DefaultIndexerDBName = "indexer-db" - DefaultIndexerImage = "indexer:dev" - DefaultIndexerHTTPPort = 8102 - DefaultIndexerDBPort = 6432 + DefaultIndexerName = "indexer" + DefaultIndexerDBName = "indexer-db" + DefaultIndexerImage = "indexer:dev" + DefaultIndexerHTTPPort = 8102 + DefaultIndexerInternalPort = 8100 + DefaultIndexerDBPort = 6432 DefaultIndexerDBImage = "postgres:16-alpine" ) @@ -111,38 +112,8 @@ func defaults(in *IndexerInput) { Timeout: 5000, NtpServer: "time.google.com", }, - Verifiers: []config.VerifierConfig{ - { - Type: config.ReaderTypeAggregator, - AggregatorReaderConfig: config.AggregatorReaderConfig{ - Address: "default-aggregator:50051", - Since: 0, - }, - Name: "CommiteeVerifier (Primary)", - BatchSize: 100, - MaxBatchWaitTime: 50, - }, - { - Type: config.ReaderTypeAggregator, - AggregatorReaderConfig: config.AggregatorReaderConfig{ - Address: "secondary-aggregator:50051", - Since: 0, - }, - Name: "CommiteeVerifier (Secondary)", - BatchSize: 100, - MaxBatchWaitTime: 50, - }, - { - Type: config.ReaderTypeAggregator, - AggregatorReaderConfig: config.AggregatorReaderConfig{ - Address: "tertiary-aggregator:50051", - Since: 0, - }, - Name: "CommiteeVerifier (Tertiary)", - BatchSize: 100, - MaxBatchWaitTime: 50, - }, - }, + // Verifiers are built dynamically from aggregator topology in environment.go + Verifiers: []config.VerifierConfig{}, Storage: config.StorageConfig{ Strategy: config.StorageStrategySink, Sink: &config.SinkStorageConfig{ @@ -187,20 +158,8 @@ func defaults(in *IndexerInput) { }, }, }, - Verifier: map[string]config.VerifierSecrets{ - "0": { - APIKey: "dev-api-key-indexer", - Secret: "dev-secret-indexer", - }, - "1": { - APIKey: "dev-api-key-indexer", - Secret: "dev-secret-indexer", - }, - "2": { - APIKey: "dev-api-key-indexer", - Secret: 
"dev-secret-indexer", - }, - }, + // Verifier secrets are built dynamically from aggregator topology in environment.go + Verifier: map[string]config.VerifierSecrets{}, } } } @@ -363,7 +322,7 @@ func NewIndexer(in *IndexerInput) (*IndexerOutput, error) { out := &IndexerOutput{ ContainerName: in.ContainerName, ExternalHTTPURL: fmt.Sprintf("http://%s:%d", host, in.Port), - InternalHTTPURL: fmt.Sprintf("http://%s:%d", in.ContainerName, in.Port), + InternalHTTPURL: fmt.Sprintf("http://%s:%d", in.ContainerName, DefaultIndexerInternalPort), DBConnectionString: DefaultIndexerDBConnectionString, } in.Out = out diff --git a/build/devenv/tests/services/executor_test.go b/build/devenv/tests/services/executor_test.go index 0a0b43b8..7d8add99 100644 --- a/build/devenv/tests/services/executor_test.go +++ b/build/devenv/tests/services/executor_test.go @@ -9,14 +9,27 @@ import ( ) func TestServiceExecutor(t *testing.T) { + // Minimal valid executor config for testing + generatedConfig := ` +executor_id = "executor-test" +indexer_address = "http://localhost:8100" + +[chain_configuration] +[chain_configuration."1"] +off_ramp_address = "0x0000000000000000000000000000000000000001" +rmn_address = "0x0000000000000000000000000000000000000002" +default_executor_address = "0x0000000000000000000000000000000000000003" +execution_interval = "15s" +executor_pool = ["executor-test"] +` + out, err := services.NewExecutor(&services.ExecutorInput{ - SourceCodePath: "../../../executor", - RootPath: "../../../../", - ContainerName: "executor-test", - Port: 8101, - Mode: services.Standalone, - ExecutorID: "executor-test", - ExecutorPool: []string{"executor-test"}, + SourceCodePath: "../../../executor", + RootPath: "../../../../", + ContainerName: "executor-test", + Port: 8101, + Mode: services.Standalone, + GeneratedConfig: generatedConfig, }) require.NoError(t, err) t.Run("test #1", func(t *testing.T) { diff --git a/deployments/changesets/generate_executor_config.go b/deployments/changesets/generate_executor_config.go new file mode 100644 index 00000000..cba82b68 --- /dev/null +++ b/deployments/changesets/generate_executor_config.go @@ -0,0 +1,222 @@ +package changesets + +import ( + "fmt" + "slices" + "strings" + "time" + + "github.com/BurntSushi/toml" + + "github.com/smartcontractkit/chainlink-deployments-framework/datastore" + "github.com/smartcontractkit/chainlink-deployments-framework/deployment" + "github.com/smartcontractkit/chainlink-deployments-framework/operations" + + "github.com/smartcontractkit/chainlink-ccv/deployments" + "github.com/smartcontractkit/chainlink-ccv/deployments/sequences" + "github.com/smartcontractkit/chainlink-ccv/executor" +) + +// GenerateExecutorConfigCfg is the configuration for the generate executor config changeset. +type GenerateExecutorConfigCfg struct { + EnvConfigPath string + ExecutorQualifier string + ChainSelectors []uint64 + NOPAliases []string +} + +// GenerateExecutorConfig creates a changeset that generates executor configurations +// for NOPs that are part of an executor pool. It iterates over specified NOPs (or all if empty) +// and generates a job spec for each NOP. 
+func GenerateExecutorConfig() deployment.ChangeSetV2[GenerateExecutorConfigCfg] { + validate := func(e deployment.Environment, cfg GenerateExecutorConfigCfg) error { + if cfg.EnvConfigPath == "" { + return fmt.Errorf("env config path is required") + } + + envCfg, err := deployments.LoadEnvConfig(cfg.EnvConfigPath) + if err != nil { + return fmt.Errorf("failed to load env config: %w", err) + } + + for _, alias := range cfg.NOPAliases { + if _, ok := envCfg.NOPTopology.NOPs[alias]; !ok { + return fmt.Errorf("NOP alias %q not found in env config", alias) + } + } + + envSelectors := e.BlockChains.ListChainSelectors() + for _, s := range cfg.ChainSelectors { + if !slices.Contains(envSelectors, s) { + return fmt.Errorf("selector %d is not available in environment", s) + } + } + return nil + } + + apply := func(e deployment.Environment, cfg GenerateExecutorConfigCfg) (deployment.ChangesetOutput, error) { + envCfg, err := deployments.LoadEnvConfig(cfg.EnvConfigPath) + if err != nil { + return deployment.ChangesetOutput{}, fmt.Errorf("failed to load env config: %w", err) + } + + selectors := cfg.ChainSelectors + if len(selectors) == 0 { + selectors = e.BlockChains.ListChainSelectors() + } + + deps := sequences.GenerateExecutorConfigDeps{ + Env: e, + } + + input := sequences.GenerateExecutorConfigInput{ + ExecutorQualifier: cfg.ExecutorQualifier, + ChainSelectors: selectors, + } + + report, err := operations.ExecuteSequence(e.OperationsBundle, sequences.GenerateExecutorConfig, deps, input) + if err != nil { + return deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + }, fmt.Errorf("failed to generate executor config: %w", err) + } + + nopAliases := cfg.NOPAliases + if len(nopAliases) == 0 { + for alias := range envCfg.NOPTopology.NOPs { + nopAliases = append(nopAliases, alias) + } + } + + outputDS := datastore.NewMemoryDataStore() + if e.DataStore != nil { + if err := outputDS.Merge(e.DataStore); err != nil { + return deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + }, fmt.Errorf("failed to merge existing datastore: %w", err) + } + } + + // Track expected job spec IDs for cleanup + expectedJobSpecIDs := make(map[string]bool) + executorSuffix := fmt.Sprintf("-%s-executor", cfg.ExecutorQualifier) + + for _, nopAlias := range nopAliases { + // Check if NOP is in the requested executor pool + pools := envCfg.GetPoolsForNOP(nopAlias) + if !slices.Contains(pools, cfg.ExecutorQualifier) { + continue + } + + pool, ok := envCfg.ExecutorPools[cfg.ExecutorQualifier] + if !ok { + continue + } + + executionInterval := pool.ExecutionInterval + if executionInterval == 0 { + executionInterval = 15 * time.Second + } + + chainConfigs := make(map[string]executor.ChainConfiguration) + for chainSelectorStr, genCfg := range report.Output.Config.ChainConfigs { + chainConfigs[chainSelectorStr] = executor.ChainConfiguration{ + OffRampAddress: genCfg.OffRampAddress, + RmnAddress: genCfg.RmnAddress, + DefaultExecutorAddress: genCfg.DefaultExecutorAddress, + ExecutorPool: pool.NOPAliases, + ExecutionInterval: executionInterval, + } + } + + executorID := fmt.Sprintf("%s-%s-executor", nopAlias, cfg.ExecutorQualifier) + expectedJobSpecIDs[executorID] = true + + executorCfg := executor.Configuration{ + IndexerAddress: envCfg.IndexerAddress, + ExecutorID: executorID, + PyroscopeURL: envCfg.PyroscopeURL, + Monitoring: convertMonitoringConfig(envCfg.Monitoring), + ChainConfiguration: chainConfigs, + } + + configBytes, err := toml.Marshal(executorCfg) + if err != nil { + return 
deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + }, fmt.Errorf("failed to marshal executor config to TOML for NOP %q: %w", nopAlias, err) + } + + jobSpec := fmt.Sprintf(`schemaVersion = 1 +type = "ccvexecutor" +executorConfig = """ +%s""" +`, string(configBytes)) + + if err := deployments.SaveNOPJobSpec(outputDS, nopAlias, executorID, jobSpec); err != nil { + return deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + }, fmt.Errorf("failed to save executor job spec for NOP %q: %w", nopAlias, err) + } + } + + // Clean up orphaned executor job specs for this qualifier + // When NOPAliases is explicitly set, only clean up those specific NOPs (scoped mode) + // When NOPAliases is empty, clean up all NOPs in the datastore (full sync mode) + scopedCleanup := len(cfg.NOPAliases) > 0 + scopedNOPs := make(map[string]bool) + if scopedCleanup { + for _, nopAlias := range cfg.NOPAliases { + scopedNOPs[nopAlias] = true + } + } + + allNOPJobSpecs, err := deployments.GetAllNOPJobSpecs(outputDS.Seal()) + if err != nil { + return deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + }, fmt.Errorf("failed to get all NOP job specs for cleanup: %w", err) + } + + for nopAlias, jobSpecs := range allNOPJobSpecs { + // In scoped mode, only cleanup NOPs that were explicitly specified + if scopedCleanup && !scopedNOPs[nopAlias] { + continue + } + for jobSpecID := range jobSpecs { + // Check if this job spec matches the pattern for this executor qualifier + if strings.HasSuffix(jobSpecID, executorSuffix) && !expectedJobSpecIDs[jobSpecID] { + if err := deployments.DeleteNOPJobSpec(outputDS, nopAlias, jobSpecID); err != nil { + return deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + }, fmt.Errorf("failed to delete orphaned executor job spec %q for NOP %q: %w", jobSpecID, nopAlias, err) + } + } + } + } + + return deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + DataStore: outputDS, + }, nil + } + + return deployment.CreateChangeSet(apply, validate) +} + +func convertMonitoringConfig(cfg deployments.MonitoringConfig) executor.MonitoringConfig { + return executor.MonitoringConfig{ + Enabled: cfg.Enabled, + Type: cfg.Type, + Beholder: executor.BeholderConfig{ + InsecureConnection: cfg.Beholder.InsecureConnection, + CACertFile: cfg.Beholder.CACertFile, + OtelExporterGRPCEndpoint: cfg.Beholder.OtelExporterGRPCEndpoint, + OtelExporterHTTPEndpoint: cfg.Beholder.OtelExporterHTTPEndpoint, + LogStreamingEnabled: cfg.Beholder.LogStreamingEnabled, + MetricReaderInterval: cfg.Beholder.MetricReaderInterval, + TraceSampleRatio: cfg.Beholder.TraceSampleRatio, + TraceBatchTimeout: cfg.Beholder.TraceBatchTimeout, + }, + } +} diff --git a/deployments/changesets/generate_executor_config_test.go b/deployments/changesets/generate_executor_config_test.go new file mode 100644 index 00000000..2db685cf --- /dev/null +++ b/deployments/changesets/generate_executor_config_test.go @@ -0,0 +1,419 @@ +package changesets_test + +import ( + "path/filepath" + "strconv" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + chainsel "github.com/smartcontractkit/chain-selectors" + execcontract "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/executor" + offrampoperations "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/offramp" + 
"github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/rmn_remote" + "github.com/smartcontractkit/chainlink-deployments-framework/datastore" + "github.com/smartcontractkit/chainlink-deployments-framework/deployment" + + "github.com/smartcontractkit/chainlink-ccv/deployments" + "github.com/smartcontractkit/chainlink-ccv/deployments/changesets" + "github.com/smartcontractkit/chainlink-ccv/deployments/testutils" +) + +func TestGenerateExecutorConfig_ValidatesEnvConfigPath(t *testing.T) { + changeset := changesets.GenerateExecutorConfig() + + env := createExecutorTestEnvironment(t) + + err := changeset.VerifyPreconditions(env, changesets.GenerateExecutorConfigCfg{ + EnvConfigPath: "", + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "env config path is required") +} + +func TestGenerateExecutorConfig_ValidatesNOPAliases(t *testing.T) { + changeset := changesets.GenerateExecutorConfig() + + env := createExecutorTestEnvironment(t) + envConfigPath := createTestEnvConfig(t) + + err := changeset.VerifyPreconditions(env, changesets.GenerateExecutorConfigCfg{ + EnvConfigPath: envConfigPath, + NOPAliases: []string{"unknown-nop"}, + }) + require.Error(t, err) + assert.Contains(t, err.Error(), `NOP alias "unknown-nop" not found in env config`) +} + +func TestGenerateExecutorConfig_ValidatesChainSelectors(t *testing.T) { + changeset := changesets.GenerateExecutorConfig() + + env := createExecutorTestEnvironment(t) + envConfigPath := createTestEnvConfig(t) + + err := changeset.VerifyPreconditions(env, changesets.GenerateExecutorConfigCfg{ + EnvConfigPath: envConfigPath, + ChainSelectors: []uint64{1234}, + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "selector 1234 is not available in environment") +} + +func TestGenerateExecutorConfig_GeneratesCorrectJobSpec(t *testing.T) { + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + executorQualifier := testDefaultQualifier + + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + sel1, sel2 := selectors[0], selectors[1] + sel1Str := strconv.FormatUint(sel1, 10) + sel2Str := strconv.FormatUint(sel2, 10) + + offRampAddr1 := common.HexToAddress("0x1111111111111111111111111111111111111111") + offRampAddr2 := common.HexToAddress("0x2222222222222222222222222222222222222222") + rmnAddr1 := common.HexToAddress("0x3333333333333333333333333333333333333333") + rmnAddr2 := common.HexToAddress("0x4444444444444444444444444444444444444444") + executorAddr1 := common.HexToAddress("0x5555555555555555555555555555555555555555") + executorAddr2 := common.HexToAddress("0x6666666666666666666666666666666666666666") + + ds := datastore.NewMemoryDataStore() + + addContractToDatastore(t, ds, sel1, "", offrampoperations.ContractType, offRampAddr1) + addContractToDatastore(t, ds, sel2, "", offrampoperations.ContractType, offRampAddr2) + addContractToDatastore(t, ds, sel1, "", rmn_remote.ContractType, rmnAddr1) + addContractToDatastore(t, ds, sel2, "", rmn_remote.ContractType, rmnAddr2) + addContractToDatastore(t, ds, sel1, executorQualifier, execcontract.ProxyType, executorAddr1) + addContractToDatastore(t, ds, sel2, executorQualifier, execcontract.ProxyType, executorAddr2) + + env.DataStore = ds.Seal() + + envConfigPath := createTestEnvConfig(t) + + cs := changesets.GenerateExecutorConfig() + output, err := cs.Apply(env, changesets.GenerateExecutorConfigCfg{ + EnvConfigPath: envConfigPath, + ExecutorQualifier: executorQualifier, + ChainSelectors: selectors, + NOPAliases: 
[]string{"nop-1"}, + }) + require.NoError(t, err) + require.NotNil(t, output.DataStore) + + jobSpec, err := deployments.GetNOPJobSpec(output.DataStore.Seal(), "nop-1", "nop-1-default-executor") + require.NoError(t, err) + + assert.Contains(t, jobSpec, `schemaVersion = 1`) + assert.Contains(t, jobSpec, `type = "ccvexecutor"`) + assert.Contains(t, jobSpec, `executorConfig = """`) + assert.Contains(t, jobSpec, `indexer_address = "http://indexer:8100"`) + assert.Contains(t, jobSpec, `executor_id = "nop-1-default-executor"`) + assert.Contains(t, jobSpec, `pyroscope_url = "http://pyroscope:4040"`) + + assert.Contains(t, jobSpec, `[chain_configuration]`) + assert.Contains(t, jobSpec, sel1Str) + assert.Contains(t, jobSpec, sel2Str) + assert.True(t, strings.Contains(jobSpec, offRampAddr1.Hex()) || strings.Contains(jobSpec, strings.ToLower(offRampAddr1.Hex()))) + + assert.Contains(t, jobSpec, `executor_pool = ["nop-1", "nop-2"]`) + assert.Contains(t, jobSpec, `execution_interval`) + + assert.Contains(t, jobSpec, `[Monitoring]`) + assert.Contains(t, jobSpec, `Enabled = true`) + assert.Contains(t, jobSpec, `Type = "beholder"`) +} + +func TestGenerateExecutorConfig_PreservesExistingConfigs(t *testing.T) { + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + executorQualifier := testDefaultQualifier + + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + sel1, sel2 := selectors[0], selectors[1] + + offRampAddr1 := common.HexToAddress("0x1111111111111111111111111111111111111111") + offRampAddr2 := common.HexToAddress("0x2222222222222222222222222222222222222222") + rmnAddr1 := common.HexToAddress("0x3333333333333333333333333333333333333333") + rmnAddr2 := common.HexToAddress("0x4444444444444444444444444444444444444444") + executorAddr1 := common.HexToAddress("0x5555555555555555555555555555555555555555") + executorAddr2 := common.HexToAddress("0x6666666666666666666666666666666666666666") + + ds := datastore.NewMemoryDataStore() + + addContractToDatastore(t, ds, sel1, "", offrampoperations.ContractType, offRampAddr1) + addContractToDatastore(t, ds, sel2, "", offrampoperations.ContractType, offRampAddr2) + addContractToDatastore(t, ds, sel1, "", rmn_remote.ContractType, rmnAddr1) + addContractToDatastore(t, ds, sel2, "", rmn_remote.ContractType, rmnAddr2) + addContractToDatastore(t, ds, sel1, executorQualifier, execcontract.ProxyType, executorAddr1) + addContractToDatastore(t, ds, sel2, executorQualifier, execcontract.ProxyType, executorAddr2) + + existingVerifierJobSpec := "existing-verifier-job-spec-content" + err := deployments.SaveNOPJobSpec(ds, "existing-nop", "existing-verifier", existingVerifierJobSpec) + require.NoError(t, err) + + env.DataStore = ds.Seal() + + envConfigPath := createTestEnvConfig(t) + + cs := changesets.GenerateExecutorConfig() + output, err := cs.Apply(env, changesets.GenerateExecutorConfigCfg{ + EnvConfigPath: envConfigPath, + ExecutorQualifier: executorQualifier, + ChainSelectors: selectors, + NOPAliases: []string{"nop-1"}, + }) + require.NoError(t, err) + require.NotNil(t, output.DataStore) + + outputSealed := output.DataStore.Seal() + + _, err = deployments.GetNOPJobSpec(outputSealed, "nop-1", "nop-1-default-executor") + require.NoError(t, err, "new executor job spec should be present") + + retrievedVerifierJobSpec, err := deployments.GetNOPJobSpec(outputSealed, "existing-nop", "existing-verifier") + require.NoError(t, err, "existing verifier job spec should be preserved") + assert.Equal(t, existingVerifierJobSpec, 
retrievedVerifierJobSpec, "verifier job spec should be unchanged") +} + +func TestGenerateExecutorConfig_RemovesOrphanedJobSpecs(t *testing.T) { + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + executorQualifier := testDefaultQualifier + + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + sel1, sel2 := selectors[0], selectors[1] + + offRampAddr1 := common.HexToAddress("0x1111111111111111111111111111111111111111") + offRampAddr2 := common.HexToAddress("0x2222222222222222222222222222222222222222") + rmnAddr1 := common.HexToAddress("0x3333333333333333333333333333333333333333") + rmnAddr2 := common.HexToAddress("0x4444444444444444444444444444444444444444") + executorAddr1 := common.HexToAddress("0x5555555555555555555555555555555555555555") + executorAddr2 := common.HexToAddress("0x6666666666666666666666666666666666666666") + + ds := datastore.NewMemoryDataStore() + + addContractToDatastore(t, ds, sel1, "", offrampoperations.ContractType, offRampAddr1) + addContractToDatastore(t, ds, sel2, "", offrampoperations.ContractType, offRampAddr2) + addContractToDatastore(t, ds, sel1, "", rmn_remote.ContractType, rmnAddr1) + addContractToDatastore(t, ds, sel2, "", rmn_remote.ContractType, rmnAddr2) + addContractToDatastore(t, ds, sel1, executorQualifier, execcontract.ProxyType, executorAddr1) + addContractToDatastore(t, ds, sel2, executorQualifier, execcontract.ProxyType, executorAddr2) + + // Pre-populate with an executor job spec that will become orphaned + err := deployments.SaveNOPJobSpec(ds, "nop-removed", "nop-removed-default-executor", "old-job-spec") + require.NoError(t, err) + + env.DataStore = ds.Seal() + + // Create config where nop-removed is NOT in the pool (only nop-1, nop-2) + envConfigPath := createTestEnvConfig(t) + + cs := changesets.GenerateExecutorConfig() + output, err := cs.Apply(env, changesets.GenerateExecutorConfigCfg{ + EnvConfigPath: envConfigPath, + ExecutorQualifier: executorQualifier, + ChainSelectors: selectors, + }) + require.NoError(t, err) + require.NotNil(t, output.DataStore) + + outputSealed := output.DataStore.Seal() + + // The orphaned job spec should be deleted + _, err = deployments.GetNOPJobSpec(outputSealed, "nop-removed", "nop-removed-default-executor") + require.Error(t, err, "orphaned executor job spec should be deleted") + + // Active NOPs should still have their job specs + _, err = deployments.GetNOPJobSpec(outputSealed, "nop-1", "nop-1-default-executor") + require.NoError(t, err, "nop-1 executor job spec should exist") + + _, err = deployments.GetNOPJobSpec(outputSealed, "nop-2", "nop-2-default-executor") + require.NoError(t, err, "nop-2 executor job spec should exist") +} + +func TestGenerateExecutorConfig_PreservesOtherQualifierJobSpecs(t *testing.T) { + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + executorQualifier := testDefaultQualifier + + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + sel1, sel2 := selectors[0], selectors[1] + + offRampAddr1 := common.HexToAddress("0x1111111111111111111111111111111111111111") + offRampAddr2 := common.HexToAddress("0x2222222222222222222222222222222222222222") + rmnAddr1 := common.HexToAddress("0x3333333333333333333333333333333333333333") + rmnAddr2 := common.HexToAddress("0x4444444444444444444444444444444444444444") + executorAddr1 := common.HexToAddress("0x5555555555555555555555555555555555555555") + executorAddr2 := common.HexToAddress("0x6666666666666666666666666666666666666666") 
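+	// The fixture registers offramp, RMN remote, and executor proxy contracts on
+	// both chains so config generation can resolve every address it needs.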
+ + ds := datastore.NewMemoryDataStore() + + addContractToDatastore(t, ds, sel1, "", offrampoperations.ContractType, offRampAddr1) + addContractToDatastore(t, ds, sel2, "", offrampoperations.ContractType, offRampAddr2) + addContractToDatastore(t, ds, sel1, "", rmn_remote.ContractType, rmnAddr1) + addContractToDatastore(t, ds, sel2, "", rmn_remote.ContractType, rmnAddr2) + addContractToDatastore(t, ds, sel1, executorQualifier, execcontract.ProxyType, executorAddr1) + addContractToDatastore(t, ds, sel2, executorQualifier, execcontract.ProxyType, executorAddr2) + + // Pre-populate with an executor job spec for a DIFFERENT qualifier + err := deployments.SaveNOPJobSpec(ds, "nop-1", "nop-1-other-pool-executor", "other-pool-job-spec") + require.NoError(t, err) + + env.DataStore = ds.Seal() + + envConfigPath := createTestEnvConfig(t) + + cs := changesets.GenerateExecutorConfig() + output, err := cs.Apply(env, changesets.GenerateExecutorConfigCfg{ + EnvConfigPath: envConfigPath, + ExecutorQualifier: executorQualifier, + ChainSelectors: selectors, + NOPAliases: []string{"nop-1"}, + }) + require.NoError(t, err) + require.NotNil(t, output.DataStore) + + outputSealed := output.DataStore.Seal() + + // The job spec for a different qualifier should be preserved + otherPoolJobSpec, err := deployments.GetNOPJobSpec(outputSealed, "nop-1", "nop-1-other-pool-executor") + require.NoError(t, err, "job spec for other pool should be preserved") + assert.Equal(t, "other-pool-job-spec", otherPoolJobSpec) + + // Current qualifier job spec should exist + _, err = deployments.GetNOPJobSpec(outputSealed, "nop-1", "nop-1-default-executor") + require.NoError(t, err, "nop-1 default executor job spec should exist") +} + +func TestGenerateExecutorConfig_ScopedNOPAliasesPreservesOtherNOPs(t *testing.T) { + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + executorQualifier := testDefaultQualifier + + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + sel1, sel2 := selectors[0], selectors[1] + + offRampAddr1 := common.HexToAddress("0x1111111111111111111111111111111111111111") + offRampAddr2 := common.HexToAddress("0x2222222222222222222222222222222222222222") + rmnAddr1 := common.HexToAddress("0x3333333333333333333333333333333333333333") + rmnAddr2 := common.HexToAddress("0x4444444444444444444444444444444444444444") + executorAddr1 := common.HexToAddress("0x5555555555555555555555555555555555555555") + executorAddr2 := common.HexToAddress("0x6666666666666666666666666666666666666666") + + ds := datastore.NewMemoryDataStore() + + addContractToDatastore(t, ds, sel1, "", offrampoperations.ContractType, offRampAddr1) + addContractToDatastore(t, ds, sel2, "", offrampoperations.ContractType, offRampAddr2) + addContractToDatastore(t, ds, sel1, "", rmn_remote.ContractType, rmnAddr1) + addContractToDatastore(t, ds, sel2, "", rmn_remote.ContractType, rmnAddr2) + addContractToDatastore(t, ds, sel1, executorQualifier, execcontract.ProxyType, executorAddr1) + addContractToDatastore(t, ds, sel2, executorQualifier, execcontract.ProxyType, executorAddr2) + + // Pre-populate with executor job specs for BOTH nop-1 and nop-2 + err := deployments.SaveNOPJobSpec(ds, "nop-1", "nop-1-default-executor", "nop-1-job-spec") + require.NoError(t, err) + err = deployments.SaveNOPJobSpec(ds, "nop-2", "nop-2-default-executor", "nop-2-job-spec") + require.NoError(t, err) + + env.DataStore = ds.Seal() + + envConfigPath := createTestEnvConfig(t) + + // Run the changeset with only nop-1 in scope + cs := 
changesets.GenerateExecutorConfig() + output, err := cs.Apply(env, changesets.GenerateExecutorConfigCfg{ + EnvConfigPath: envConfigPath, + ExecutorQualifier: executorQualifier, + ChainSelectors: selectors, + NOPAliases: []string{"nop-1"}, // Scoped to only nop-1 + }) + require.NoError(t, err) + require.NotNil(t, output.DataStore) + + outputSealed := output.DataStore.Seal() + + // nop-1 job spec should be regenerated + _, err = deployments.GetNOPJobSpec(outputSealed, "nop-1", "nop-1-default-executor") + require.NoError(t, err, "nop-1 executor job spec should exist") + + // nop-2 job spec should be PRESERVED (not deleted) since nop-2 was not in scope + nop2JobSpec, err := deployments.GetNOPJobSpec(outputSealed, "nop-2", "nop-2-default-executor") + require.NoError(t, err, "nop-2 executor job spec should be preserved when not in scope") + assert.Equal(t, "nop-2-job-spec", nop2JobSpec, "nop-2 job spec should be unchanged") +} + +func createTestEnvConfig(t *testing.T) string { + t.Helper() + + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "env.toml") + + cfg := deployments.EnvConfig{ + IndexerAddress: "http://indexer:8100", + PyroscopeURL: "http://pyroscope:4040", + Monitoring: deployments.MonitoringConfig{ + Enabled: true, + Type: "beholder", + Beholder: deployments.BeholderConfig{ + InsecureConnection: true, + OtelExporterHTTPEndpoint: "otel:4318", + MetricReaderInterval: 5, + TraceSampleRatio: 1.0, + TraceBatchTimeout: 10, + }, + }, + NOPTopology: deployments.NOPTopology{ + NOPs: map[string]deployments.NOPConfig{ + "nop-1": {Alias: "nop-1", Name: "NOP One", SignerAddress: "0xABCDEF1234567890ABCDEF1234567890ABCDEF12"}, + "nop-2": {Alias: "nop-2", Name: "NOP Two", SignerAddress: "0x1234567890ABCDEF1234567890ABCDEF12345678"}, + }, + Committees: map[string]deployments.CommitteeConfig{ + "test-committee": { + Qualifier: "test-committee", + VerifierVersion: "1.7.0", + ChainConfigs: map[string]deployments.ChainCommitteeConfig{ + "16015286601757825753": {NOPAliases: []string{"nop-1", "nop-2"}, Threshold: 2}, + }, + Aggregators: []deployments.AggregatorConfig{ + {Name: "instance-1", Address: "aggregator-1:443"}, + }, + }, + }, + }, + ExecutorPools: map[string]deployments.ExecutorPoolConfig{ + "default": { + NOPAliases: []string{"nop-1", "nop-2"}, + ExecutionInterval: 15 * time.Second, + }, + }, + } + + err := deployments.WriteEnvConfig(configPath, cfg) + require.NoError(t, err) + + return configPath +} + +func createExecutorTestEnvironment(t *testing.T) deployment.Environment { + t.Helper() + + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + return env +} diff --git a/deployments/changesets/generate_indexer_config.go b/deployments/changesets/generate_indexer_config.go index 0614da6c..8be5327a 100644 --- a/deployments/changesets/generate_indexer_config.go +++ b/deployments/changesets/generate_indexer_config.go @@ -13,13 +13,23 @@ import ( "github.com/smartcontractkit/chainlink-ccv/deployments/sequences" ) -// GenerateIndexerConfigCfg is an alias for the changeset input type. -type GenerateIndexerConfigCfg = idxconfig.BuildConfigInput +// GenerateIndexerConfigCfg contains the configuration for the generate indexer config changeset. +type GenerateIndexerConfigCfg struct { + // ServiceIdentifier is the identifier for this indexer service (e.g. 
"default-indexer") + ServiceIdentifier string + // VerifierNameToQualifier maps verifier names (matching VerifierConfig.Name) to qualifiers + // used for looking up addresses in the datastore. + VerifierNameToQualifier map[string]string + // ChainSelectors are the source chains the indexer will monitor. + // If empty, defaults to all chain selectors available in the environment. + ChainSelectors []uint64 +} // GenerateIndexerConfig creates a changeset that generates the indexer configuration -// by scanning on-chain CommitteeVerifier contracts. -func GenerateIndexerConfig() deployment.ChangeSetV2[idxconfig.BuildConfigInput] { - validate := func(e deployment.Environment, cfg idxconfig.BuildConfigInput) error { +// by scanning on-chain CommitteeVerifier contracts. It generates one entry per verifier name +// with all IssuerAddresses (resolver addresses) for that verifier across all chains. +func GenerateIndexerConfig() deployment.ChangeSetV2[GenerateIndexerConfigCfg] { + validate := func(e deployment.Environment, cfg GenerateIndexerConfigCfg) error { if cfg.ServiceIdentifier == "" { return fmt.Errorf("service identifier is required") } @@ -32,14 +42,22 @@ func GenerateIndexerConfig() deployment.ChangeSetV2[idxconfig.BuildConfigInput] return fmt.Errorf("selector %d is not available in environment", s) } } + return nil } - apply := func(e deployment.Environment, cfg idxconfig.BuildConfigInput) (deployment.ChangesetOutput, error) { - input := cfg - if len(input.ChainSelectors) == 0 { - input.ChainSelectors = e.BlockChains.ListChainSelectors() + apply := func(e deployment.Environment, cfg GenerateIndexerConfigCfg) (deployment.ChangesetOutput, error) { + selectors := cfg.ChainSelectors + if len(selectors) == 0 { + selectors = e.BlockChains.ListChainSelectors() + } + + input := idxconfig.BuildConfigInput{ + ServiceIdentifier: cfg.ServiceIdentifier, + VerifierNameToQualifier: cfg.VerifierNameToQualifier, + ChainSelectors: selectors, } + deps := sequences.GenerateIndexerConfigDeps{ Env: e, } @@ -61,6 +79,7 @@ func GenerateIndexerConfig() deployment.ChangeSetV2[idxconfig.BuildConfigInput] } idxCfg := idxconfig.GeneratedVerifiersToGeneratedConfig(report.Output.Verifiers) + if err := deployments.SaveIndexerConfig(outputDS, report.Output.ServiceIdentifier, idxCfg); err != nil { return deployment.ChangesetOutput{ Reports: report.ExecutionReports, diff --git a/deployments/changesets/generate_verifier_config.go b/deployments/changesets/generate_verifier_config.go new file mode 100644 index 00000000..77ec25f5 --- /dev/null +++ b/deployments/changesets/generate_verifier_config.go @@ -0,0 +1,239 @@ +package changesets + +import ( + "fmt" + "slices" + "strings" + + "github.com/BurntSushi/toml" + + "github.com/smartcontractkit/chainlink-deployments-framework/datastore" + "github.com/smartcontractkit/chainlink-deployments-framework/deployment" + "github.com/smartcontractkit/chainlink-deployments-framework/operations" + + "github.com/smartcontractkit/chainlink-ccv/deployments" + "github.com/smartcontractkit/chainlink-ccv/deployments/sequences" + "github.com/smartcontractkit/chainlink-ccv/verifier" + "github.com/smartcontractkit/chainlink-ccv/verifier/commit" +) + +// GenerateVerifierConfigCfg is the configuration for the generate verifier config changeset. 
+type GenerateVerifierConfigCfg struct {
+	EnvConfigPath      string
+	CommitteeQualifier string
+	ExecutorQualifier  string
+	ChainSelectors     []uint64
+	NOPAliases         []string
+}
+
+// GenerateVerifierConfig creates a changeset that generates verifier configurations
+// for NOPs that are part of committees. It iterates over the specified NOPs (or all
+// committee NOPs if none are given) and generates a job spec for each
+// (NOP, committee, aggregator) combination to support high availability.
+// The SignerAddress for each NOP is read from the NOPConfig in the EnvConfig.
+func GenerateVerifierConfig() deployment.ChangeSetV2[GenerateVerifierConfigCfg] {
+	validate := func(e deployment.Environment, cfg GenerateVerifierConfigCfg) error {
+		if cfg.EnvConfigPath == "" {
+			return fmt.Errorf("env config path is required")
+		}
+		if cfg.CommitteeQualifier == "" {
+			return fmt.Errorf("committee qualifier is required")
+		}
+
+		envCfg, err := deployments.LoadEnvConfig(cfg.EnvConfigPath)
+		if err != nil {
+			return fmt.Errorf("failed to load env config: %w", err)
+		}
+
+		if _, ok := envCfg.NOPTopology.Committees[cfg.CommitteeQualifier]; !ok {
+			return fmt.Errorf("committee %q not found in env config", cfg.CommitteeQualifier)
+		}
+
+		nopAliases := cfg.NOPAliases
+		if len(nopAliases) == 0 {
+			nopAliases, err = envCfg.GetNOPsForCommittee(cfg.CommitteeQualifier)
+			if err != nil {
+				return fmt.Errorf("failed to get NOPs for committee: %w", err)
+			}
+		}
+
+		for _, alias := range nopAliases {
+			nop, ok := envCfg.NOPTopology.NOPs[alias]
+			if !ok {
+				return fmt.Errorf("NOP alias %q not found in env config", alias)
+			}
+			if nop.SignerAddress == "" {
+				return fmt.Errorf("NOP %q missing signer_address in env config", alias)
+			}
+		}
+
+		envSelectors := e.BlockChains.ListChainSelectors()
+		for _, s := range cfg.ChainSelectors {
+			if !slices.Contains(envSelectors, s) {
+				return fmt.Errorf("selector %d is not available in environment", s)
+			}
+		}
+		return nil
+	}
+
+	apply := func(e deployment.Environment, cfg GenerateVerifierConfigCfg) (deployment.ChangesetOutput, error) {
+		envCfg, err := deployments.LoadEnvConfig(cfg.EnvConfigPath)
+		if err != nil {
+			return deployment.ChangesetOutput{}, fmt.Errorf("failed to load env config: %w", err)
+		}
+
+		selectors := cfg.ChainSelectors
+		if len(selectors) == 0 {
+			selectors = e.BlockChains.ListChainSelectors()
+		}
+
+		deps := sequences.GenerateVerifierConfigDeps{
+			Env: e,
+		}
+
+		input := sequences.GenerateVerifierConfigInput{
+			CommitteeQualifier: cfg.CommitteeQualifier,
+			ExecutorQualifier:  cfg.ExecutorQualifier,
+			ChainSelectors:     selectors,
+		}
+
+		report, err := operations.ExecuteSequence(e.OperationsBundle, sequences.GenerateVerifierConfig, deps, input)
+		if err != nil {
+			return deployment.ChangesetOutput{
+				Reports: report.ExecutionReports,
+			}, fmt.Errorf("failed to generate verifier config: %w", err)
+		}
+
+		committee := envCfg.NOPTopology.Committees[cfg.CommitteeQualifier]
+
+		nopAliases := cfg.NOPAliases
+		if len(nopAliases) == 0 {
+			nopsForCommittee, err := envCfg.GetNOPsForCommittee(cfg.CommitteeQualifier)
+			if err != nil {
+				return deployment.ChangesetOutput{
+					Reports: report.ExecutionReports,
+				}, fmt.Errorf("failed to get NOPs for committee: %w", err)
+			}
+			nopAliases = nopsForCommittee
+		}
+
+		outputDS := datastore.NewMemoryDataStore()
+		if e.DataStore != nil {
+			if err := outputDS.Merge(e.DataStore); err != nil {
+				return deployment.ChangesetOutput{
+					Reports: report.ExecutionReports,
+				}, fmt.Errorf("failed to merge existing datastore: %w", err)
+			}
+		}
+
+		// Track expected NOPs and job spec IDs for cleanup
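+		// Verifier job spec IDs follow "<aggregator>-<committee>-verifier",
+		// e.g. "instance-1-test-committee-verifier"; the suffix below is used
+		// to find (and later prune) specs belonging to this committee.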
+ expectedNOPs := make(map[string]bool) + expectedJobSpecIDs := make(map[string]bool) + verifierSuffix := fmt.Sprintf("-%s-verifier", committee.Qualifier) + + for _, nopAlias := range nopAliases { + nop := envCfg.NOPTopology.NOPs[nopAlias] + expectedNOPs[nopAlias] = true + + for _, agg := range committee.Aggregators { + verifierID := fmt.Sprintf("%s-%s-verifier", agg.Name, committee.Qualifier) + expectedJobSpecIDs[verifierID] = true + + verifierCfg := commit.Config{ + VerifierID: verifierID, + AggregatorAddress: agg.Address, + InsecureAggregatorConnection: agg.InsecureAggregatorConnection, + SignerAddress: nop.SignerAddress, + PyroscopeURL: envCfg.PyroscopeURL, + CommitteeVerifierAddresses: report.Output.Config.CommitteeVerifierAddresses, + OnRampAddresses: report.Output.Config.OnRampAddresses, + DefaultExecutorOnRampAddresses: report.Output.Config.DefaultExecutorOnRampAddresses, + RMNRemoteAddresses: report.Output.Config.RMNRemoteAddresses, + Monitoring: convertVerifierMonitoringConfig(envCfg.Monitoring), + } + + configBytes, err := toml.Marshal(verifierCfg) + if err != nil { + return deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + }, fmt.Errorf("failed to marshal verifier config to TOML for NOP %q aggregator %q: %w", nopAlias, agg.Name, err) + } + + jobSpec := fmt.Sprintf(`schemaVersion = 1 +type = "ccvcommitteeverifier" +committeeVerifierConfig = """ +%s""" +`, string(configBytes)) + + if err := deployments.SaveNOPJobSpec(outputDS, nopAlias, verifierID, jobSpec); err != nil { + return deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + }, fmt.Errorf("failed to save verifier job spec for NOP %q aggregator %q: %w", nopAlias, agg.Name, err) + } + } + } + + // Clean up orphaned verifier job specs for this committee + // When NOPAliases is explicitly set, only clean up those specific NOPs (scoped mode) + // When NOPAliases is empty, clean up all NOPs in the datastore (full sync mode) + scopedCleanup := len(cfg.NOPAliases) > 0 + scopedNOPs := make(map[string]bool) + if scopedCleanup { + for _, nopAlias := range cfg.NOPAliases { + scopedNOPs[nopAlias] = true + } + } + + allNOPJobSpecs, err := deployments.GetAllNOPJobSpecs(outputDS.Seal()) + if err != nil { + return deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + }, fmt.Errorf("failed to get all NOP job specs for cleanup: %w", err) + } + + for nopAlias, jobSpecs := range allNOPJobSpecs { + // In scoped mode, only cleanup NOPs that were explicitly specified + if scopedCleanup && !scopedNOPs[nopAlias] { + continue + } + for jobSpecID := range jobSpecs { + // Check if this job spec matches the pattern for this committee's verifier + if !strings.HasSuffix(jobSpecID, verifierSuffix) { + continue + } + // Delete if: 1) job spec ID is not expected, OR 2) NOP is not expected to have verifier jobs + shouldDelete := !expectedJobSpecIDs[jobSpecID] || !expectedNOPs[nopAlias] + if shouldDelete { + if err := deployments.DeleteNOPJobSpec(outputDS, nopAlias, jobSpecID); err != nil { + return deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + }, fmt.Errorf("failed to delete orphaned verifier job spec %q for NOP %q: %w", jobSpecID, nopAlias, err) + } + } + } + } + + return deployment.ChangesetOutput{ + Reports: report.ExecutionReports, + DataStore: outputDS, + }, nil + } + + return deployment.CreateChangeSet(apply, validate) +} + +func convertVerifierMonitoringConfig(cfg deployments.MonitoringConfig) verifier.MonitoringConfig { + return verifier.MonitoringConfig{ + Enabled: cfg.Enabled, + Type: 
cfg.Type, + Beholder: verifier.BeholderConfig{ + InsecureConnection: cfg.Beholder.InsecureConnection, + CACertFile: cfg.Beholder.CACertFile, + OtelExporterGRPCEndpoint: cfg.Beholder.OtelExporterGRPCEndpoint, + OtelExporterHTTPEndpoint: cfg.Beholder.OtelExporterHTTPEndpoint, + LogStreamingEnabled: cfg.Beholder.LogStreamingEnabled, + MetricReaderInterval: cfg.Beholder.MetricReaderInterval, + TraceSampleRatio: cfg.Beholder.TraceSampleRatio, + TraceBatchTimeout: cfg.Beholder.TraceBatchTimeout, + }, + } +} diff --git a/deployments/changesets/generate_verifier_config_test.go b/deployments/changesets/generate_verifier_config_test.go new file mode 100644 index 00000000..2749a485 --- /dev/null +++ b/deployments/changesets/generate_verifier_config_test.go @@ -0,0 +1,668 @@ +package changesets_test + +import ( + "os" + "path/filepath" + "strconv" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + chainsel "github.com/smartcontractkit/chain-selectors" + "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/committee_verifier" + execcontract "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/executor" + onrampoperations "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/onramp" + "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/rmn_remote" + "github.com/smartcontractkit/chainlink-deployments-framework/datastore" + "github.com/smartcontractkit/chainlink-deployments-framework/deployment" + + "github.com/smartcontractkit/chainlink-ccv/deployments" + "github.com/smartcontractkit/chainlink-ccv/deployments/changesets" + "github.com/smartcontractkit/chainlink-ccv/deployments/testutils" +) + +const testDefaultQualifier = "default" + +func TestGenerateVerifierConfig_ValidatesEnvConfigPath(t *testing.T) { + changeset := changesets.GenerateVerifierConfig() + + env := createVerifierTestEnvironment(t) + + err := changeset.VerifyPreconditions(env, changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: "", + CommitteeQualifier: "test-committee", + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "env config path is required") +} + +func TestGenerateVerifierConfig_ValidatesCommitteeQualifier(t *testing.T) { + changeset := changesets.GenerateVerifierConfig() + + env := createVerifierTestEnvironment(t) + envConfigPath := createVerifierTestEnvConfig(t) + + err := changeset.VerifyPreconditions(env, changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: envConfigPath, + CommitteeQualifier: "", + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "committee qualifier is required") +} + +func TestGenerateVerifierConfig_ValidatesNOPSignerAddress(t *testing.T) { + changeset := changesets.GenerateVerifierConfig() + + env := createVerifierTestEnvironment(t) + envConfigPath := createEnvConfigWithoutSignerAddress(t) + + err := changeset.VerifyPreconditions(env, changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: envConfigPath, + CommitteeQualifier: "test-committee", + }) + require.Error(t, err) + assert.Contains(t, err.Error(), `missing signer_address in env config`) +} + +func TestGenerateVerifierConfig_ValidatesCommitteeExists(t *testing.T) { + changeset := changesets.GenerateVerifierConfig() + + env := createVerifierTestEnvironment(t) + envConfigPath := createVerifierTestEnvConfig(t) + + err := changeset.VerifyPreconditions(env, 
changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: envConfigPath, + CommitteeQualifier: "unknown-committee", + }) + require.Error(t, err) + assert.Contains(t, err.Error(), `committee "unknown-committee" not found in env config`) +} + +func TestGenerateVerifierConfig_ValidatesChainSelectors(t *testing.T) { + changeset := changesets.GenerateVerifierConfig() + + env := createVerifierTestEnvironment(t) + envConfigPath := createVerifierTestEnvConfig(t) + + err := changeset.VerifyPreconditions(env, changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: envConfigPath, + CommitteeQualifier: "test-committee", + ChainSelectors: []uint64{1234}, + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "selector 1234 is not available in environment") +} + +func TestGenerateVerifierConfig_GeneratesCorrectJobSpec(t *testing.T) { + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + committee := testCommittee + executorQualifier := testDefaultQualifier + + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + sel1, sel2 := selectors[0], selectors[1] + sel1Str := strconv.FormatUint(sel1, 10) + sel2Str := strconv.FormatUint(sel2, 10) + + committeeVerifierAddr1 := common.HexToAddress("0x1111111111111111111111111111111111111111") + committeeVerifierAddr2 := common.HexToAddress("0x2222222222222222222222222222222222222222") + onRampAddr1 := common.HexToAddress("0x3333333333333333333333333333333333333333") + onRampAddr2 := common.HexToAddress("0x4444444444444444444444444444444444444444") + executorAddr1 := common.HexToAddress("0x5555555555555555555555555555555555555555") + executorAddr2 := common.HexToAddress("0x6666666666666666666666666666666666666666") + rmnAddr1 := common.HexToAddress("0x7777777777777777777777777777777777777777") + rmnAddr2 := common.HexToAddress("0x8888888888888888888888888888888888888888") + + ds := datastore.NewMemoryDataStore() + + addContractToDatastore(t, ds, sel1, committee, committee_verifier.ResolverType, committeeVerifierAddr1) + addContractToDatastore(t, ds, sel2, committee, committee_verifier.ResolverType, committeeVerifierAddr2) + addContractToDatastore(t, ds, sel1, "", onrampoperations.ContractType, onRampAddr1) + addContractToDatastore(t, ds, sel2, "", onrampoperations.ContractType, onRampAddr2) + addContractToDatastore(t, ds, sel1, executorQualifier, execcontract.ProxyType, executorAddr1) + addContractToDatastore(t, ds, sel2, executorQualifier, execcontract.ProxyType, executorAddr2) + addContractToDatastore(t, ds, sel1, "", rmn_remote.ContractType, rmnAddr1) + addContractToDatastore(t, ds, sel2, "", rmn_remote.ContractType, rmnAddr2) + + env.DataStore = ds.Seal() + + envConfigPath := createVerifierTestEnvConfig(t) + + cs := changesets.GenerateVerifierConfig() + output, err := cs.Apply(env, changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: envConfigPath, + CommitteeQualifier: committee, + ExecutorQualifier: executorQualifier, + ChainSelectors: selectors, + NOPAliases: []string{"nop-1"}, + }) + require.NoError(t, err) + require.NotNil(t, output.DataStore) + + jobSpec, err := deployments.GetNOPJobSpec(output.DataStore.Seal(), "nop-1", "instance-1-test-committee-verifier") + require.NoError(t, err) + + assert.Contains(t, jobSpec, `schemaVersion = 1`) + assert.Contains(t, jobSpec, `type = "ccvcommitteeverifier"`) + assert.Contains(t, jobSpec, `committeeVerifierConfig = """`) + assert.Contains(t, jobSpec, `verifier_id = "instance-1-test-committee-verifier"`) + assert.Contains(t, jobSpec, `aggregator_address = 
"aggregator-1:443"`) + assert.Contains(t, jobSpec, `signer_address = "0xABCDEF1234567890ABCDEF1234567890ABCDEF12"`) + assert.Contains(t, jobSpec, `pyroscope_url = "http://pyroscope:4040"`) + + assert.Contains(t, jobSpec, `[committee_verifier_addresses]`) + assert.Contains(t, jobSpec, sel1Str) + assert.Contains(t, jobSpec, sel2Str) + assert.True(t, strings.Contains(jobSpec, committeeVerifierAddr1.Hex()) || strings.Contains(jobSpec, strings.ToLower(committeeVerifierAddr1.Hex()))) + + assert.Contains(t, jobSpec, `[on_ramp_addresses]`) + assert.Contains(t, jobSpec, `[default_executor_on_ramp_addresses]`) + assert.Contains(t, jobSpec, `[rmn_remote_addresses]`) + + assert.Contains(t, jobSpec, `[monitoring]`) + assert.Contains(t, jobSpec, `Enabled = true`) + assert.Contains(t, jobSpec, `Type = "beholder"`) +} + +func addContractToDatastore(t *testing.T, ds *datastore.MemoryDataStore, selector uint64, qualifier string, contractType deployment.ContractType, addr common.Address) { + t.Helper() + err := ds.Addresses().Add(datastore.AddressRef{ + ChainSelector: selector, + Qualifier: qualifier, + Type: datastore.ContractType(contractType), + Address: addr.Hex(), + }) + require.NoError(t, err) +} + +func TestGenerateVerifierConfig_PreservesExistingConfigs(t *testing.T) { + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + committee := testCommittee + executorQualifier := testDefaultQualifier + + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + sel1, sel2 := selectors[0], selectors[1] + + committeeVerifierAddr1 := common.HexToAddress("0x1111111111111111111111111111111111111111") + committeeVerifierAddr2 := common.HexToAddress("0x2222222222222222222222222222222222222222") + onRampAddr1 := common.HexToAddress("0x3333333333333333333333333333333333333333") + onRampAddr2 := common.HexToAddress("0x4444444444444444444444444444444444444444") + executorAddr1 := common.HexToAddress("0x5555555555555555555555555555555555555555") + executorAddr2 := common.HexToAddress("0x6666666666666666666666666666666666666666") + rmnAddr1 := common.HexToAddress("0x7777777777777777777777777777777777777777") + rmnAddr2 := common.HexToAddress("0x8888888888888888888888888888888888888888") + + ds := datastore.NewMemoryDataStore() + + addContractToDatastore(t, ds, sel1, committee, committee_verifier.ResolverType, committeeVerifierAddr1) + addContractToDatastore(t, ds, sel2, committee, committee_verifier.ResolverType, committeeVerifierAddr2) + addContractToDatastore(t, ds, sel1, "", onrampoperations.ContractType, onRampAddr1) + addContractToDatastore(t, ds, sel2, "", onrampoperations.ContractType, onRampAddr2) + addContractToDatastore(t, ds, sel1, executorQualifier, execcontract.ProxyType, executorAddr1) + addContractToDatastore(t, ds, sel2, executorQualifier, execcontract.ProxyType, executorAddr2) + addContractToDatastore(t, ds, sel1, "", rmn_remote.ContractType, rmnAddr1) + addContractToDatastore(t, ds, sel2, "", rmn_remote.ContractType, rmnAddr2) + + existingExecutorJobSpec := "existing-executor-job-spec-content" + err := deployments.SaveNOPJobSpec(ds, "existing-nop", "existing-executor", existingExecutorJobSpec) + require.NoError(t, err) + + env.DataStore = ds.Seal() + + envConfigPath := createVerifierTestEnvConfig(t) + + cs := changesets.GenerateVerifierConfig() + output, err := cs.Apply(env, changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: envConfigPath, + CommitteeQualifier: committee, + ExecutorQualifier: executorQualifier, + ChainSelectors: selectors, + 
NOPAliases: []string{"nop-1"}, + }) + require.NoError(t, err) + require.NotNil(t, output.DataStore) + + outputSealed := output.DataStore.Seal() + + _, err = deployments.GetNOPJobSpec(outputSealed, "nop-1", "instance-1-test-committee-verifier") + require.NoError(t, err, "new verifier job spec should be present") + + retrievedExecutorJobSpec, err := deployments.GetNOPJobSpec(outputSealed, "existing-nop", "existing-executor") + require.NoError(t, err, "existing executor job spec should be preserved") + assert.Equal(t, existingExecutorJobSpec, retrievedExecutorJobSpec, "executor job spec should be unchanged") +} + +func TestGenerateVerifierConfig_MultipleAggregatorsPerCommittee(t *testing.T) { + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + committee := testCommittee + executorQualifier := testDefaultQualifier + + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + sel1, sel2 := selectors[0], selectors[1] + + committeeVerifierAddr1 := common.HexToAddress("0x1111111111111111111111111111111111111111") + committeeVerifierAddr2 := common.HexToAddress("0x2222222222222222222222222222222222222222") + onRampAddr1 := common.HexToAddress("0x3333333333333333333333333333333333333333") + onRampAddr2 := common.HexToAddress("0x4444444444444444444444444444444444444444") + executorAddr1 := common.HexToAddress("0x5555555555555555555555555555555555555555") + executorAddr2 := common.HexToAddress("0x6666666666666666666666666666666666666666") + rmnAddr1 := common.HexToAddress("0x7777777777777777777777777777777777777777") + rmnAddr2 := common.HexToAddress("0x8888888888888888888888888888888888888888") + + ds := datastore.NewMemoryDataStore() + + addContractToDatastore(t, ds, sel1, committee, committee_verifier.ResolverType, committeeVerifierAddr1) + addContractToDatastore(t, ds, sel2, committee, committee_verifier.ResolverType, committeeVerifierAddr2) + addContractToDatastore(t, ds, sel1, "", onrampoperations.ContractType, onRampAddr1) + addContractToDatastore(t, ds, sel2, "", onrampoperations.ContractType, onRampAddr2) + addContractToDatastore(t, ds, sel1, executorQualifier, execcontract.ProxyType, executorAddr1) + addContractToDatastore(t, ds, sel2, executorQualifier, execcontract.ProxyType, executorAddr2) + addContractToDatastore(t, ds, sel1, "", rmn_remote.ContractType, rmnAddr1) + addContractToDatastore(t, ds, sel2, "", rmn_remote.ContractType, rmnAddr2) + + env.DataStore = ds.Seal() + + envConfigPath := createEnvConfigWithMultipleAggregators(t) + + cs := changesets.GenerateVerifierConfig() + output, err := cs.Apply(env, changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: envConfigPath, + CommitteeQualifier: committee, + ExecutorQualifier: executorQualifier, + ChainSelectors: selectors, + NOPAliases: []string{"nop-1"}, + }) + require.NoError(t, err) + require.NotNil(t, output.DataStore) + + outputSealed := output.DataStore.Seal() + + // Should generate one job spec per (NOP, aggregator) combination + // With 1 NOP and 3 aggregators, we expect 3 job specs + jobSpec1, err := deployments.GetNOPJobSpec(outputSealed, "nop-1", "agg-primary-test-committee-verifier") + require.NoError(t, err, "job spec for primary aggregator should exist") + assert.Contains(t, jobSpec1, `aggregator_address = "aggregator-primary:443"`) + + jobSpec2, err := deployments.GetNOPJobSpec(outputSealed, "nop-1", "agg-secondary-test-committee-verifier") + require.NoError(t, err, "job spec for secondary aggregator should exist") + assert.Contains(t, jobSpec2, `aggregator_address = 
"aggregator-secondary:443"`) + + jobSpec3, err := deployments.GetNOPJobSpec(outputSealed, "nop-1", "agg-tertiary-test-committee-verifier") + require.NoError(t, err, "job spec for tertiary aggregator should exist") + assert.Contains(t, jobSpec3, `aggregator_address = "aggregator-tertiary:443"`) + + // All job specs should have the same signer address for the NOP + assert.Contains(t, jobSpec1, `signer_address = "0xABCDEF1234567890ABCDEF1234567890ABCDEF12"`) + assert.Contains(t, jobSpec2, `signer_address = "0xABCDEF1234567890ABCDEF1234567890ABCDEF12"`) + assert.Contains(t, jobSpec3, `signer_address = "0xABCDEF1234567890ABCDEF1234567890ABCDEF12"`) +} + +func TestGenerateVerifierConfig_RemovesOrphanedJobSpecs(t *testing.T) { + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + committee := testCommittee + executorQualifier := testDefaultQualifier + + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + sel1, sel2 := selectors[0], selectors[1] + + committeeVerifierAddr1 := common.HexToAddress("0x1111111111111111111111111111111111111111") + committeeVerifierAddr2 := common.HexToAddress("0x2222222222222222222222222222222222222222") + onRampAddr1 := common.HexToAddress("0x3333333333333333333333333333333333333333") + onRampAddr2 := common.HexToAddress("0x4444444444444444444444444444444444444444") + executorAddr1 := common.HexToAddress("0x5555555555555555555555555555555555555555") + executorAddr2 := common.HexToAddress("0x6666666666666666666666666666666666666666") + rmnAddr1 := common.HexToAddress("0x7777777777777777777777777777777777777777") + rmnAddr2 := common.HexToAddress("0x8888888888888888888888888888888888888888") + + ds := datastore.NewMemoryDataStore() + + addContractToDatastore(t, ds, sel1, committee, committee_verifier.ResolverType, committeeVerifierAddr1) + addContractToDatastore(t, ds, sel2, committee, committee_verifier.ResolverType, committeeVerifierAddr2) + addContractToDatastore(t, ds, sel1, "", onrampoperations.ContractType, onRampAddr1) + addContractToDatastore(t, ds, sel2, "", onrampoperations.ContractType, onRampAddr2) + addContractToDatastore(t, ds, sel1, executorQualifier, execcontract.ProxyType, executorAddr1) + addContractToDatastore(t, ds, sel2, executorQualifier, execcontract.ProxyType, executorAddr2) + addContractToDatastore(t, ds, sel1, "", rmn_remote.ContractType, rmnAddr1) + addContractToDatastore(t, ds, sel2, "", rmn_remote.ContractType, rmnAddr2) + + // Pre-populate with a verifier job spec for a NOP that will be removed from committee + err := deployments.SaveNOPJobSpec(ds, "nop-removed", "instance-1-test-committee-verifier", "old-job-spec") + require.NoError(t, err) + + env.DataStore = ds.Seal() + + // Create config where nop-removed is NOT in the committee (only nop-1, nop-2) + envConfigPath := createVerifierTestEnvConfig(t) + + cs := changesets.GenerateVerifierConfig() + output, err := cs.Apply(env, changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: envConfigPath, + CommitteeQualifier: committee, + ExecutorQualifier: executorQualifier, + ChainSelectors: selectors, + }) + require.NoError(t, err) + require.NotNil(t, output.DataStore) + + outputSealed := output.DataStore.Seal() + + // The orphaned job spec should be deleted + _, err = deployments.GetNOPJobSpec(outputSealed, "nop-removed", "instance-1-test-committee-verifier") + require.Error(t, err, "orphaned verifier job spec should be deleted") + + // Active NOPs should still have their job specs + _, err = deployments.GetNOPJobSpec(outputSealed, 
"nop-1", "instance-1-test-committee-verifier") + require.NoError(t, err, "nop-1 verifier job spec should exist") + + _, err = deployments.GetNOPJobSpec(outputSealed, "nop-2", "instance-1-test-committee-verifier") + require.NoError(t, err, "nop-2 verifier job spec should exist") +} + +func TestGenerateVerifierConfig_PreservesOtherCommitteeJobSpecs(t *testing.T) { + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + committee := testCommittee + executorQualifier := testDefaultQualifier + + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + sel1, sel2 := selectors[0], selectors[1] + + committeeVerifierAddr1 := common.HexToAddress("0x1111111111111111111111111111111111111111") + committeeVerifierAddr2 := common.HexToAddress("0x2222222222222222222222222222222222222222") + onRampAddr1 := common.HexToAddress("0x3333333333333333333333333333333333333333") + onRampAddr2 := common.HexToAddress("0x4444444444444444444444444444444444444444") + executorAddr1 := common.HexToAddress("0x5555555555555555555555555555555555555555") + executorAddr2 := common.HexToAddress("0x6666666666666666666666666666666666666666") + rmnAddr1 := common.HexToAddress("0x7777777777777777777777777777777777777777") + rmnAddr2 := common.HexToAddress("0x8888888888888888888888888888888888888888") + + ds := datastore.NewMemoryDataStore() + + addContractToDatastore(t, ds, sel1, committee, committee_verifier.ResolverType, committeeVerifierAddr1) + addContractToDatastore(t, ds, sel2, committee, committee_verifier.ResolverType, committeeVerifierAddr2) + addContractToDatastore(t, ds, sel1, "", onrampoperations.ContractType, onRampAddr1) + addContractToDatastore(t, ds, sel2, "", onrampoperations.ContractType, onRampAddr2) + addContractToDatastore(t, ds, sel1, executorQualifier, execcontract.ProxyType, executorAddr1) + addContractToDatastore(t, ds, sel2, executorQualifier, execcontract.ProxyType, executorAddr2) + addContractToDatastore(t, ds, sel1, "", rmn_remote.ContractType, rmnAddr1) + addContractToDatastore(t, ds, sel2, "", rmn_remote.ContractType, rmnAddr2) + + // Pre-populate with a verifier job spec for a DIFFERENT committee + err := deployments.SaveNOPJobSpec(ds, "nop-1", "instance-1-other-committee-verifier", "other-committee-job-spec") + require.NoError(t, err) + + env.DataStore = ds.Seal() + + envConfigPath := createVerifierTestEnvConfig(t) + + cs := changesets.GenerateVerifierConfig() + output, err := cs.Apply(env, changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: envConfigPath, + CommitteeQualifier: committee, + ExecutorQualifier: executorQualifier, + ChainSelectors: selectors, + NOPAliases: []string{"nop-1"}, + }) + require.NoError(t, err) + require.NotNil(t, output.DataStore) + + outputSealed := output.DataStore.Seal() + + // The job spec for a different committee should be preserved + otherCommitteeJobSpec, err := deployments.GetNOPJobSpec(outputSealed, "nop-1", "instance-1-other-committee-verifier") + require.NoError(t, err, "job spec for other committee should be preserved") + assert.Equal(t, "other-committee-job-spec", otherCommitteeJobSpec) + + // Current committee job spec should exist + _, err = deployments.GetNOPJobSpec(outputSealed, "nop-1", "instance-1-test-committee-verifier") + require.NoError(t, err, "nop-1 test-committee verifier job spec should exist") +} + +func TestGenerateVerifierConfig_ScopedNOPAliasesPreservesOtherNOPs(t *testing.T) { + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + committee 
:= testCommittee + executorQualifier := testDefaultQualifier + + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + sel1, sel2 := selectors[0], selectors[1] + + committeeVerifierAddr1 := common.HexToAddress("0x1111111111111111111111111111111111111111") + committeeVerifierAddr2 := common.HexToAddress("0x2222222222222222222222222222222222222222") + onRampAddr1 := common.HexToAddress("0x3333333333333333333333333333333333333333") + onRampAddr2 := common.HexToAddress("0x4444444444444444444444444444444444444444") + executorAddr1 := common.HexToAddress("0x5555555555555555555555555555555555555555") + executorAddr2 := common.HexToAddress("0x6666666666666666666666666666666666666666") + rmnAddr1 := common.HexToAddress("0x7777777777777777777777777777777777777777") + rmnAddr2 := common.HexToAddress("0x8888888888888888888888888888888888888888") + + ds := datastore.NewMemoryDataStore() + + addContractToDatastore(t, ds, sel1, committee, committee_verifier.ResolverType, committeeVerifierAddr1) + addContractToDatastore(t, ds, sel2, committee, committee_verifier.ResolverType, committeeVerifierAddr2) + addContractToDatastore(t, ds, sel1, "", onrampoperations.ContractType, onRampAddr1) + addContractToDatastore(t, ds, sel2, "", onrampoperations.ContractType, onRampAddr2) + addContractToDatastore(t, ds, sel1, executorQualifier, execcontract.ProxyType, executorAddr1) + addContractToDatastore(t, ds, sel2, executorQualifier, execcontract.ProxyType, executorAddr2) + addContractToDatastore(t, ds, sel1, "", rmn_remote.ContractType, rmnAddr1) + addContractToDatastore(t, ds, sel2, "", rmn_remote.ContractType, rmnAddr2) + + // Pre-populate with verifier job specs for BOTH nop-1 and nop-2 + err := deployments.SaveNOPJobSpec(ds, "nop-1", "instance-1-test-committee-verifier", "nop-1-job-spec") + require.NoError(t, err) + err = deployments.SaveNOPJobSpec(ds, "nop-2", "instance-1-test-committee-verifier", "nop-2-job-spec") + require.NoError(t, err) + + env.DataStore = ds.Seal() + + envConfigPath := createVerifierTestEnvConfig(t) + + // Run the changeset with only nop-1 in scope + cs := changesets.GenerateVerifierConfig() + output, err := cs.Apply(env, changesets.GenerateVerifierConfigCfg{ + EnvConfigPath: envConfigPath, + CommitteeQualifier: committee, + ExecutorQualifier: executorQualifier, + ChainSelectors: selectors, + NOPAliases: []string{"nop-1"}, // Scoped to only nop-1 + }) + require.NoError(t, err) + require.NotNil(t, output.DataStore) + + outputSealed := output.DataStore.Seal() + + // nop-1 job spec should be regenerated + _, err = deployments.GetNOPJobSpec(outputSealed, "nop-1", "instance-1-test-committee-verifier") + require.NoError(t, err, "nop-1 verifier job spec should exist") + + // nop-2 job spec should be PRESERVED (not deleted) since nop-2 was not in scope + nop2JobSpec, err := deployments.GetNOPJobSpec(outputSealed, "nop-2", "instance-1-test-committee-verifier") + require.NoError(t, err, "nop-2 verifier job spec should be preserved when not in scope") + assert.Equal(t, "nop-2-job-spec", nop2JobSpec, "nop-2 job spec should be unchanged") +} + +func createVerifierTestEnvironment(t *testing.T) deployment.Environment { + t.Helper() + + selectors := []uint64{ + chainsel.TEST_90000001.Selector, + chainsel.TEST_90000002.Selector, + } + env, _ := testutils.NewSimulatedEVMEnvironment(t, selectors) + return env +} + +func createVerifierTestEnvConfig(t *testing.T) string { + t.Helper() + + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "env.toml") + + cfg := deployments.EnvConfig{ + 
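// Fixture note: the values below intentionally mirror the TOML fixtures used elsewhere in this PR; all addresses and keys are test-only. +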
IndexerAddress: "http://indexer:8100", + PyroscopeURL: "http://pyroscope:4040", + Monitoring: deployments.MonitoringConfig{ + Enabled: true, + Type: "beholder", + Beholder: deployments.BeholderConfig{ + InsecureConnection: true, + OtelExporterHTTPEndpoint: "otel:4318", + MetricReaderInterval: 5, + TraceSampleRatio: 1.0, + TraceBatchTimeout: 10, + }, + }, + NOPTopology: deployments.NOPTopology{ + NOPs: map[string]deployments.NOPConfig{ + "nop-1": {Alias: "nop-1", Name: "NOP One", SignerAddress: "0xABCDEF1234567890ABCDEF1234567890ABCDEF12"}, + "nop-2": {Alias: "nop-2", Name: "NOP Two", SignerAddress: "0x1234567890ABCDEF1234567890ABCDEF12345678"}, + }, + Committees: map[string]deployments.CommitteeConfig{ + "test-committee": { + Qualifier: "test-committee", + VerifierVersion: "1.7.0", + ChainConfigs: map[string]deployments.ChainCommitteeConfig{ + "16015286601757825753": {NOPAliases: []string{"nop-1", "nop-2"}, Threshold: 2}, + }, + Aggregators: []deployments.AggregatorConfig{ + {Name: "instance-1", Address: "aggregator-1:443"}, + }, + }, + }, + }, + ExecutorPools: map[string]deployments.ExecutorPoolConfig{ + "default": { + NOPAliases: []string{"nop-1", "nop-2"}, + ExecutionInterval: 15 * time.Second, + }, + }, + } + + err := deployments.WriteEnvConfig(configPath, cfg) + require.NoError(t, err) + + return configPath +} + +func createEnvConfigWithoutSignerAddress(t *testing.T) string { + t.Helper() + + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "env.toml") + + // Write raw TOML to avoid validation errors during WriteEnvConfig + // This tests that the changeset validates signer_address presence + configContent := ` +indexer_address = "http://indexer:8100" +pyroscope_url = "http://pyroscope:4040" + +[monitoring] +Enabled = true +Type = "beholder" + +[monitoring.Beholder] +InsecureConnection = true + +[nop_topology.nops.nop-1] +alias = "nop-1" +name = "NOP One" + +[nop_topology.nops.nop-2] +alias = "nop-2" +name = "NOP Two" + +[nop_topology.committees.test-committee] +qualifier = "test-committee" +verifier_version = "1.7.0" + +[nop_topology.committees.test-committee.chain_configs.16015286601757825753] +nop_aliases = ["nop-1", "nop-2"] +threshold = 2 + +[[nop_topology.committees.test-committee.aggregators]] +name = "instance-1" +address = "aggregator-1:443" + +[executor_pools.default] +nop_aliases = ["nop-1", "nop-2"] +execution_interval = "15s" +` + + err := os.WriteFile(configPath, []byte(configContent), 0o600) + require.NoError(t, err) + + return configPath +} + +func createEnvConfigWithMultipleAggregators(t *testing.T) string { + t.Helper() + + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "env.toml") + + cfg := deployments.EnvConfig{ + IndexerAddress: "http://indexer:8100", + PyroscopeURL: "http://pyroscope:4040", + Monitoring: deployments.MonitoringConfig{ + Enabled: true, + Type: "beholder", + Beholder: deployments.BeholderConfig{ + InsecureConnection: true, + OtelExporterHTTPEndpoint: "otel:4318", + MetricReaderInterval: 5, + TraceSampleRatio: 1.0, + TraceBatchTimeout: 10, + }, + }, + NOPTopology: deployments.NOPTopology{ + NOPs: map[string]deployments.NOPConfig{ + "nop-1": {Alias: "nop-1", Name: "NOP One", SignerAddress: "0xABCDEF1234567890ABCDEF1234567890ABCDEF12"}, + "nop-2": {Alias: "nop-2", Name: "NOP Two", SignerAddress: "0x1234567890ABCDEF1234567890ABCDEF12345678"}, + }, + Committees: map[string]deployments.CommitteeConfig{ + "test-committee": { + Qualifier: "test-committee", + VerifierVersion: "1.7.0", + ChainConfigs: 
map[string]deployments.ChainCommitteeConfig{ + "16015286601757825753": {NOPAliases: []string{"nop-1", "nop-2"}, Threshold: 2}, + }, + Aggregators: []deployments.AggregatorConfig{ + {Name: "agg-primary", Address: "aggregator-primary:443"}, + {Name: "agg-secondary", Address: "aggregator-secondary:443"}, + {Name: "agg-tertiary", Address: "aggregator-tertiary:443"}, + }, + }, + }, + }, + ExecutorPools: map[string]deployments.ExecutorPoolConfig{ + "default": { + NOPAliases: []string{"nop-1", "nop-2"}, + ExecutionInterval: 15 * time.Second, + }, + }, + } + + err := deployments.WriteEnvConfig(configPath, cfg) + require.NoError(t, err) + + return configPath +} diff --git a/deployments/envconfig.go b/deployments/envconfig.go new file mode 100644 index 00000000..2e487d34 --- /dev/null +++ b/deployments/envconfig.go @@ -0,0 +1,270 @@ +package deployments + +import ( + "fmt" + "os" + "slices" + "time" + + "github.com/BurntSushi/toml" +) + +// EnvConfig holds all environment-specific configuration that cannot be inferred +// from the datastore. This serves as the single source of truth for both off-chain +// (job specs) and on-chain (committee contracts) configuration. +type EnvConfig struct { + IndexerAddress string `toml:"indexer_address"` + PyroscopeURL string `toml:"pyroscope_url"` + Monitoring MonitoringConfig `toml:"monitoring"` + NOPTopology NOPTopology `toml:"nop_topology"` + ExecutorPools map[string]ExecutorPoolConfig `toml:"executor_pools"` +} + +// NOPTopology defines the node operator structure and committee membership. +// This is the single source of truth for both off-chain (job specs) and +// on-chain (committee contracts) configuration. +type NOPTopology struct { + NOPs map[string]NOPConfig `toml:"nops"` + Committees map[string]CommitteeConfig `toml:"committees"` +} + +// NOPConfig defines a Node Operator. +// Each NOP runs exactly one node. The NOP alias serves as the node/executor ID. +// For production: SignerAddress is resolved from e.Nodes at deployment time. +// For devenv: SignerAddress can be set directly in the config. +type NOPConfig struct { + Alias string `toml:"alias"` + Name string `toml:"name"` + SignerAddress string `toml:"signer_address,omitempty"` +} + +// CommitteeConfig defines a committee and its per-chain membership. +type CommitteeConfig struct { + Qualifier string `toml:"qualifier"` + VerifierVersion string `toml:"verifier_version"` + ChainConfigs map[string]ChainCommitteeConfig `toml:"chain_configs"` + Aggregators []AggregatorConfig `toml:"aggregators"` +} + +// ChainCommitteeConfig defines committee membership for a specific chain. +type ChainCommitteeConfig struct { + NOPAliases []string `toml:"nop_aliases"` + Threshold uint8 `toml:"threshold"` +} + +// AggregatorConfig defines an aggregator instance for HA setups. +type AggregatorConfig struct { + Name string `toml:"name"` + Address string `toml:"address"` + InsecureAggregatorConnection bool `toml:"insecure_connection"` +} + +// ExecutorPoolConfig defines executor pool membership and configuration. +type ExecutorPoolConfig struct { + NOPAliases []string `toml:"nop_aliases"` + ExecutionInterval time.Duration `toml:"execution_interval"` +} + +// MonitoringConfig provides monitoring configuration shared across services. +type MonitoringConfig struct { + Enabled bool `toml:"Enabled"` + Type string `toml:"Type"` + Beholder BeholderConfig `toml:"Beholder"` +} + +// BeholderConfig wraps OpenTelemetry configuration for the beholder client. 
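+// A minimal TOML sketch of this table, using the capitalized keys tagged on
+// the struct below (values are illustrative, taken from this PR's test fixtures):
+//
+//	[monitoring.Beholder]
+//	InsecureConnection = true
+//	OtelExporterHTTPEndpoint = "otel:4318"
+//	MetricReaderInterval = 5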
+type BeholderConfig struct { + InsecureConnection bool `toml:"InsecureConnection"` + CACertFile string `toml:"CACertFile"` + OtelExporterGRPCEndpoint string `toml:"OtelExporterGRPCEndpoint"` + OtelExporterHTTPEndpoint string `toml:"OtelExporterHTTPEndpoint"` + LogStreamingEnabled bool `toml:"LogStreamingEnabled"` + MetricReaderInterval int64 `toml:"MetricReaderInterval"` + TraceSampleRatio float64 `toml:"TraceSampleRatio"` + TraceBatchTimeout int64 `toml:"TraceBatchTimeout"` +} + +// LoadEnvConfig loads an EnvConfig from a TOML file. +func LoadEnvConfig(path string) (*EnvConfig, error) { + data, err := os.ReadFile(path) //nolint:gosec // G304: path is provided by trusted caller + if err != nil { + return nil, fmt.Errorf("failed to read env config file: %w", err) + } + + var cfg EnvConfig + if err := toml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("failed to parse env config TOML: %w", err) + } + + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("env config validation failed: %w", err) + } + + return &cfg, nil +} + +// WriteEnvConfig writes an EnvConfig to a TOML file. +func WriteEnvConfig(path string, cfg EnvConfig) error { + if err := cfg.Validate(); err != nil { + return fmt.Errorf("env config validation failed: %w", err) + } + + data, err := toml.Marshal(cfg) + if err != nil { + return fmt.Errorf("failed to marshal env config to TOML: %w", err) + } + + if err := os.WriteFile(path, data, 0o600); err != nil { + return fmt.Errorf("failed to write env config file: %w", err) + } + + return nil +} + +// Validate validates the EnvConfig. +func (c *EnvConfig) Validate() error { + if c.IndexerAddress == "" { + return fmt.Errorf("indexer_address is required") + } + + if err := c.NOPTopology.Validate(); err != nil { + return fmt.Errorf("nop_topology validation failed: %w", err) + } + + for poolName, pool := range c.ExecutorPools { + if err := pool.Validate(poolName, c.NOPTopology.NOPs); err != nil { + return fmt.Errorf("executor_pool %q validation failed: %w", poolName, err) + } + } + + return nil +} + +// Validate validates the NOPTopology. +func (t *NOPTopology) Validate() error { + for alias, nop := range t.NOPs { + if nop.Alias != alias { + return fmt.Errorf("NOP alias mismatch: key %q != alias %q", alias, nop.Alias) + } + if nop.Name == "" { + return fmt.Errorf("NOP %q name is required", alias) + } + } + + for qualifier, committee := range t.Committees { + if committee.Qualifier != qualifier { + return fmt.Errorf("committee qualifier mismatch: key %q != qualifier %q", qualifier, committee.Qualifier) + } + if err := committee.Validate(t.NOPs); err != nil { + return fmt.Errorf("committee %q validation failed: %w", qualifier, err) + } + } + + return nil +} + +// Validate validates the CommitteeConfig. 
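+// It requires at least one aggregator (each with a name and address), checks
+// that every chain config references only known NOP aliases, and enforces
+// 1 <= threshold <= len(nop_aliases) per chain.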
+func (c *CommitteeConfig) Validate(nops map[string]NOPConfig) error { + if len(c.Aggregators) == 0 { + return fmt.Errorf("at least one aggregator is required") + } + + for _, agg := range c.Aggregators { + if agg.Name == "" { + return fmt.Errorf("aggregator name is required") + } + if agg.Address == "" { + return fmt.Errorf("aggregator %q address is required", agg.Name) + } + } + + for chainSelector, chainCfg := range c.ChainConfigs { + if len(chainCfg.NOPAliases) == 0 { + return fmt.Errorf("chain %q requires at least one NOP", chainSelector) + } + for _, alias := range chainCfg.NOPAliases { + if _, ok := nops[alias]; !ok { + return fmt.Errorf("chain %q references unknown NOP alias %q", chainSelector, alias) + } + } + if chainCfg.Threshold == 0 { + return fmt.Errorf("chain %q threshold must be greater than 0", chainSelector) + } + if int(chainCfg.Threshold) > len(chainCfg.NOPAliases) { + return fmt.Errorf("chain %q threshold %d exceeds NOP count %d", chainSelector, chainCfg.Threshold, len(chainCfg.NOPAliases)) + } + } + + return nil +} + +// Validate validates the ExecutorPoolConfig. +func (p *ExecutorPoolConfig) Validate(poolName string, nops map[string]NOPConfig) error { + if len(p.NOPAliases) == 0 { + return fmt.Errorf("executor pool requires at least one NOP") + } + + for _, alias := range p.NOPAliases { + if _, ok := nops[alias]; !ok { + return fmt.Errorf("executor pool references unknown NOP alias %q", alias) + } + } + + return nil +} + +// GetNOPsForPool returns the NOP aliases for a given executor pool. +func (c *EnvConfig) GetNOPsForPool(poolName string) ([]string, error) { + pool, ok := c.ExecutorPools[poolName] + if !ok { + return nil, fmt.Errorf("executor pool %q not found", poolName) + } + return pool.NOPAliases, nil +} + +// GetNOPsForCommittee returns the NOP aliases that are members of a committee on any chain. +func (c *EnvConfig) GetNOPsForCommittee(committeeQualifier string) ([]string, error) { + committee, ok := c.NOPTopology.Committees[committeeQualifier] + if !ok { + return nil, fmt.Errorf("committee %q not found", committeeQualifier) + } + + nopSet := make(map[string]struct{}) + for _, chainCfg := range committee.ChainConfigs { + for _, alias := range chainCfg.NOPAliases { + nopSet[alias] = struct{}{} + } + } + + nops := make([]string, 0, len(nopSet)) + for alias := range nopSet { + nops = append(nops, alias) + } + + return nops, nil +} + +// GetCommitteesForNOP returns the committee qualifiers that include a NOP on any chain. +func (c *EnvConfig) GetCommitteesForNOP(nopAlias string) []string { + var committees []string + for qualifier, committee := range c.NOPTopology.Committees { + for _, chainCfg := range committee.ChainConfigs { + if slices.Contains(chainCfg.NOPAliases, nopAlias) { + committees = append(committees, qualifier) + break + } + } + } + return committees +} + +// GetPoolsForNOP returns the executor pool names that include a NOP. 
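+// For example, given pools {"pool-a": ["nop-1"], "pool-b": ["nop-1", "nop-2"]},
+// GetPoolsForNOP("nop-1") returns both pool names; an unknown alias yields an
+// empty slice rather than an error.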
+func (c *EnvConfig) GetPoolsForNOP(nopAlias string) []string { + var pools []string + for poolName, pool := range c.ExecutorPools { + if slices.Contains(pool.NOPAliases, nopAlias) { + pools = append(pools, poolName) + } + } + return pools +} diff --git a/deployments/envconfig_test.go b/deployments/envconfig_test.go new file mode 100644 index 00000000..2b879e84 --- /dev/null +++ b/deployments/envconfig_test.go @@ -0,0 +1,325 @@ +package deployments_test + +import ( + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-ccv/deployments" +) + +func TestLoadEnvConfig_LoadsValidConfig(t *testing.T) { + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "env.toml") + + configContent := ` +indexer_address = "http://indexer:8100" +pyroscope_url = "http://pyroscope:4040" + +[monitoring] +Enabled = true +Type = "beholder" + +[monitoring.Beholder] +InsecureConnection = true +OtelExporterHTTPEndpoint = "otel:4318" +MetricReaderInterval = 5 +TraceSampleRatio = 1.0 +TraceBatchTimeout = 10 + +[executor_pools.default] +nop_aliases = ["nop-1", "nop-2"] +execution_interval = "15s" + +[nop_topology.nops.nop-1] +alias = "nop-1" +name = "NOP One" + +[nop_topology.nops.nop-2] +alias = "nop-2" +name = "NOP Two" + +[nop_topology.committees.default] +qualifier = "default" +verifier_version = "1.7.0" + +[nop_topology.committees.default.chain_configs."16015286601757825753"] +nop_aliases = ["nop-1", "nop-2"] +threshold = 2 + +[[nop_topology.committees.default.aggregators]] +name = "instance-1" +address = "aggregator-1:443" +insecure_connection = false +` + err := os.WriteFile(configPath, []byte(configContent), 0o600) + require.NoError(t, err) + + cfg, err := deployments.LoadEnvConfig(configPath) + require.NoError(t, err) + + assert.Equal(t, "http://indexer:8100", cfg.IndexerAddress) + assert.Equal(t, "http://pyroscope:4040", cfg.PyroscopeURL) + assert.True(t, cfg.Monitoring.Enabled) + assert.Equal(t, "beholder", cfg.Monitoring.Type) + + require.Len(t, cfg.NOPTopology.NOPs, 2) + assert.Equal(t, "NOP One", cfg.NOPTopology.NOPs["nop-1"].Name) + assert.Equal(t, "NOP Two", cfg.NOPTopology.NOPs["nop-2"].Name) + + require.Len(t, cfg.NOPTopology.Committees, 1) + committee := cfg.NOPTopology.Committees["default"] + assert.Equal(t, "default", committee.Qualifier) + require.Len(t, committee.Aggregators, 1) + assert.Equal(t, "instance-1", committee.Aggregators[0].Name) + + require.Len(t, cfg.ExecutorPools, 1) + pool := cfg.ExecutorPools["default"] + assert.Equal(t, []string{"nop-1", "nop-2"}, pool.NOPAliases) + assert.Equal(t, 15*time.Second, pool.ExecutionInterval) +} + +func TestWriteEnvConfig_WritesValidConfig(t *testing.T) { + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "env.toml") + + cfg := deployments.EnvConfig{ + IndexerAddress: "http://indexer:8100", + PyroscopeURL: "http://pyroscope:4040", + Monitoring: deployments.MonitoringConfig{ + Enabled: true, + Type: "beholder", + Beholder: deployments.BeholderConfig{ + InsecureConnection: true, + OtelExporterHTTPEndpoint: "otel:4318", + MetricReaderInterval: 5, + TraceSampleRatio: 1.0, + TraceBatchTimeout: 10, + }, + }, + NOPTopology: deployments.NOPTopology{ + NOPs: map[string]deployments.NOPConfig{ + "nop-1": {Alias: "nop-1", Name: "NOP One"}, + "nop-2": {Alias: "nop-2", Name: "NOP Two"}, + }, + Committees: map[string]deployments.CommitteeConfig{ + "default": { + Qualifier: "default", + VerifierVersion: "1.7.0", + ChainConfigs: 
map[string]deployments.ChainCommitteeConfig{ + "16015286601757825753": {NOPAliases: []string{"nop-1", "nop-2"}, Threshold: 2}, + }, + Aggregators: []deployments.AggregatorConfig{ + {Name: "instance-1", Address: "aggregator-1:443"}, + }, + }, + }, + }, + ExecutorPools: map[string]deployments.ExecutorPoolConfig{ + "default": { + NOPAliases: []string{"nop-1", "nop-2"}, + ExecutionInterval: 15 * time.Second, + }, + }, + } + + err := deployments.WriteEnvConfig(configPath, cfg) + require.NoError(t, err) + + loaded, err := deployments.LoadEnvConfig(configPath) + require.NoError(t, err) + + assert.Equal(t, cfg.IndexerAddress, loaded.IndexerAddress) + assert.Equal(t, cfg.PyroscopeURL, loaded.PyroscopeURL) + assert.Len(t, loaded.NOPTopology.NOPs, 2) + assert.Len(t, loaded.NOPTopology.Committees, 1) + assert.Len(t, loaded.ExecutorPools, 1) +} + +func TestEnvConfig_GetNOPsForPool(t *testing.T) { + cfg := deployments.EnvConfig{ + IndexerAddress: "http://indexer:8100", + NOPTopology: deployments.NOPTopology{ + NOPs: map[string]deployments.NOPConfig{ + "nop-1": {Alias: "nop-1", Name: "NOP One"}, + "nop-2": {Alias: "nop-2", Name: "NOP Two"}, + }, + Committees: map[string]deployments.CommitteeConfig{}, + }, + ExecutorPools: map[string]deployments.ExecutorPoolConfig{ + "default": {NOPAliases: []string{"nop-1", "nop-2"}}, + }, + } + + nops, err := cfg.GetNOPsForPool("default") + require.NoError(t, err) + assert.ElementsMatch(t, []string{"nop-1", "nop-2"}, nops) + + _, err = cfg.GetNOPsForPool("nonexistent") + require.Error(t, err) +} + +func TestEnvConfig_GetNOPsForCommittee(t *testing.T) { + cfg := deployments.EnvConfig{ + IndexerAddress: "http://indexer:8100", + NOPTopology: deployments.NOPTopology{ + NOPs: map[string]deployments.NOPConfig{ + "nop-1": {Alias: "nop-1", Name: "NOP One"}, + "nop-2": {Alias: "nop-2", Name: "NOP Two"}, + "nop-3": {Alias: "nop-3", Name: "NOP Three"}, + }, + Committees: map[string]deployments.CommitteeConfig{ + "default": { + Qualifier: "default", + ChainConfigs: map[string]deployments.ChainCommitteeConfig{ + "123": {NOPAliases: []string{"nop-1", "nop-2"}, Threshold: 2}, + "456": {NOPAliases: []string{"nop-2", "nop-3"}, Threshold: 2}, + }, + Aggregators: []deployments.AggregatorConfig{{Name: "agg", Address: "addr"}}, + }, + }, + }, + ExecutorPools: map[string]deployments.ExecutorPoolConfig{}, + } + + nops, err := cfg.GetNOPsForCommittee("default") + require.NoError(t, err) + assert.ElementsMatch(t, []string{"nop-1", "nop-2", "nop-3"}, nops) + + _, err = cfg.GetNOPsForCommittee("nonexistent") + require.Error(t, err) +} + +func TestEnvConfig_GetCommitteesForNOP(t *testing.T) { + cfg := deployments.EnvConfig{ + IndexerAddress: "http://indexer:8100", + NOPTopology: deployments.NOPTopology{ + NOPs: map[string]deployments.NOPConfig{ + "nop-1": {Alias: "nop-1", Name: "NOP One"}, + }, + Committees: map[string]deployments.CommitteeConfig{ + "committee-a": { + Qualifier: "committee-a", + ChainConfigs: map[string]deployments.ChainCommitteeConfig{ + "123": {NOPAliases: []string{"nop-1"}, Threshold: 1}, + }, + Aggregators: []deployments.AggregatorConfig{{Name: "agg", Address: "addr"}}, + }, + "committee-b": { + Qualifier: "committee-b", + ChainConfigs: map[string]deployments.ChainCommitteeConfig{ + "456": {NOPAliases: []string{"nop-1"}, Threshold: 1}, + }, + Aggregators: []deployments.AggregatorConfig{{Name: "agg", Address: "addr"}}, + }, + }, + }, + ExecutorPools: map[string]deployments.ExecutorPoolConfig{}, + } + + committees := cfg.GetCommitteesForNOP("nop-1") + assert.ElementsMatch(t, 
[]string{"committee-a", "committee-b"}, committees) + + committees = cfg.GetCommitteesForNOP("nonexistent") + assert.Empty(t, committees) +} + +func TestEnvConfig_GetPoolsForNOP(t *testing.T) { + cfg := deployments.EnvConfig{ + IndexerAddress: "http://indexer:8100", + NOPTopology: deployments.NOPTopology{ + NOPs: map[string]deployments.NOPConfig{ + "nop-1": {Alias: "nop-1", Name: "NOP One"}, + }, + Committees: map[string]deployments.CommitteeConfig{}, + }, + ExecutorPools: map[string]deployments.ExecutorPoolConfig{ + "pool-a": {NOPAliases: []string{"nop-1"}}, + "pool-b": {NOPAliases: []string{"nop-1", "nop-2"}}, + }, + } + + pools := cfg.GetPoolsForNOP("nop-1") + assert.ElementsMatch(t, []string{"pool-a", "pool-b"}, pools) + + pools = cfg.GetPoolsForNOP("nonexistent") + assert.Empty(t, pools) +} + +func TestEnvConfig_Validate_RequiresIndexerAddress(t *testing.T) { + cfg := deployments.EnvConfig{ + IndexerAddress: "", + } + + err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "indexer_address is required") +} + +func TestEnvConfig_Validate_NOPAliasMismatch(t *testing.T) { + cfg := deployments.EnvConfig{ + IndexerAddress: "http://indexer:8100", + NOPTopology: deployments.NOPTopology{ + NOPs: map[string]deployments.NOPConfig{ + "wrong-key": {Alias: "correct-alias", Name: "NOP"}, + }, + Committees: map[string]deployments.CommitteeConfig{}, + }, + } + + err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "NOP alias mismatch") +} + +func TestEnvConfig_Validate_CommitteeReferencesUnknownNOP(t *testing.T) { + cfg := deployments.EnvConfig{ + IndexerAddress: "http://indexer:8100", + NOPTopology: deployments.NOPTopology{ + NOPs: map[string]deployments.NOPConfig{ + "nop-1": {Alias: "nop-1", Name: "NOP One"}, + }, + Committees: map[string]deployments.CommitteeConfig{ + "default": { + Qualifier: "default", + ChainConfigs: map[string]deployments.ChainCommitteeConfig{ + "123": {NOPAliases: []string{"nop-1", "unknown-nop"}, Threshold: 2}, + }, + Aggregators: []deployments.AggregatorConfig{{Name: "agg", Address: "addr"}}, + }, + }, + }, + } + + err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "unknown NOP alias") +} + +func TestEnvConfig_Validate_ThresholdExceedsNOPCount(t *testing.T) { + cfg := deployments.EnvConfig{ + IndexerAddress: "http://indexer:8100", + NOPTopology: deployments.NOPTopology{ + NOPs: map[string]deployments.NOPConfig{ + "nop-1": {Alias: "nop-1", Name: "NOP One"}, + }, + Committees: map[string]deployments.CommitteeConfig{ + "default": { + Qualifier: "default", + ChainConfigs: map[string]deployments.ChainCommitteeConfig{ + "123": {NOPAliases: []string{"nop-1"}, Threshold: 5}, + }, + Aggregators: []deployments.AggregatorConfig{{Name: "agg", Address: "addr"}}, + }, + }, + }, + } + + err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "threshold 5 exceeds NOP count 1") +} diff --git a/deployments/go.mod b/deployments/go.mod index 02949307..c2ed80cd 100644 --- a/deployments/go.mod +++ b/deployments/go.mod @@ -10,6 +10,7 @@ replace ( ) require ( + github.com/BurntSushi/toml v1.5.0 github.com/Masterminds/semver/v3 v3.4.0 github.com/ethereum/go-ethereum v1.16.8 github.com/evanphx/json-patch/v5 v5.9.11 @@ -29,12 +30,12 @@ require ( dario.cat/mergo v1.0.2 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/BurntSushi/toml v1.5.0 // indirect github.com/DataDog/zstd v1.5.6 // indirect 
github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 // indirect github.com/VictoriaMetrics/fastcache v1.13.0 // indirect github.com/XSAM/otelsql v0.37.0 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/aptos-labs/aptos-go-sdk v1.11.0 // indirect github.com/avast/retry-go v3.0.0+incompatible // indirect github.com/avast/retry-go/v4 v4.6.1 // indirect @@ -53,11 +54,15 @@ require ( github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect github.com/btcsuite/btcutil v1.0.2 // indirect github.com/buger/jsonparser v1.1.1 // indirect + github.com/bytedance/sonic v1.11.6 // indirect + github.com/bytedance/sonic/loader v0.1.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cenkalti/backoff/v5 v5.0.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudevents/sdk-go/binding/format/protobuf/v2 v2.16.1 // indirect github.com/cloudevents/sdk-go/v2 v2.16.1 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect @@ -99,6 +104,8 @@ require ( github.com/gagliardetto/solana-go v1.13.0 // indirect github.com/gagliardetto/treeout v0.1.4 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/gin-gonic/gin v1.10.1 // indirect github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -108,6 +115,7 @@ require ( github.com/go-playground/validator/v10 v10.28.0 // indirect github.com/go-resty/resty/v2 v2.16.5 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/goccy/go-json v0.10.5 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect @@ -122,6 +130,7 @@ require ( github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-plugin v1.7.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/yamux v0.1.2 // indirect github.com/hasura/go-graphql-client v0.14.5 // indirect github.com/hdevalence/ed25519consensus v0.2.0 // indirect @@ -176,6 +185,7 @@ require ( github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/oapi-codegen/runtime v1.1.2 // indirect github.com/oklog/run v1.2.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect @@ -237,7 +247,9 @@ require ( github.com/tidwall/pretty v1.2.1 // indirect github.com/tklauser/go-sysconf v0.3.15 // indirect github.com/tklauser/numcpus v0.10.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect github.com/urfave/cli/v2 v2.27.7 // indirect github.com/valyala/fastjson v1.6.4 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect @@ -271,6 +283,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect 
go.uber.org/ratelimit v0.3.1 // indirect go.uber.org/zap v1.27.1 // indirect + golang.org/x/arch v0.8.0 // indirect golang.org/x/crypto v0.46.0 // indirect golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc // indirect golang.org/x/net v0.47.0 // indirect diff --git a/deployments/go.sum b/deployments/go.sum index a7beb38c..6bae058d 100644 --- a/deployments/go.sum +++ b/deployments/go.sum @@ -21,6 +21,7 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU= github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU= github.com/XSAM/otelsql v0.37.0 h1:ya5RNw028JW0eJW8Ma4AmoKxAYsJSGuNVbC7F1J457A= @@ -30,6 +31,8 @@ github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKS github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/apache/arrow-go/v18 v18.3.1 h1:oYZT8FqONiK74JhlH3WKVv+2NKYoyZ7C2ioD4Dj3ixk= github.com/apache/arrow-go/v18 v18.3.1/go.mod h1:12QBya5JZT6PnBihi5NJTzbACrDGXYkrgjujz3MRQXU= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/aptos-labs/aptos-go-sdk v1.11.0 h1:vIL1hpjECUiu7zMl9Wz6VV8ttXsrDqKUj0HxoeaIER4= github.com/aptos-labs/aptos-go-sdk v1.11.0/go.mod h1:8YvYwRg93UcG6pTStCpZdYiscCtKh51sYfeLgIy/41c= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= @@ -85,6 +88,7 @@ github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHf github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= github.com/block-vision/sui-go-sdk v1.1.2 h1:p9DPfb51mEcTmF0Lx9ORpH+Nh9Rzg4Sv3Pu5gsJZ2AA= github.com/block-vision/sui-go-sdk v1.1.2/go.mod h1:KlibJnwEpWt8qhQkIPxc/2ZE4kwh0Md6LvMHmW5kemA= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= @@ -118,6 +122,10 @@ github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/ github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= +github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= +github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= 
+github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= @@ -135,6 +143,10 @@ github.com/cloudevents/sdk-go/binding/format/protobuf/v2 v2.16.1 h1:nLaJZcVAnaqc github.com/cloudevents/sdk-go/binding/format/protobuf/v2 v2.16.1/go.mod h1:6Q+F2puKpJ6zWv+R02BVnizJICf7++oRT5zwpZQAsbk= github.com/cloudevents/sdk-go/v2 v2.16.1 h1:G91iUdqvl88BZ1GYYr9vScTj5zzXSyEuqbfE63gbu9Q= github.com/cloudevents/sdk-go/v2 v2.16.1/go.mod h1:v/kVOaWjNfbvc6tkhhlkhvLapj8Aa8kvXiH5GiOHCKI= +github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= +github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= @@ -263,6 +275,10 @@ github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8x github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ= +github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY= @@ -347,6 +363,10 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/pyroscope-go v1.2.7 h1:VWBBlqxjyR0Cwk2W6UrE8CdcdD80GOFNutj0Kb1T8ac= +github.com/grafana/pyroscope-go v1.2.7/go.mod h1:o/bpSLiJYYP6HQtvcoVKiE9s5RiNgjYTj1DhiddP2Pc= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/graph-gophers/graphql-go v1.5.0 h1:fDqblo50TEpD0LY7RXk/LFVYEVqo3+tXMNMPSVXA1yc= github.com/graph-gophers/graphql-go v1.5.0/go.mod h1:YtmJZDLbF1YYNrlNAuiO5zAStUWc3XZT07iGsVqe1Os= github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA= @@ -367,6 +387,8 
@@ github.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshf github.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= github.com/hasura/go-graphql-client v0.14.5 h1:M9HxxGLCcDZnxJGYyWXAzDYEpommgjW+sUW3V8EaGms= @@ -461,6 +483,7 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52 h1:msKODTL1m0wigztaqILOtla9HeW1ciscYG4xjLtvk5I= github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52/go.mod h1:qk1sX/IBgppQNcGCRoj90u6EGC056EBoIc1oEjCWla8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -469,8 +492,10 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -568,6 +593,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oapi-codegen/runtime v1.1.2 h1:P2+CubHq8fO4Q6fV1tqDBZHCwpVpvPg7oKiYzQgXIyI= +github.com/oapi-codegen/runtime v1.1.2/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E= github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= @@ -592,6 +619,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= @@ -736,6 +765,7 @@ github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stephenlacy/go-ethereum-hdwallet v0.0.0-20230913225845-a4fa94429863 h1:ba4VRWSkRzgdP5hB5OxexIzBXZbSwgcw8bEu06ivGQI= github.com/stephenlacy/go-ethereum-hdwallet v0.0.0-20230913225845-a4fa94429863/go.mod h1:oPTjPNrRucLv9mU27iNPj6n0CWWcNFhoXFOLVGJwHCA= github.com/streamingfast/logging v0.0.0-20230608130331-f22c91403091 h1:RN5mrigyirb8anBEtdjtHFIufXdacyTi6i4KBfeNXeo= @@ -783,8 +813,12 @@ github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8O github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= @@ -882,6 +916,9 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= +golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= @@ -1138,3 +1175,5 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/deployments/loader.go b/deployments/loader.go index e859b3b6..61d768d4 100644 --- a/deployments/loader.go +++ b/deployments/loader.go @@ -3,6 +3,7 @@ package deployments import ( "encoding/json" "fmt" + "maps" jsonpatch "github.com/evanphx/json-patch/v5" @@ -19,6 +20,9 @@ type OffchainConfigs struct { Aggregators map[string]*model.Committee `json:"aggregators,omitempty"` // Indexers maps service identifier (e.g., "indexer") to generated verifier config. Indexers map[string]*config.GeneratedConfig `json:"indexers,omitempty"` + // NOPJobSpecs maps NOP alias to a map of job spec ID to job spec TOML. + // This groups all job specs (verifier, executor) for a given NOP together. + NOPJobSpecs map[string]map[string]string `json:"nopJobSpecs,omitempty"` } // CCVEnvMetadata represents the expected structure of env_metadata.json for CCV. @@ -129,6 +133,154 @@ func GetIndexerConfig(ds datastore.DataStore, serviceIdentifier string) (*config return cfg, nil } +// SaveNOPJobSpec saves a job spec to the datastore under the given NOP alias and job spec ID. +// This allows grouping all job specs (verifier, executor) for a given NOP together. +func SaveNOPJobSpec(ds datastore.MutableDataStore, nopAlias, jobSpecID, jobSpec string) error { + ccvMeta, err := loadOrCreateCCVEnvMetadata(ds) + if err != nil { + return err + } + + if ccvMeta.OffchainConfigs == nil { + ccvMeta.OffchainConfigs = &OffchainConfigs{} + } + if ccvMeta.OffchainConfigs.NOPJobSpecs == nil { + ccvMeta.OffchainConfigs.NOPJobSpecs = make(map[string]map[string]string) + } + if ccvMeta.OffchainConfigs.NOPJobSpecs[nopAlias] == nil { + ccvMeta.OffchainConfigs.NOPJobSpecs[nopAlias] = make(map[string]string) + } + + ccvMeta.OffchainConfigs.NOPJobSpecs[nopAlias][jobSpecID] = jobSpec + + return saveCCVEnvMetadata(ds, ccvMeta) +} + +// GetNOPJobSpec retrieves a specific job spec from the datastore by NOP alias and job spec ID. +func GetNOPJobSpec(ds datastore.DataStore, nopAlias, jobSpecID string) (string, error) { + ccvMeta, err := loadCCVEnvMetadata(ds) + if err != nil { + return "", err + } + + if ccvMeta.OffchainConfigs == nil || ccvMeta.OffchainConfigs.NOPJobSpecs == nil { + return "", fmt.Errorf("no NOP job specs found") + } + + nopSpecs, ok := ccvMeta.OffchainConfigs.NOPJobSpecs[nopAlias] + if !ok { + return "", fmt.Errorf("no job specs found for NOP %q", nopAlias) + } + + jobSpec, ok := nopSpecs[jobSpecID] + if !ok { + return "", fmt.Errorf("job spec %q not found for NOP %q", jobSpecID, nopAlias) + } + + return jobSpec, nil +} + +// GetNOPJobSpecs retrieves all job specs for a given NOP alias. 
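+// Unlike GetAllNOPJobSpecs below, it returns an error (rather than an empty
+// map) when the alias has no recorded job specs.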
+func GetNOPJobSpecs(ds datastore.DataStore, nopAlias string) (map[string]string, error) {
+	ccvMeta, err := loadCCVEnvMetadata(ds)
+	if err != nil {
+		return nil, err
+	}
+
+	if ccvMeta.OffchainConfigs == nil || ccvMeta.OffchainConfigs.NOPJobSpecs == nil {
+		return nil, fmt.Errorf("no NOP job specs found")
+	}
+
+	nopSpecs, ok := ccvMeta.OffchainConfigs.NOPJobSpecs[nopAlias]
+	if !ok {
+		return nil, fmt.Errorf("no job specs found for NOP %q", nopAlias)
+	}
+
+	return nopSpecs, nil
+}
+
+// GetAllNOPJobSpecs retrieves all NOP job specs from the datastore.
+// Returns a map of NOP alias to map of job spec ID to job spec content.
+// Returns an empty map (not an error) if no job specs exist.
+func GetAllNOPJobSpecs(ds datastore.DataStore) (map[string]map[string]string, error) {
+	ccvMeta, err := loadCCVEnvMetadata(ds)
+	if err != nil {
+		return nil, err
+	}
+
+	if ccvMeta.OffchainConfigs == nil || ccvMeta.OffchainConfigs.NOPJobSpecs == nil {
+		return make(map[string]map[string]string), nil
+	}
+
+	return ccvMeta.OffchainConfigs.NOPJobSpecs, nil
+}
+
+// DeleteNOPJobSpec removes a specific job spec from the datastore by NOP alias and job spec ID.
+// Returns nil if the job spec doesn't exist (idempotent delete).
+func DeleteNOPJobSpec(ds datastore.MutableDataStore, nopAlias, jobSpecID string) error {
+	ccvMeta, err := loadOrCreateCCVEnvMetadata(ds)
+	if err != nil {
+		return err
+	}
+
+	if ccvMeta.OffchainConfigs == nil || ccvMeta.OffchainConfigs.NOPJobSpecs == nil {
+		return nil // Nothing to delete
+	}
+
+	nopSpecs, ok := ccvMeta.OffchainConfigs.NOPJobSpecs[nopAlias]
+	if !ok {
+		return nil // NOP not found, nothing to delete
+	}
+
+	if _, ok := nopSpecs[jobSpecID]; !ok {
+		return nil // Job spec not found, nothing to delete
+	}
+
+	delete(ccvMeta.OffchainConfigs.NOPJobSpecs[nopAlias], jobSpecID)
+
+	// Clean up empty NOP entry
+	if len(ccvMeta.OffchainConfigs.NOPJobSpecs[nopAlias]) == 0 {
+		delete(ccvMeta.OffchainConfigs.NOPJobSpecs, nopAlias)
+	}
+
+	// Use full replacement instead of merge patch to properly handle deletions
+	return replaceCCVEnvMetadata(ds, ccvMeta)
+}
+
+// replaceCCVEnvMetadata replaces the CCV metadata completely (not a merge).
+// This is needed for delete operations since JSON Merge Patch doesn't remove missing keys.
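+//
+// Illustration of why (RFC 7396 semantics): merge-patching
+//	{"specs": {"a": "1", "b": "2"}}
+// with
+//	{"specs": {"a": "1"}}
+// keeps "b", because keys absent from the patch are left untouched; only an
+// explicit null removes a key. Replacing the whole metadata map avoids that.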
+func replaceCCVEnvMetadata(ds datastore.MutableDataStore, ccvMeta *CCVEnvMetadata) error { + // Get existing metadata to preserve non-CCV fields + var existingMeta map[string]any + if envMeta, err := ds.EnvMetadata().Get(); err == nil && envMeta.Metadata != nil { + data, err := json.Marshal(envMeta.Metadata) + if err != nil { + return err + } + if err := json.Unmarshal(data, &existingMeta); err != nil { + return err + } + } else { + existingMeta = make(map[string]any) + } + + // Marshal the CCV metadata + ccvData, err := json.Marshal(ccvMeta) + if err != nil { + return err + } + + var ccvMap map[string]any + if err := json.Unmarshal(ccvData, &ccvMap); err != nil { + return err + } + + // Replace CCV-specific fields (offchainConfigs) completely + maps.Copy(existingMeta, ccvMap) + + return ds.EnvMetadata().Set(datastore.EnvMetadata{Metadata: existingMeta}) +} + func loadOrCreateCCVEnvMetadata(ds datastore.MutableDataStore) (*CCVEnvMetadata, error) { envMeta, err := ds.EnvMetadata().Get() if err != nil { diff --git a/deployments/loader_test.go b/deployments/loader_test.go index e0314e00..7c913664 100644 --- a/deployments/loader_test.go +++ b/deployments/loader_test.go @@ -1,105 +1,101 @@ -package deployments_test +package deployments import ( - "encoding/json" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/chainlink-deployments-framework/datastore" - "github.com/smartcontractkit/chainlink-ccv/aggregator/pkg/model" - "github.com/smartcontractkit/chainlink-ccv/deployments" - "github.com/smartcontractkit/chainlink-ccv/indexer/pkg/config" + "github.com/smartcontractkit/chainlink-deployments-framework/datastore" ) -func TestSaveAggregatorConfig_PreservesUnknownProperties(t *testing.T) { +func TestSaveNOPJobSpec_PreservesOtherJobSpecsForSameNOP(t *testing.T) { ds := datastore.NewMemoryDataStore() - err := ds.EnvMetadata().Set(datastore.EnvMetadata{ - Metadata: map[string]any{ - "ccipConfig": map[string]any{"setting": "enabled"}, - }, - }) + // Save first job spec for nop-1 + err := SaveNOPJobSpec(ds, "nop-1", "nop-1-verifier-1", "verifier-job-spec-1") require.NoError(t, err) - cfg := &model.Committee{ - QuorumConfigs: map[model.SourceSelector]*model.QuorumConfig{}, - } - err = deployments.SaveAggregatorConfig(ds, "test-aggregator", cfg) + // Save second job spec for same nop-1 + err = SaveNOPJobSpec(ds, "nop-1", "nop-1-executor-1", "executor-job-spec-1") require.NoError(t, err) - result := getMetadataAsMap(t, ds) - assert.Contains(t, result, "ccipConfig") - assert.Contains(t, result, "offchainConfigs") + // Verify both job specs exist + jobSpec1, err := GetNOPJobSpec(ds.Seal(), "nop-1", "nop-1-verifier-1") + require.NoError(t, err) + assert.Equal(t, "verifier-job-spec-1", jobSpec1) - ccipConfig, ok := result["ccipConfig"].(map[string]any) - require.True(t, ok) - assert.Equal(t, "enabled", ccipConfig["setting"]) + jobSpec2, err := GetNOPJobSpec(ds.Seal(), "nop-1", "nop-1-executor-1") + require.NoError(t, err) + assert.Equal(t, "executor-job-spec-1", jobSpec2) + + // Verify GetNOPJobSpecs returns both + allSpecs, err := GetNOPJobSpecs(ds.Seal(), "nop-1") + require.NoError(t, err) + assert.Len(t, allSpecs, 2) } -func TestSaveIndexerConfig_PreservesUnknownProperties(t *testing.T) { +func TestSaveNOPJobSpec_PreservesOtherNOPJobSpecs(t *testing.T) { ds := datastore.NewMemoryDataStore() - err := ds.EnvMetadata().Set(datastore.EnvMetadata{ - Metadata: map[string]any{ - "externalConfig": map[string]any{"version": "1.0"}, - }, - }) + // 
Save job spec for nop-1 + err := SaveNOPJobSpec(ds, "nop-1", "nop-1-verifier", "nop-1-verifier-content") require.NoError(t, err) - cfg := &config.GeneratedConfig{} - err = deployments.SaveIndexerConfig(ds, "test-indexer", cfg) + // Save job spec for nop-2 + err = SaveNOPJobSpec(ds, "nop-2", "nop-2-verifier", "nop-2-verifier-content") require.NoError(t, err) - result := getMetadataAsMap(t, ds) - assert.Contains(t, result, "externalConfig") - assert.Contains(t, result, "offchainConfigs") + // Verify nop-1 job spec still exists + jobSpec1, err := GetNOPJobSpec(ds.Seal(), "nop-1", "nop-1-verifier") + require.NoError(t, err) + assert.Equal(t, "nop-1-verifier-content", jobSpec1) - externalConfig, ok := result["externalConfig"].(map[string]any) - require.True(t, ok) - assert.Equal(t, "1.0", externalConfig["version"]) + // Verify nop-2 job spec exists + jobSpec2, err := GetNOPJobSpec(ds.Seal(), "nop-2", "nop-2-verifier") + require.NoError(t, err) + assert.Equal(t, "nop-2-verifier-content", jobSpec2) } -func TestMultipleSaves_PreservesAllUnknownProperties(t *testing.T) { +func TestSaveNOPJobSpec_UpdatesExistingJobSpec(t *testing.T) { ds := datastore.NewMemoryDataStore() - err := ds.EnvMetadata().Set(datastore.EnvMetadata{ - Metadata: map[string]any{ - "domainA": map[string]any{"a": 1}, - "domainB": map[string]any{"b": 2}, - }, - }) + // Save initial job spec + err := SaveNOPJobSpec(ds, "nop-1", "nop-1-verifier", "original-content") require.NoError(t, err) - aggCfg := &model.Committee{ - QuorumConfigs: map[model.SourceSelector]*model.QuorumConfig{}, - } - err = deployments.SaveAggregatorConfig(ds, "agg", aggCfg) + // Update the same job spec + err = SaveNOPJobSpec(ds, "nop-1", "nop-1-verifier", "updated-content") require.NoError(t, err) - idxCfg := &config.GeneratedConfig{} - err = deployments.SaveIndexerConfig(ds, "idx", idxCfg) + // Verify it was updated + jobSpec, err := GetNOPJobSpec(ds.Seal(), "nop-1", "nop-1-verifier") require.NoError(t, err) - - result := getMetadataAsMap(t, ds) - assert.Contains(t, result, "domainA") - assert.Contains(t, result, "domainB") - assert.Contains(t, result, "offchainConfigs") + assert.Equal(t, "updated-content", jobSpec) } -func getMetadataAsMap(t *testing.T, ds datastore.MutableDataStore) map[string]any { - t.Helper() - envMeta, err := ds.EnvMetadata().Get() - require.NoError(t, err) +func TestSaveNOPJobSpec_PreservesOtherOffchainConfigs(t *testing.T) { + ds := datastore.NewMemoryDataStore() - data, err := json.Marshal(envMeta.Metadata) + // Save an aggregator config first with actual data + err := SaveAggregatorConfig(ds, "test-agg", &model.Committee{ + QuorumConfigs: map[model.SourceSelector]*model.QuorumConfig{ + "12345": {Threshold: 2}, + }, + }) require.NoError(t, err) - var result map[string]any - err = json.Unmarshal(data, &result) + // Save a NOP job spec + err = SaveNOPJobSpec(ds, "nop-1", "nop-1-verifier", "verifier-content") require.NoError(t, err) - return result + // Verify both exist - aggregator config should still be there + cfg, err := GetAggregatorConfig(ds.Seal(), "test-agg") + require.NoError(t, err, "aggregator config should be preserved") + assert.Equal(t, uint8(2), cfg.QuorumConfigs["12345"].Threshold) + + jobSpec, err := GetNOPJobSpec(ds.Seal(), "nop-1", "nop-1-verifier") + require.NoError(t, err) + assert.Equal(t, "verifier-content", jobSpec) } diff --git a/deployments/operations/executor_config/config.go b/deployments/operations/executor_config/config.go new file mode 100644 index 00000000..7b1eff3f --- /dev/null +++ 
b/deployments/operations/executor_config/config.go @@ -0,0 +1,90 @@ +package executor_config + +import ( + "fmt" + "strconv" + + "github.com/Masterminds/semver/v3" + + execcontract "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/executor" + offrampoperations "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/offramp" + "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/rmn_remote" + "github.com/smartcontractkit/chainlink-ccv/deployments/operations/shared" + "github.com/smartcontractkit/chainlink-deployments-framework/deployment" + "github.com/smartcontractkit/chainlink-deployments-framework/operations" +) + +// ExecutorChainConfig contains the per-chain configuration for the executor. +type ExecutorChainConfig struct { + OffRampAddress string `json:"off_ramp_address"` + RmnAddress string `json:"rmn_address"` + DefaultExecutorAddress string `json:"default_executor_address"` +} + +// ExecutorGeneratedConfig contains the contract addresses resolved from the datastore. +type ExecutorGeneratedConfig struct { + ChainConfigs map[string]ExecutorChainConfig `json:"chain_configs"` +} + +// BuildConfigInput contains the input parameters for building the executor config. +type BuildConfigInput struct { + ExecutorQualifier string + ChainSelectors []uint64 +} + +// BuildConfigOutput contains the generated executor configuration. +type BuildConfigOutput struct { + Config *ExecutorGeneratedConfig +} + +// BuildConfigDeps contains the dependencies for building the executor config. +type BuildConfigDeps struct { + Env deployment.Environment +} + +// BuildConfig is an operation that generates the executor configuration +// by querying the datastore for contract addresses. 
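+//
+// A sketch of invoking it directly (qualifier and selectors are placeholders;
+// in this change the GenerateExecutorConfig sequence wraps this operation):
+//
+//	out, err := operations.ExecuteOperation(b, BuildConfig,
+//		BuildConfigDeps{Env: env},
+//		BuildConfigInput{
+//			ExecutorQualifier: "executor-qualifier",
+//			ChainSelectors:    []uint64{1337, 2337},
+//		})
+//	if err == nil {
+//		_ = out.Output.Config // per-chain off-ramp, RMN, and executor addresses
+//	}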
+var BuildConfig = operations.NewOperation( + "build-executor-config", + semver.MustParse("1.0.0"), + "Builds the executor configuration from datastore contract addresses", + func(b operations.Bundle, deps BuildConfigDeps, input BuildConfigInput) (BuildConfigOutput, error) { + ds := deps.Env.DataStore + + chainConfigs := make(map[string]ExecutorChainConfig) + + for _, chainSelector := range input.ChainSelectors { + chainSelectorStr := strconv.FormatUint(chainSelector, 10) + + offRampAddr, err := shared.ResolveContractAddress( + ds, chainSelector, "", offrampoperations.ContractType) + if err != nil { + return BuildConfigOutput{}, fmt.Errorf("failed to get off ramp address for chain %d: %w", chainSelector, err) + } + + rmnRemoteAddr, err := shared.ResolveContractAddress( + ds, chainSelector, "", rmn_remote.ContractType) + if err != nil { + return BuildConfigOutput{}, fmt.Errorf("failed to get rmn remote address for chain %d: %w", chainSelector, err) + } + + executorAddr, err := shared.ResolveContractAddress( + ds, chainSelector, input.ExecutorQualifier, execcontract.ProxyType) + if err != nil { + return BuildConfigOutput{}, fmt.Errorf("failed to get executor proxy address for chain %d: %w", chainSelector, err) + } + + chainConfigs[chainSelectorStr] = ExecutorChainConfig{ + OffRampAddress: offRampAddr, + RmnAddress: rmnRemoteAddr, + DefaultExecutorAddress: executorAddr, + } + } + + return BuildConfigOutput{ + Config: &ExecutorGeneratedConfig{ + ChainConfigs: chainConfigs, + }, + }, nil + }, +) diff --git a/deployments/operations/indexer_config/config.go b/deployments/operations/indexer_config/config.go index bd523ef7..e7d46e95 100644 --- a/deployments/operations/indexer_config/config.go +++ b/deployments/operations/indexer_config/config.go @@ -63,7 +63,6 @@ var BuildConfig = operations.NewOperation( if err != nil { return BuildConfigOutput{}, fmt.Errorf("failed to get resolver addresses for verifier %q (qualifier %q): %w", name, qualifier, err) } - verifiers = append(verifiers, GeneratedVerifier{ Name: name, IssuerAddresses: addresses, diff --git a/deployments/operations/shared/resolve.go b/deployments/operations/shared/resolve.go new file mode 100644 index 00000000..7a123683 --- /dev/null +++ b/deployments/operations/shared/resolve.go @@ -0,0 +1,36 @@ +package shared + +import ( + "fmt" + + "github.com/smartcontractkit/chainlink-deployments-framework/datastore" + "github.com/smartcontractkit/chainlink-deployments-framework/deployment" +) + +// ResolveContractAddress looks up a single contract address from the datastore +// using the provided chain selector, qualifier, and contract type. +// It returns an error if no contract is found or if multiple contracts are found. 
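+//
+// Caller-side sketch (the selector variable and qualifier are placeholders;
+// the contract type constants come from the generated operations packages,
+// as in the config builders in this change):
+//
+//	addr, err := shared.ResolveContractAddress(
+//		ds, chainSelector, "committee-qualifier", committee_verifier.ResolverType)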
+func ResolveContractAddress( + ds datastore.DataStore, + chainSelector uint64, + qualifier string, + contractType deployment.ContractType, +) (string, error) { + refs := ds.Addresses().Filter( + datastore.AddressRefByChainSelector(chainSelector), + datastore.AddressRefByQualifier(qualifier), + datastore.AddressRefByType(datastore.ContractType(contractType)), + ) + + if len(refs) == 0 { + return "", fmt.Errorf("no contract found for chain %d with qualifier %q and type %q", + chainSelector, qualifier, contractType) + } + + if len(refs) > 1 { + return "", fmt.Errorf("multiple contracts found for chain %d with qualifier %q and type %q", + chainSelector, qualifier, contractType) + } + + return refs[0].Address, nil +} diff --git a/deployments/operations/verifier_config/config.go b/deployments/operations/verifier_config/config.go new file mode 100644 index 00000000..4151a0b5 --- /dev/null +++ b/deployments/operations/verifier_config/config.go @@ -0,0 +1,98 @@ +package verifier_config + +import ( + "fmt" + "strconv" + + "github.com/Masterminds/semver/v3" + + "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/committee_verifier" + "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/executor" + onrampoperations "github.com/smartcontractkit/chainlink-ccip/ccv/chains/evm/deployment/v1_7_0/operations/onramp" + "github.com/smartcontractkit/chainlink-ccip/chains/evm/deployment/v1_6_0/operations/rmn_remote" + "github.com/smartcontractkit/chainlink-ccv/deployments/operations/shared" + "github.com/smartcontractkit/chainlink-deployments-framework/deployment" + "github.com/smartcontractkit/chainlink-deployments-framework/operations" +) + +// VerifierGeneratedConfig contains the contract addresses resolved from the datastore. +type VerifierGeneratedConfig struct { + CommitteeVerifierAddresses map[string]string `json:"committee_verifier_addresses"` + OnRampAddresses map[string]string `json:"on_ramp_addresses"` + DefaultExecutorOnRampAddresses map[string]string `json:"default_executor_on_ramp_addresses"` + RMNRemoteAddresses map[string]string `json:"rmn_remote_addresses"` +} + +// BuildConfigInput contains the input parameters for building the verifier config. +type BuildConfigInput struct { + CommitteeQualifier string + ExecutorQualifier string + ChainSelectors []uint64 +} + +// BuildConfigOutput contains the generated verifier configuration. +type BuildConfigOutput struct { + Config *VerifierGeneratedConfig +} + +// BuildConfigDeps contains the dependencies for building the verifier config. +type BuildConfigDeps struct { + Env deployment.Environment +} + +// BuildConfig is an operation that generates the verifier configuration +// by querying the datastore for contract addresses. 
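+//
+// The resulting config marshals to JSON keyed by decimal chain selector; a
+// sketch of the shape (addresses are placeholders):
+//
+//	{
+//	  "committee_verifier_addresses":       {"1337": "0xaaaa..."},
+//	  "on_ramp_addresses":                  {"1337": "0xbbbb..."},
+//	  "default_executor_on_ramp_addresses": {"1337": "0xcccc..."},
+//	  "rmn_remote_addresses":               {"1337": "0xdddd..."}
+//	}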
+var BuildConfig = operations.NewOperation( + "build-verifier-config", + semver.MustParse("1.0.0"), + "Builds the verifier configuration from datastore contract addresses", + func(b operations.Bundle, deps BuildConfigDeps, input BuildConfigInput) (BuildConfigOutput, error) { + ds := deps.Env.DataStore + + committeeVerifierAddresses := make(map[string]string) + onRampAddresses := make(map[string]string) + defaultExecutorOnRampAddresses := make(map[string]string) + rmnRemoteAddresses := make(map[string]string) + + for _, chainSelector := range input.ChainSelectors { + chainSelectorStr := strconv.FormatUint(chainSelector, 10) + + committeeVerifierAddr, err := shared.ResolveContractAddress( + ds, chainSelector, input.CommitteeQualifier, committee_verifier.ResolverType) + if err != nil { + return BuildConfigOutput{}, fmt.Errorf("failed to get committee verifier address for chain %d: %w", chainSelector, err) + } + committeeVerifierAddresses[chainSelectorStr] = committeeVerifierAddr + + onRampAddr, err := shared.ResolveContractAddress( + ds, chainSelector, "", onrampoperations.ContractType) + if err != nil { + return BuildConfigOutput{}, fmt.Errorf("failed to get on ramp address for chain %d: %w", chainSelector, err) + } + onRampAddresses[chainSelectorStr] = onRampAddr + + executorAddr, err := shared.ResolveContractAddress( + ds, chainSelector, input.ExecutorQualifier, executor.ProxyType) + if err != nil { + return BuildConfigOutput{}, fmt.Errorf("failed to get executor proxy address for chain %d: %w", chainSelector, err) + } + defaultExecutorOnRampAddresses[chainSelectorStr] = executorAddr + + rmnRemoteAddr, err := shared.ResolveContractAddress( + ds, chainSelector, "", rmn_remote.ContractType) + if err != nil { + return BuildConfigOutput{}, fmt.Errorf("failed to get rmn remote address for chain %d: %w", chainSelector, err) + } + rmnRemoteAddresses[chainSelectorStr] = rmnRemoteAddr + } + + return BuildConfigOutput{ + Config: &VerifierGeneratedConfig{ + CommitteeVerifierAddresses: committeeVerifierAddresses, + OnRampAddresses: onRampAddresses, + DefaultExecutorOnRampAddresses: defaultExecutorOnRampAddresses, + RMNRemoteAddresses: rmnRemoteAddresses, + }, + }, nil + }, +) diff --git a/deployments/sequences/generate_executor_config.go b/deployments/sequences/generate_executor_config.go new file mode 100644 index 00000000..edbf6bdc --- /dev/null +++ b/deployments/sequences/generate_executor_config.go @@ -0,0 +1,51 @@ +package sequences + +import ( + "fmt" + + "github.com/Masterminds/semver/v3" + + "github.com/smartcontractkit/chainlink-deployments-framework/deployment" + "github.com/smartcontractkit/chainlink-deployments-framework/operations" + + executorconfig "github.com/smartcontractkit/chainlink-ccv/deployments/operations/executor_config" +) + +// GenerateExecutorConfigInput contains the input for the executor config generation sequence. +type GenerateExecutorConfigInput struct { + ExecutorQualifier string + ChainSelectors []uint64 +} + +// GenerateExecutorConfigOutput contains the output of the executor config generation sequence. +type GenerateExecutorConfigOutput struct { + Config *executorconfig.ExecutorGeneratedConfig +} + +// GenerateExecutorConfigDeps contains the dependencies for the sequence. +type GenerateExecutorConfigDeps struct { + Env deployment.Environment +} + +// GenerateExecutorConfig is a sequence that generates the executor configuration +// by querying the datastore for contract addresses. 
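+//
+// A minimal sketch of running it (assumes operations.ExecuteSequence from
+// chainlink-deployments-framework, whose report exposes Output like
+// ExecuteOperation above; inputs are placeholders):
+//
+//	report, err := operations.ExecuteSequence(b, GenerateExecutorConfig,
+//		GenerateExecutorConfigDeps{Env: env},
+//		GenerateExecutorConfigInput{
+//			ExecutorQualifier: "executor-qualifier",
+//			ChainSelectors:    []uint64{1337},
+//		})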
+var GenerateExecutorConfig = operations.NewSequence( + "generate-executor-config", + semver.MustParse("1.0.0"), + "Generates the executor configuration from datastore contract addresses", + func(b operations.Bundle, deps GenerateExecutorConfigDeps, input GenerateExecutorConfigInput) (GenerateExecutorConfigOutput, error) { + result, err := operations.ExecuteOperation(b, executorconfig.BuildConfig, executorconfig.BuildConfigDeps{ + Env: deps.Env, + }, executorconfig.BuildConfigInput{ + ExecutorQualifier: input.ExecutorQualifier, + ChainSelectors: input.ChainSelectors, + }) + if err != nil { + return GenerateExecutorConfigOutput{}, fmt.Errorf("failed to build executor config: %w", err) + } + + return GenerateExecutorConfigOutput{ + Config: result.Output.Config, + }, nil + }, +) diff --git a/deployments/sequences/generate_verifier_config.go b/deployments/sequences/generate_verifier_config.go new file mode 100644 index 00000000..d2769636 --- /dev/null +++ b/deployments/sequences/generate_verifier_config.go @@ -0,0 +1,53 @@ +package sequences + +import ( + "fmt" + + "github.com/Masterminds/semver/v3" + + "github.com/smartcontractkit/chainlink-deployments-framework/deployment" + "github.com/smartcontractkit/chainlink-deployments-framework/operations" + + verifierconfig "github.com/smartcontractkit/chainlink-ccv/deployments/operations/verifier_config" +) + +// GenerateVerifierConfigInput contains the input for the verifier config generation sequence. +type GenerateVerifierConfigInput struct { + CommitteeQualifier string + ExecutorQualifier string + ChainSelectors []uint64 +} + +// GenerateVerifierConfigOutput contains the output of the verifier config generation sequence. +type GenerateVerifierConfigOutput struct { + Config *verifierconfig.VerifierGeneratedConfig +} + +// GenerateVerifierConfigDeps contains the dependencies for the sequence. +type GenerateVerifierConfigDeps struct { + Env deployment.Environment +} + +// GenerateVerifierConfig is a sequence that generates the verifier configuration +// by querying the datastore for contract addresses. +var GenerateVerifierConfig = operations.NewSequence( + "generate-verifier-config", + semver.MustParse("1.0.0"), + "Generates the verifier configuration from datastore contract addresses", + func(b operations.Bundle, deps GenerateVerifierConfigDeps, input GenerateVerifierConfigInput) (GenerateVerifierConfigOutput, error) { + result, err := operations.ExecuteOperation(b, verifierconfig.BuildConfig, verifierconfig.BuildConfigDeps{ + Env: deps.Env, + }, verifierconfig.BuildConfigInput{ + CommitteeQualifier: input.CommitteeQualifier, + ExecutorQualifier: input.ExecutorQualifier, + ChainSelectors: input.ChainSelectors, + }) + if err != nil { + return GenerateVerifierConfigOutput{}, fmt.Errorf("failed to build verifier config: %w", err) + } + + return GenerateVerifierConfigOutput{ + Config: result.Output.Config, + }, nil + }, +)
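+
+// A hedged end-to-end sketch: run the verifier sequence and marshal its config
+// for a service's wiring (placeholder inputs; same ExecuteSequence assumption
+// as for the executor sequence, and an encoding/json import is assumed):
+//
+//	report, err := operations.ExecuteSequence(b, GenerateVerifierConfig,
+//		GenerateVerifierConfigDeps{Env: env},
+//		GenerateVerifierConfigInput{
+//			CommitteeQualifier: "committee-qualifier",
+//			ExecutorQualifier:  "executor-qualifier",
+//			ChainSelectors:     []uint64{1337},
+//		})
+//	if err == nil {
+//		raw, _ := json.MarshalIndent(report.Output.Config, "", "  ")
+//		_ = raw // e.g. embed in the verifier service configuration
+//	}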