diff --git a/node_management/config_migration/.gitignore b/node_management/config_migration/.gitignore new file mode 100644 index 00000000..33a954f4 --- /dev/null +++ b/node_management/config_migration/.gitignore @@ -0,0 +1,13 @@ +# Ignore all files in the ruff cache directory +**/.ruff_cache + +**/build/ + +# egg-info +**/*.egg-info/ + +# python cache +**/__pycache__/ + +# pytest +**/.hypothesis/ \ No newline at end of file diff --git a/node_management/config_migration/README.md b/node_management/config_migration/README.md new file mode 100644 index 00000000..aae89263 --- /dev/null +++ b/node_management/config_migration/README.md @@ -0,0 +1,220 @@ + +### Usage Example + + +alias rpc-v8=~/Documents/share/repo/smr-moonshot-testnet/target/devopt/rpc_node +alias rpc-v9=~/Documents/share/repo/smr-moonshot/target/release/rpc_node + +alias supra-v8="~/Documents/share/repo/smr-moonshot-testnet/target/devopt/supra" +alias supra-v9="~/Documents/share/repo/smr-moonshot/target/release/supra" + + +1. Install the `migrate-config` tool + +```sh +pip install . +``` + +2. Migrate rpc + +```sh +# Migrate rpc config from v7 to v9 +migrate-config rpc -p v7-v9 -f config.toml -t config.toml +# Migrate db from v7 to v8 +rpc-v8 migrate-db config.toml +# Migrate db from v8 to v9 +rpc-v9 migrate-db config.toml +``` + +3. Migrate smr/validator + +```sh +# Migrate cli profile from v7 to v8 +supra-v8 migrate --network localnet +cp validator_identity.pem node_identity.pem +# Migrate cli profile from v8 to v9 +supra-v9 profile migrate + +# Migrate smr_settings from v7 to v9 +migrate-config smr -p v7-v9 -f smr_settings.toml -t smr_settings.toml +# Migrate db from v7 to v9 +supra-v9 data migrate -p smr_settings.toml +``` + + +### Migrate from v7 to v9 example flow + +#### RPC + +1. Config migration + +$ migrate-config rpc -p v7-v9 -f config.toml -t config.toml + + Running migration function: migrate_v7_to_v9 + + Scanning root level configuration ... 
+    ✓ `consensus_access_tokens` not found in original config, using new version's default value: []
+    `allowed_origin = block_to_tx_ordered: Migrated 8537 records, up to 239 block height
+    certified_block -> certified_block_dehydrated: Migrated 244 records, up to 244 block height
+    databases_checked:
+    - chain_store
+    - archive
+
+#### SMR
+
+1. Profile/Identity migration
+
+**You need both the v8 and v9 supra binaries to run the profile migration.**
+
+$ supra-v8 migrate --network mainnet
+
+$ cp validator_identity.pem node_identity.pem
+
+$ supra-v9 profile migrate
+
+2. Config migration
+
+$ migrate-config smr -p v7-v9 -f smr_settings.toml -t smr_settings.toml
+
+    Running migration function: migrate_v7_to_v9
+
+    Scanning node root configuration ...
+    `connection_refresh_timeout_sec = 2` is not recommended for new version.
+    Do you want to apply the recommended config: `connection_refresh_timeout_sec = 1`? [y/N]: y
+    ✓ Apply recommended config: `connection_refresh_timeout_sec = 1`
+
+    Scanning ledger configuration ...
+    ✓ `enable_pruning` not found in original config, using new version's default value: True
+    ✓ `enable_snapshots` not found in original config, using new version's default value: False
+
+    Scanning chain store configuration ...
+    ✓ `enable_snapshots` not found in original config, using new version's default value: False
+
+    Scanning prune configuration ...
+
+    Scanning mempool configuration ...
+    `max_batch_delay_ms = 1500` is not recommended for new version.
+    Do you want to apply the recommended config: `max_batch_delay_ms = 500`? [y/N]: y
+    ✓ Apply recommended config: `max_batch_delay_ms = 500`
+
+    Scanning moonshot configuration ...
+    `message_recency_bound_rounds = 20` is not recommended for new version.
+    Do you want to apply the recommended config: `message_recency_bound_rounds = 1000`? [y/N]: y
+    ✓ Apply recommended config: `message_recency_bound_rounds = 1000`
+    `sync_retry_delay_ms = 2000` is not recommended for new version.
+ Do you want to apply the recommended config: `sync_retry_delay_ms = 1000`? [y/N]: y + ✓ Apply recommended config: `sync_retry_delay_ms = 1000` + `timeout_delay_ms = 5000` is not recommended for new version. + Do you want to apply the recommended config: `timeout_delay_ms = 3500`? [y/N]: y + ✓ Apply recommended config: `timeout_delay_ms = 3500` + Writing new config to /tmp/new_smr.toml + |----------------- Begin diff v7 vs v9 -----------------| + --- v7 + +++ v9 + .. + |----------------- End diff v7 vs v9 -----------------| + + ###################################################################### + # Config migrated from tests/smr_settings_v7.1.x.toml to /tmp/new_smr.toml. + # + # Please review the diff above for changes made during migration. + # + # Please ensure to use the new config file for target binary version. + ###################################################################### + +3. DB migration + +**You need v9 supra binary to run the db migration.** + +$ supra-v9 data migrate -p smr_settings.toml + + Counting the number of entries in certified_block... + Migrating certified_block to certified_block_dehydrated: [00:00:00] ████████████████████ 69/69 00:00:00 + Counting the number of entries in uncommitted_block... + Preparing to clean up uncommitted_block: [00:00:00] ████████████████████ 74/74 00:00:00 + Cleaning up uncommitted_block: [00:00:00] ████████████████████ 4/4 00:00:00 + Counting the number of entries in certified_block... + Counting the number of entries in certified_block_dehydrated... + Counting the number of entries in uncommitted_block... + Counting the number of entries in qc... + Verifying certified_block_dehydrated: [00:00:00] ████████████████████ 70/70 00:00:00 + Counting the number of entries to remove from prune_index... 
+ Cleaning up prune index: [00:00:00] ████████████████████ 70/70 00:00:00 + dropped: + - certified_block + migrated: + certified_block -> certified_block_dehydrated: Migrated 70 records, up to 244 block height + databases_checked: + - chain_store + + +### Test harness of migrate-config script + +`PYTHONPATH=src pytest` \ No newline at end of file diff --git a/node_management/config_migration/pyproject.toml b/node_management/config_migration/pyproject.toml new file mode 100644 index 00000000..abc3a956 --- /dev/null +++ b/node_management/config_migration/pyproject.toml @@ -0,0 +1,27 @@ +[project] +name = "migrate-config" +version = "0.1.0" +description = "Unified CLI tool to migrate Supra RPC and SMR configs." +readme = "README.md" +requires-python = ">=3.10" +dependencies = [ + "tomlkit>=0.13.2", + "click>=8.0.0" +] + +[project.scripts] +migrate-config = "cli.main:main" + +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[tool.setuptools] +package-dir = {"" = "src"} + +[tool.setuptools.packages.find] +where = ["src"] + +[tool.setuptools.package-data] +rpc_config = ["rpc_config_v9_1_x_mainnet_template.toml"] +smr_settings = ["smr_settings_v9_1_x_mainnet_template.toml"] diff --git a/node_management/config_migration/src/cli/__init__.py b/node_management/config_migration/src/cli/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/node_management/config_migration/src/cli/main.py b/node_management/config_migration/src/cli/main.py new file mode 100644 index 00000000..29efbfe5 --- /dev/null +++ b/node_management/config_migration/src/cli/main.py @@ -0,0 +1,77 @@ +import click +from rpc_config.migrate_path import RPC_CONFIG_MIGRATE_PATH +from rpc_config.migrate_path import run_migration as migrate_rpc_config +from smr_settings.migrate_path import SMR_SETTINGS_MIGRATE_PATH +from smr_settings.migrate_path import run_migration as migrate_smr_config +import common.globals + + +@click.group() +def main(): + 
"""Migration CLI for Supra configs."""
+
+
+@main.command()
+@click.option(
+    "--migrate-path",
+    "-p",
+    required=True,
+    type=click.Choice(RPC_CONFIG_MIGRATE_PATH, case_sensitive=True),
+    help=f"Migration path (choices: {', '.join(RPC_CONFIG_MIGRATE_PATH)})",
+)
+@click.option(
+    "--from-file",
+    "-f",
+    required=True,
+    type=click.Path(exists=True),
+    help="Source config file",
+)
+@click.option(
+    "--to-file", "-t", required=True, type=click.Path(), help="Output config file"
+)
+@click.option(
+    "--assume-yes",
+    "-y",
+    is_flag=True,
+    default=False,
+    help="Assume yes for all prompts (default: False)",
+)
+def rpc(migrate_path, from_file, to_file, assume_yes):
+    """Migrate RPC config."""
+    common.globals.ASSUME_YES = assume_yes
+    migrate_rpc_config(migrate_path, from_file, to_file)
+
+
+@main.command()
+@click.option(
+    "--migrate-path",
+    "-p",
+    required=True,
+    type=click.Choice(SMR_SETTINGS_MIGRATE_PATH, case_sensitive=True),
+    help=f"Migration path (choices: {', '.join(SMR_SETTINGS_MIGRATE_PATH)})",
+)
+@click.option(
+    "--from-file",
+    "-f",
+    required=True,
+    type=click.Path(exists=True),
+    help="Source config file",
+)
+@click.option(
+    "--to-file", "-t", required=True, type=click.Path(), help="Output config file"
+)
+@click.option(
+    "--assume-yes",
+    "-y",
+    is_flag=True,
+    default=False,
+    help="Assume yes for all prompts (default: False)",
+)
+def smr(migrate_path, from_file, to_file, assume_yes):
+    """Migrate SMR config."""
+    common.globals.ASSUME_YES = assume_yes
+    migrate_smr_config(migrate_path, from_file, to_file)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/node_management/config_migration/src/common/__init__.py b/node_management/config_migration/src/common/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/node_management/config_migration/src/common/globals.py b/node_management/config_migration/src/common/globals.py
new file mode 100644
index 00000000..9b8bd68b
--- /dev/null
+++
b/node_management/config_migration/src/common/globals.py @@ -0,0 +1 @@ +ASSUME_YES = False diff --git a/node_management/config_migration/src/common/migration.py b/node_management/config_migration/src/common/migration.py new file mode 100644 index 00000000..9c145136 --- /dev/null +++ b/node_management/config_migration/src/common/migration.py @@ -0,0 +1,95 @@ +import typing as ty +import tomlkit +from copy import deepcopy +from . import utils +import shutil +from common.globals import ASSUME_YES + + +class MigrationPathSet: + """ + Base class for migration paths. + """ + + def __init__(self, migrate_paths: ty.Dict[str, ty.List[ty.Callable]]): + self.migrate_paths = migrate_paths + + def get_versions(self, key: str) -> ty.Tuple[str, str]: + """Split the key into from_version and to_version.""" + if key not in self.migrate_paths: + raise ValueError(f"Invalid key: {key}") + from_version, to_version = key.split("-", 1) + return from_version, to_version + + def get_migration_functions(self, key: str) -> ty.List[ty.Callable]: + """Get the migration functions for the given key.""" + if key not in self.migrate_paths: + raise ValueError(f"Unknown migration path: {key}") + return self.migrate_paths[key] + + +class Migration: + """ + Top level migration class that handles the migration of config files. 
+ """ + + def __init__(self, migrate_path: ty.Dict[str, ty.List[ty.Callable]]): + self.migrate_path = MigrationPathSet(migrate_path) + + def migrate_config(self, migrate_choice: str, from_path: str, to_path: str): + migrate_functions = self.migrate_path.get_migration_functions(migrate_choice) + from_version, to_version = self.migrate_path.get_versions(migrate_choice) + default_backup_path = f"{from_path}_{from_version}.bak" + + with open(from_path, "r") as f: + toml_data = tomlkit.load(f) + + original_toml_data = deepcopy(toml_data) + + for fn in migrate_functions: + print(f"Running migration function: {fn.__name__}") + toml_data = fn(toml_data) + + # Check before overwriting the same file. + if from_path == to_path: + print( + f"Warning: The source and destination paths are the same ({from_path})." + ) + print( + f"A backup of your original config will be saved to: {default_backup_path}" + ) + confirm = utils.prompt_or_assume_yes( + "This will overwrite your original config file. Continue?", ASSUME_YES + ) + if not confirm: + raise SystemExit("Migration aborted by user. No changes were made.") + # Backup old config + print(f"Backing up old config to {default_backup_path}") + shutil.copyfile(from_path, default_backup_path) + + # Write new config + print(f"Writing new config to {to_path}") + with open(to_path, "w") as f: + tomlkit.dump(toml_data, f) + # Print the diff + from_str = tomlkit.dumps(original_toml_data).splitlines(keepends=True) + to_str = tomlkit.dumps(toml_data).splitlines(keepends=True) + + utils.unified_diff( + from_str, + to_str, + fromfile=from_version, + tofile=to_version, + ) + + print( + f""" + ###################################################################### + # Config migrated from {from_path} to {to_path}. + # + # Please review the diff above for changes made during migration. + # + # Please ensure to use the new config file for target binary version. 
+ ###################################################################### + """ + ) diff --git a/node_management/config_migration/src/common/utils.py b/node_management/config_migration/src/common/utils.py new file mode 100644 index 00000000..675a5afb --- /dev/null +++ b/node_management/config_migration/src/common/utils.py @@ -0,0 +1,86 @@ +import difflib +import tomlkit + + +def unified_diff(from_str, to_str, fromfile, tofile): + print(f"|----------------- Begin diff {fromfile} vs {tofile} -----------------|") + diff = difflib.unified_diff(from_str, to_str, fromfile=fromfile, tofile=tofile) + __print_colored_diff(diff) + print(f"|----------------- End diff {fromfile} vs {tofile} -----------------|") + + +def __print_colored_diff(diff): + # The color is added here manually using ANSI escape codes. + for line in diff: + if line.startswith("+") and not line.startswith("+++"): + print(f"\033[32m{line}\033[0m", end="") # Green for additions + elif line.startswith("-") and not line.startswith("---"): + print(f"\033[31m{line}\033[0m", end="") # Red for deletions + elif line.startswith("@@"): + print(f"\033[36m{line}\033[0m", end="") # Cyan for hunk headers + else: + print(line, end="") + + +def print_with_checkmark(message): + """ + Print a message with a checkmark. + """ + print(f"✓ {message}") + + +def prompt_or_assume_yes(message, assume_yes=False) -> bool: + """ + Prompt the user for confirmation or assume 'yes' for all prompts + """ + if not assume_yes: + response = input(f"{message} [y/N]: ").strip().lower() + return response in ("y", "yes") + else: + print(f"{message} (assuming yes)") + return True + + +def truncate(val, max_len=50): + """ + Truncate a string representation of a value to a maximum length. + """ + s = str(val) + return s if len(s) <= max_len else s[: max_len - 3] + "..." 
+ + +def scan_and_recommend_updates( + original_table: tomlkit.items.Table, to_table: tomlkit.items.Table +): + """ + Scan the original table and recommend updates to the new version's table. + + Only scan top level keys and values (i.e. skip nested tables). + + If a key exists in both tables, compare their values and prompt the user + to either keep the original value or use the new version's recommended value. + """ + from .globals import ASSUME_YES + + for k, v in to_table.items(): + if not isinstance(v, tomlkit.items.AbstractTable): + if k in original_table: + if to_table[k] != original_table[k]: + use_recommended = prompt_or_assume_yes( + f"`{k} = {truncate(original_table[k])}` is not recommended for new version.\n" + f"Do you want to apply the recommended config: `{k} = {truncate(to_table[k])}`?", + ASSUME_YES, + ) + if use_recommended: + print_with_checkmark( + f"Apply recommended config: `{k} = {truncate(to_table[k])}`" + ) + else: + print_with_checkmark( + f"Keep original config: `{k} = {truncate(original_table[k])}`" + ) + to_table[k] = original_table[k] + else: + print_with_checkmark( + f"`{k}` not found in original config, using new version's default value: {truncate(v)}" + ) diff --git a/node_management/config_migration/src/rpc_config/__init__.py b/node_management/config_migration/src/rpc_config/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/node_management/config_migration/src/rpc_config/from_v7_to_v9.py b/node_management/config_migration/src/rpc_config/from_v7_to_v9.py new file mode 100644 index 00000000..cb4a7866 --- /dev/null +++ b/node_management/config_migration/src/rpc_config/from_v7_to_v9.py @@ -0,0 +1,154 @@ +""" +This module provides migration utilities to upgrade Supra's RPC configuration files from version v7 to v9. + +Migration Steps: +---------------- +- Loads a v9 template configuration as the migration base. +- Migrates root-level fields such as 'supra_committees_config' and 'bind_addr'. 
+- Migrates synchronization WebSocket settings, including consensus RPC and certificate paths. +- Migrates chain state assembler configuration. +- Migrates database paths for archive, chain store, and ledger components. +- Migrates snapshot configuration paths if present; skips if not found in v7. +- Migrates prune configuration if present; skips if not found in v7. +- For each section, scans and recommends updates for any legacy fields. +- Exit early if any unexpected sections in v7 (e.g., 'synchronization', 'chain_state_assembler') are present before migration. + +The main entrypoint is `migrate_v7_to_v9(v7_toml_data)`, which returns a new TOML data structure compatible with v9. +""" + +import tomlkit +import importlib.resources +from common.utils import ( + scan_and_recommend_updates, +) + + +def __migrate_root_config(v7_toml_data, v9_toml_data): + v9_toml_data["supra_committees_config"] = v7_toml_data["supra_committees_config"] + + v9_toml_data["bind_addr"] = v7_toml_data["bind_addr"] + + print("\nScanning root level configuration ...") + scan_and_recommend_updates(v7_toml_data, v9_toml_data) + + +def __migrate_sync_ws_config(v7_toml_data, v9_toml_data): + if "synchronization" in v7_toml_data: + raise SystemExit( + "Error: [synchronization] table should not exist in v7 config. Please check your migration path matching the version of your config file." 
+ ) + v9_sync_ws_config = v9_toml_data["synchronization"]["ws"] + v9_sync_ws_config["consensus_rpc"] = v7_toml_data["consensus_rpc"] + + v9_sync_ws_certificates = v9_sync_ws_config["certificates"] + v9_sync_ws_certificates["cert_path"] = v7_toml_data["consensus_client_cert_path"] + v9_sync_ws_certificates["private_key_path"] = v7_toml_data[ + "consensus_client_private_key_path" + ] + v9_sync_ws_certificates["root_ca_cert_path"] = v7_toml_data[ + "consensus_root_ca_cert_path" + ] + + +def __migrate_chain_state_assembler_config(v7_toml_data, v9_toml_data): + if "chain_state_assembler" in v7_toml_data: + raise SystemExit( + "Error: [chain_state_assembler] table should not exist in v7 config." + ) + + print("\nScanning chain state assembler configuration ...") + scan_and_recommend_updates(v7_toml_data, v9_toml_data["chain_state_assembler"]) + + +def __migrate_db_archive_config(v7_toml_data, v9_toml_data): + v9_db_archive_config = v9_toml_data["database_setup"]["dbs"]["archive"]["rocks_db"] + v7_db_archive_config = v7_toml_data["database_setup"]["dbs"]["archive"]["rocks_db"] + + v9_db_archive_config["path"] = v7_db_archive_config["path"] + + print("\nScanning archive configuration ...") + scan_and_recommend_updates(v7_db_archive_config, v9_db_archive_config) + + +def __migrate_db_chain_store_config(v7_toml_data, v9_toml_data): + v9_db_chain_store_config = v9_toml_data["database_setup"]["dbs"]["chain_store"][ + "rocks_db" + ] + v7_db_chain_store_config = v7_toml_data["database_setup"]["dbs"]["chain_store"][ + "rocks_db" + ] + + v9_db_chain_store_config["path"] = v7_db_chain_store_config["path"] + + print("\nScanning chain store configuration ...") + scan_and_recommend_updates(v7_db_chain_store_config, v9_db_chain_store_config) + + +def __migrate_db_ledger_config(v7_toml_data, v9_toml_data): + v9_db_ledger_config = v9_toml_data["database_setup"]["dbs"]["ledger"]["rocks_db"] + v7_db_ledger_config = v7_toml_data["database_setup"]["dbs"]["ledger"]["rocks_db"] + 
v9_db_ledger_config["path"] = v7_db_ledger_config["path"] + print("\nScanning ledger configuration ...") + scan_and_recommend_updates(v7_db_ledger_config, v9_db_ledger_config) + + +def __migrate_snapshot_config(v7_toml_data, v9_toml_data): + # Optional config + if "snapshot_config" not in v7_toml_data["database_setup"]: + print( + "Warning: [database_setup.snapshot_config] table not found in v7 config. Skipping migration." + ) + return + if "snapshot_config" not in v9_toml_data["database_setup"]: + raise SystemExit( + "Error: [database_setup.snapshot_config] table should exist in v9 template." + ) + + v9_snapshot_config = v9_toml_data["database_setup"]["snapshot_config"] + v7_snapshot_config = v7_toml_data["database_setup"]["snapshot_config"] + + v9_snapshot_config["path"] = v7_snapshot_config["path"] + + print("\nScanning snapshot configuration ...") + scan_and_recommend_updates(v7_snapshot_config, v9_snapshot_config) + + +def __migrate_prune_config(v7_toml_data, v9_toml_data): + # Optional config + if "prune_config" not in v7_toml_data["database_setup"]: + print( + "Warning: [database_setup.prune_config] table not found in v7 config. Skipping migration." + ) + return + if "prune_config" not in v9_toml_data["database_setup"]: + raise SystemExit( + "Error: [database_setup.prune_config] table should exist in v9 template." + ) + v9_prune_config = v9_toml_data["database_setup"]["prune_config"] + v7_prune_config = v7_toml_data["database_setup"]["prune_config"] + + print("\nScanning prune configuration ...") + scan_and_recommend_updates(v7_prune_config, v9_prune_config) + + +def migrate_v7_to_v9(v7_toml_data): + """ + Returns a new TOML data structure that is compatible with RPC config v9. 
+ """ + + with ( + importlib.resources.files(__package__) + .joinpath("rpc_config_v9_1_x_mainnet_template.toml") + .open("r") as f + ): + template = f.read() + v9_toml_data = tomlkit.parse(template) + __migrate_root_config(v7_toml_data, v9_toml_data) + __migrate_sync_ws_config(v7_toml_data, v9_toml_data) + __migrate_chain_state_assembler_config(v7_toml_data, v9_toml_data) + __migrate_db_archive_config(v7_toml_data, v9_toml_data) + __migrate_db_ledger_config(v7_toml_data, v9_toml_data) + __migrate_db_chain_store_config(v7_toml_data, v9_toml_data) + __migrate_snapshot_config(v7_toml_data, v9_toml_data) + __migrate_prune_config(v7_toml_data, v9_toml_data) + return v9_toml_data diff --git a/node_management/config_migration/src/rpc_config/migrate_path.py b/node_management/config_migration/src/rpc_config/migrate_path.py new file mode 100644 index 00000000..00018344 --- /dev/null +++ b/node_management/config_migration/src/rpc_config/migrate_path.py @@ -0,0 +1,13 @@ +from .from_v7_to_v9 import migrate_v7_to_v9 +from common import migration + + +RPC_CONFIG_MIGRATE_PATH = { + "v7-v9": [migrate_v7_to_v9], +} + + +def run_migration(migrate_path: str, from_path: str, to_path: str): + migration.Migration(RPC_CONFIG_MIGRATE_PATH).migrate_config( + migrate_path, from_path, to_path + ) diff --git a/node_management/config_migration/src/rpc_config/rpc_config_v9_1_x_mainnet_template.toml b/node_management/config_migration/src/rpc_config/rpc_config_v9_1_x_mainnet_template.toml new file mode 100644 index 00000000..8d599e43 --- /dev/null +++ b/node_management/config_migration/src/rpc_config/rpc_config_v9_1_x_mainnet_template.toml @@ -0,0 +1,158 @@ +# Version: v9.1.x mainnet + +####################################### PROTOCOL PARAMETERS ####################################### + +# The below parameters are fixed for the protocol and must be agreed upon by all node operators +# at genesis. They may subsequently be updated via governance decisions. + +# Core protocol parameters. 
+# The below parameters are node-specific and may be configured as required by the operator. + +# The port on which the node should listen for incoming RPC requests. +bind_addr = "0.0.0.0:26000" +# If `true` then blocks will not be verified before execution. This value should be `false` +# unless you also control the node from which this RPC node is receiving blocks. +block_provider_is_trusted = true +# If true, all components will attempt to load their previous state from disk. Otherwise, +# all components will start in their default state. +resume = true +# The path to `supra_committees.json`. +supra_committees_config = "./configs/supra_committees.json" +# The access tokens used to authenticate public RPC requests to this RPC node. +consensus_access_tokens = [] + +# A unique identifier for this instance of the Supra protocol. Prevents replay attacks across chains. +[chain_instance] +chain_id = 8 +# The length of an epoch in seconds. +epoch_duration_secs = 7200 +# The number of seconds that stake locked in a Stake Pool will automatically be locked up for when +# its current lockup expires, if no request is made to unlock it. +# +# 48 hours. +recurring_lockup_duration_secs = 172800 +# The number of seconds allocated for voting on governance proposals. Governance will initially be controlled by The Supra Foundation. +# +# 46 hours. +voting_duration_secs = 165600 +# Determines whether the network will start with a faucet, amongst other things. +is_testnet = false +# Wednesday, Nov 20, 2024 12:00:00.000 AM (UTC). +genesis_timestamp_microseconds = 1732060800000000 + + +######################################### NODE PARAMETERS ######################################### +[chain_state_assembler] +# Number of certified blocks stored in memory as reference to pending blocks to be executed. +# Only this amount of certified blocks are stored in memory, for the rest memo is kept. 
+certified_block_cache_bucket_size = 50 +# Retry interval for the sync requests for which no response yet available. +sync_retry_interval_in_secs = 1 + +[synchronization.ws] +# The websocket address of the attached validator. +consensus_rpc = "ws://:26000" + +[synchronization.ws.certificates] +# The path to the TLS certificate for the connection with the attached validator. +cert_path = "./configs/client_supra_certificate.pem" +# The path to the private key to be used when negotiating TLS connections. +private_key_path = "./configs/client_supra_key.pem" +# The path to the TLS root certificate authority certificate. +root_ca_cert_path = "./configs/ca_certificate.pem" + +# Parameters for the RPC Archive database. This database stores the indexes used to serve RPC API calls. +[database_setup.dbs.archive.rocks_db] +# The path at which the database should be created. +path = "./configs/rpc_archive" +# Whether the database should be pruned. If `true`, data that is more than `epochs_to_retain` +# old will be deleted. +enable_pruning = true +# Whether snapshots should be taken of the database. +enable_snapshots = false + +# Parameters for the DKG database. +[database_setup.dbs.ledger.rocks_db] +# The path at which the database should be created. +path = "./configs/rpc_ledger" +# Whether the database should be pruned. If `true`, data that is more than `epochs_to_retain` +# old will be deleted. +enable_pruning = true +# Whether snapshots should be taken of the database. +enable_snapshots = false + +# Parameters for the blockchain database. +[database_setup.dbs.chain_store.rocks_db] +# The path at which the database should be created. +path = "./configs/rpc_store" +# Whether the database should be pruned. If `true`, data that is more than `epochs_to_retain` +# old will be deleted. +enable_pruning = true +# Whether snapshots should be taken of the database. +enable_snapshots = false + +# Parameters related to database pruning. 
+[database_setup.prune_config] +# Data stored more than `epochs_to_retain` ago will be pruned if `enable_pruning = true`. +epochs_to_retain = 1008 + +# Parameters for the database snapshot service. +[database_setup.snapshot_config] +# The number of snapshots to retain, including the latest. +depth = 2 +# The interval between snapshots in seconds. +interval_in_seconds = 1800 +# The path at which the snapshots should be stored. +path = "./configs/snapshot" +# The number of times to retry a snapshot in the event that it fails unexpectedly. +retry_count = 3 +# The interval in seconds to wait before retrying a snapshot. +retry_interval_in_seconds = 5 + +# CORS settings for RPC API requests. +# The below settings are the default values required for use in RPC nodes run by validator node operators. +# They are optional for non-validators. +[[allowed_origin]] +url = "https://rpc-mainnet.supra.com" +description = "RPC For Supra" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet1.supra.com" +description = "RPC For nodeops group1" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet2.supra.com" +description = "RPC For nodeops group2" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet3.supra.com" +description = "RPC For nodeops group3" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet4.supra.com" +description = "RPC For nodeops group4" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet5.supra.com" +description = "RPC For nodeops group5" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-wallet-mainnet.supra.com" +description = "RPC For Supra Wallet" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-suprascan-mainnet.supra.com" +description = "RPC For suprascan" +mode = "Server" + +[[allowed_origin]] +url = "http://localhost:26000" +description = "LocalNet" +mode = "Server" diff --git a/node_management/config_migration/src/smr_settings/__init__.py 
b/node_management/config_migration/src/smr_settings/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/node_management/config_migration/src/smr_settings/from_v7_to_v9.py b/node_management/config_migration/src/smr_settings/from_v7_to_v9.py new file mode 100644 index 00000000..376c2557 --- /dev/null +++ b/node_management/config_migration/src/smr_settings/from_v7_to_v9.py @@ -0,0 +1,142 @@ +""" +This module provides migration utilities to upgrade Supra's Validator configuration files from version v7 to v9. + +Migration Steps: +---------------- +- Loads a v9 template configuration as the migration base. +- Migrates root-level fields such as rpc_access_port and certificate paths. +- Migrates database paths for chain store and ledger components. +- Migrates snapshot configuration paths if present; skips if not found in v7. +- Migrates prune configuration if present; skips if not found in v7. +- Migrates mempool and moonshot sections, preserving all settings. +- For each section, scans and recommends updates for any legacy fields. +- Exit early if any unexpected sections in v7 (e.g., [node.ws_server]) are present before migration. + +The main entrypoint is `migrate_v7_to_v9(v7_toml_data)`, which returns a new TOML data structure compatible with v9. + +""" + +import tomlkit +import importlib.resources +from common.utils import scan_and_recommend_updates + + +def __migrate_node_root_config(v7_toml_data, v9_toml_data): + if "ws_server" in v7_toml_data["node"]: + raise SystemExit( + "Error: [node.ws_server] table should not exist in v7 config. Please check your migration path matching the version of your config file." 
+ ) + + v9_node_data = v9_toml_data["node"] + v7_node_data = v7_toml_data["node"] + v9_node_data["rpc_access_port"] = v7_node_data["rpc_access_port"] + + v9_node_ws_certificates = v9_node_data["ws_server"]["certificates"] + v9_node_ws_certificates["root_ca_cert_path"] = v7_node_data["root_ca_cert_path"] + v9_node_ws_certificates["cert_path"] = v7_node_data["server_cert_path"] + v9_node_ws_certificates["private_key_path"] = v7_node_data[ + "server_private_key_path" + ] + + print("\nScanning node root configuration ...") + scan_and_recommend_updates(v7_node_data, v9_node_data) + + +def __migrate_db_chain_store(v7_toml_data, v9_toml_data): + v9_db_chain_store = v9_toml_data["node"]["database_setup"]["dbs"]["chain_store"][ + "rocks_db" + ] + v7_db_chain_store = v7_toml_data["node"]["database_setup"]["dbs"]["chain_store"][ + "rocks_db" + ] + v9_db_chain_store["path"] = v7_db_chain_store["path"] + + print("\nScanning chain store configuration ...") + scan_and_recommend_updates(v7_db_chain_store, v9_db_chain_store) + + +def __migrate_db_ledger(v7_toml_data, v9_toml_data): + v9_db_ledger = v9_toml_data["node"]["database_setup"]["dbs"]["ledger"]["rocks_db"] + v7_db_ledger = v7_toml_data["node"]["database_setup"]["dbs"]["ledger"]["rocks_db"] + v9_db_ledger["path"] = v7_db_ledger["path"] + + print("\nScanning ledger configuration ...") + scan_and_recommend_updates(v7_db_ledger, v9_db_ledger) + + +def __migrate_snapshot_config(v7_toml_data, v9_toml_data): + """ + snapshot_config is optional, + - if absent in v7, the v9 config template will be used as is. + """ + if "snapshot_config" not in v7_toml_data["node"]["database_setup"]: + return + + if "snapshot_config" not in v9_toml_data["node"]["database_setup"]: + raise SystemExit( + "Error: [node.database_setup.snapshot_config] table should exist in v9 template." 
+ ) + + v9_snapshot_config = v9_toml_data["node"]["database_setup"]["snapshot_config"] + v7_snapshot_config = v7_toml_data["node"]["database_setup"]["snapshot_config"] + + v9_snapshot_config["path"] = v7_snapshot_config["path"] + + print("\nScanning snapshot configuration ...") + scan_and_recommend_updates(v7_snapshot_config, v9_snapshot_config) + + +def __migrate_prune_config(v7_toml_data, v9_toml_data): + """ + prune_config is optional, so we skip if it does not exist in v7. + """ + if "prune_config" not in v7_toml_data["node"]["database_setup"]: + return + + if "prune_config" not in v9_toml_data["node"]["database_setup"]: + raise SystemExit( + "Error: [node.database_setup.prune_config] table should exist in v9 template." + ) + + v9_prune_config = v9_toml_data["node"]["database_setup"]["prune_config"] + v7_prune_config = v7_toml_data["node"]["database_setup"]["prune_config"] + + print("\nScanning prune configuration ...") + scan_and_recommend_updates(v7_prune_config, v9_prune_config) + + +def __migrate_mempool_config(v7_toml_data, v9_toml_data): + v9_mempool_config = v9_toml_data["mempool"] + v7_mempool_config = v7_toml_data["mempool"] + + print("\nScanning mempool configuration ...") + scan_and_recommend_updates(v7_mempool_config, v9_mempool_config) + + +def __migrate_moonshot_config(v7_toml_data, v9_toml_data): + v9_moonshot_config = v9_toml_data["moonshot"] + v7_moonshot_config = v7_toml_data["moonshot"] + + print("\nScanning moonshot configuration ...") + scan_and_recommend_updates(v7_moonshot_config, v9_moonshot_config) + + +def migrate_v7_to_v9(v7_toml_data): + """ + Returns a new TOML data structure that is compatible with SMR settings v9. 
+ """ + with ( + importlib.resources.files(__package__) + .joinpath("smr_settings_v9_1_x_mainnet_template.toml") + .open("r") as f + ): + template = f.read() + v9_toml_data = tomlkit.parse(template) + __migrate_node_root_config(v7_toml_data, v9_toml_data) + __migrate_db_ledger(v7_toml_data, v9_toml_data) + __migrate_db_chain_store(v7_toml_data, v9_toml_data) + __migrate_snapshot_config(v7_toml_data, v9_toml_data) + __migrate_prune_config(v7_toml_data, v9_toml_data) + __migrate_mempool_config(v7_toml_data, v9_toml_data) + __migrate_moonshot_config(v7_toml_data, v9_toml_data) + return v9_toml_data diff --git a/node_management/config_migration/src/smr_settings/migrate_path.py b/node_management/config_migration/src/smr_settings/migrate_path.py new file mode 100644 index 00000000..829da3a5 --- /dev/null +++ b/node_management/config_migration/src/smr_settings/migrate_path.py @@ -0,0 +1,14 @@ +from .from_v7_to_v9 import migrate_v7_to_v9 + +from common import migration + + +SMR_SETTINGS_MIGRATE_PATH = { + "v7-v9": [migrate_v7_to_v9], +} + + +def run_migration(migrate_path: str, from_path: str, to_path: str): + migration.Migration(SMR_SETTINGS_MIGRATE_PATH).migrate_config( + migrate_path, from_path, to_path + ) diff --git a/node_management/config_migration/src/smr_settings/smr_settings_v9_1_x_mainnet_template.toml b/node_management/config_migration/src/smr_settings/smr_settings_v9_1_x_mainnet_template.toml new file mode 100644 index 00000000..2cb73209 --- /dev/null +++ b/node_management/config_migration/src/smr_settings/smr_settings_v9_1_x_mainnet_template.toml @@ -0,0 +1,145 @@ +# Version: v9.1.x mainnet + +####################################### PROTOCOL PARAMETERS ####################################### + +# The below parameters are fixed for the protocol and must be agreed upon by all node operators +# at genesis. They may subsequently be updated via governance decisions. Paths are set relative +# to $SUPRA_HOME. + +# Core protocol parameters. 
+[instance] +# A unique identifier for this instance of the Supra protocol. Prevents replay attacks across chains. +chain_id = 8 +# The length of an epoch in seconds. +epoch_duration_secs = 7200 +# The number of seconds that stake locked in a Stake Pool will automatically be locked up for when +# its current lockup expires, if no request is made to unlock it. +# +# 48 hours. +recurring_lockup_duration_secs = 172800 +# The number of seconds allocated for voting on governance proposals. Governance will initially be +# controlled by The Supra Foundation. +# +# 46 hours. +voting_duration_secs = 165600 +# Determines whether the network will start with a faucet, amongst other things. +is_testnet = false +# Wednesday, Nov 20, 2024 12:00:00.000 AM (UTC). +genesis_timestamp_microseconds = 1732060800000000 + +# Parameters related to the mempool. +[mempool] +# The maximum number of milliseconds that a node will wait before proposing a batch when it has +# at least one transaction to process. +max_batch_delay_ms = 500 +# The maximum size of a batch. If `max_batch_size_bytes` is reached before `max_batch_delay_ms` +# then a batch will be proposed immediately. +max_batch_size_bytes = 5000000 +# The amount of time that a node will wait before repeating a sync request for a batch that it +# is missing. +sync_retry_delay_ms = 2000 +# The number of signers of the related batch certificate that a node should ask for a batch +# attempting to retry a sync request. +sync_retry_nodes = 3 + +# Parameters related to the Moonshot consensus protocol. See https:#arxiv.org/abs/2401.01791. +[moonshot] +# The maximum number of milliseconds that the timestamp of a proposed block may be +# ahead of a node's local time when it attempts to vote for the block. 
Validators +# must wait until the timestamp of a certified block has passed before advancing to +# the next round and leaders must wait until the timestamp of the parent block has +# passed before proposing, so this limit prevents Byzantine leaders from forcing +# honest nodes to wait indefinitely by proposing blocks with timestamps that are +# arbitrarily far in the future. +block_recency_bound_ms = 500 +# Causes the node to stop producing blocks when there are no transactions to be +# processed. If all nodes set this value to `true` then the chain will not produce +# new blocks when there are no transactions to process, conserving disk space. +halt_block_production_when_no_txs = false +# The type of leader election function to use. This function generates a schedule that ensures +# that each node eventually succeeds every other. +leader_elector = "FairSuccession" +# The delay after which the block proposer will create a new block despite not having any +# payload items to propose. Denominated in ms. +max_block_delay_ms = 1250 +# The maximum number of batch availability certificates that may be included in a single +# consensus block. +max_payload_items_per_block = 50 +# The number of rounds ahead of self.round for which this node should accept Optimistic Proposals, +# Votes and Timeouts. Must be the same for all nodes. This parameter helps to limit the amount +# of memory that Byzantine nodes can consume, but the larger it is the more efficient syncing can +# become. This trade-off must be balanced. +message_recency_bound_rounds = 1000 +# The delay after which the node will try to repeat sync requests for missing blocks. +# Denominated in ms. Should be the same for all nodes. +sync_retry_delay_ms = 1000 +# The delay after which the node will send a Timeout message for its current Moonshot round, +# measured from the start of the round. Denominated in ms. Must be the same for all nodes. 
+timeout_delay_ms = 3500 + +######################################### NODE PARAMETERS ######################################### + +# The below parameters are node-specific and may be configured as required by the operator. Paths +# should be specified either as absolute paths or as relative to the `supra` binary. +# +# When running the node in Docker and working with a mounted directory, the paths must be specified +# with reference to the location of the mounter directory in the Docker filesystem rather than the +# host filesystem. The default settings provided below assume the default Docker configuration +# provided by the `manage_supra_nodes.sh` script. + +[node] +# The duration in seconds that a node waits between polling its connections to its peers. +connection_refresh_timeout_sec = 1 +# If true, all components will attempt to load their previous state from disk. Otherwise, +# all components will start in their default state. Should always be `true` for testnet and +# mainnet. +resume = true +# The port on which to listen for connections from the associated RPC node. Each validator +# may serve at most one RPC node. +rpc_access_port = 26000 + +[node.ws_server.certificates] +# The path to the TLS root certificate authority certificate. +root_ca_cert_path = "./configs/ca_certificate.pem" +# The path to the TLS certificate for this node. +cert_path = "./configs/server_supra_certificate.pem" +# The path to the private key to be used when negotiating TLS connections. +private_key_path = "./configs/server_supra_key.pem" + +# Parameters for the blockchain database. +[node.database_setup.dbs.chain_store.rocks_db] +# The path at which the database should be created. +path = "./configs/smr_storage" +# Whether the database should be pruned. If `true`, data that is more than `epochs_to_retain` +# old will be deleted. +enable_pruning = true +# Whether snapshots should be taken of the database. +enable_snapshots = false + +# Parameters for the DKG database. 
+[node.database_setup.dbs.ledger.rocks_db] +# The path at which the database should be created. +path = "./configs/ledger_storage" +# Whether the database should be pruned. If `true`, data that is more than `epochs_to_retain` +# old will be deleted. +enable_pruning = true +# Whether snapshots should be taken of the database. +enable_snapshots = false + +# Parameters related to database pruning. +[node.database_setup.prune_config] +# Data stored more than `epochs_to_retain` ago will be pruned if `enable_pruning = true`. +epochs_to_retain = 84 + +# Parameters for the database snapshot service. +[node.database_setup.snapshot_config] +# The number of snapshots to retain, including the latest. +depth = 2 +# The interval between snapshots in seconds. +interval_in_seconds = 1800 +# The path at which the snapshots should be stored. +path = "./configs/snapshot" +# The number of times to retry a snapshot in the event that it fails unexpectedly. +retry_count = 3 +# The interval in seconds to wait before retrying a snapshot. +retry_interval_in_seconds = 5 diff --git a/node_management/config_migration/tests/config_v7.1.8.toml b/node_management/config_migration/tests/config_v7.1.8.toml new file mode 100644 index 00000000..79649d19 --- /dev/null +++ b/node_management/config_migration/tests/config_v7.1.8.toml @@ -0,0 +1,131 @@ +# Version: v7.1.8 +####################################### PROTOCOL PARAMETERS ####################################### + +# The below parameters are fixed for the protocol and must be agreed upon by all node operators +# at genesis. They may subsequently be updated via governance decisions. + +# Core protocol parameters. + +# A unique identifier for this instance of the Supra protocol. Prevents replay attacks across chains. +chain_instance.chain_id = 8 +# The length of an epoch in seconds. 
+chain_instance.epoch_duration_secs = 7200 +# The number of seconds that stake locked in a Stake Pool will automatically be locked up for when +# its current lockup expires, if no request is made to unlock it. +# +# 48 hours. +chain_instance.recurring_lockup_duration_secs = 172800 +# The number of seconds allocated for voting on governance proposals. Governance will initially be +# controlled by The Supra Foundation. +# +# 46 hours. +chain_instance.voting_duration_secs = 165600 +# Determines whether the network will start with a faucet, amongst other things. +chain_instance.is_testnet = false +# Wednesday, Nov 20, 2024 12:00:00.000 AM (UTC). +chain_instance.genesis_timestamp_microseconds = 1732060800000000 + + +######################################### NODE PARAMETERS ######################################### + +# The below parameters are node-specific and may be configured as required by the operator. + +# The port on which the node should listen for incoming RPC requests. +bind_addr = "1.1.1.1:26000" +# If `true` then blocks will not be verified before execution. This value should be `false` +# unless you also control the node from which this RPC node is receiving blocks. +block_provider_is_trusted = true +# The path to the TLS certificate for the connection with the attached validator. +consensus_client_cert_path = "./xyz/client_supra_certificate.pem" +# The path to the private key to be used when negotiating TLS connections. +consensus_client_private_key_path = "./xyz/client_supra_key.pem" +# The path to the TLS root certificate authority certificate. +consensus_root_ca_cert_path = "./xyz/ca_certificate.pem" +# The websocket address of the attached validator. +consensus_rpc = "ws://2.2.2.2:26000" +# If true, all components will attempt to load their previous state from disk. Otherwise, +# all components will start in their default state. Should always be `true` for testnet and +# mainnet. +resume = true +# The path to `supra_committees.json`. 
+supra_committees_config = "./xyz/supra_committees.json" +# The number of seconds to wait before retrying a block sync request. +sync_retry_interval_in_secs = 1 + +# Parameters for the RPC Archive database. This database stores the indexes used to serve RPC API calls. +[database_setup.dbs.archive.rocks_db] +# The path at which the database should be created. +path = "./xyz/rpc_archive" +# Whether snapshots should be taken of the database. +enable_snapshots = true + +# Parameters for the DKG database. +[database_setup.dbs.ledger.rocks_db] +# The path at which the database should be created. +path = "./xyz/rpc_ledger" + +# Parameters for the blockchain database. +[database_setup.dbs.chain_store.rocks_db] +# The path at which the database should be created. +path = "./xyz/rpc_store" +# Whether snapshots should be taken of the database. +enable_snapshots = true + +# Parameters for the database snapshot service. +[database_setup.snapshot_config] +# The number of snapshots to retain, including the latest. +depth = 2 +# The interval between snapshots in seconds. +interval_in_seconds = 1800 +# The path at which the snapshots should be stored. +path = "./xyz/snapshot" +# The number of times to retry a snapshot in the event that it fails unexpectedly. +retry_count = 3 +# The interval in seconds to wait before retrying a snapshot. +retry_interval_in_seconds = 5 + +# CORS settings for RPC API requests.
+[[allowed_origin]] +url = "https://rpc-mainnet.supra.com" +description = "RPC For Supra" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet1.supra.com" +description = "RPC For nodeops group1" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet2.supra.com" +description = "RPC For nodeops group2" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet3.supra.com" +description = "RPC For nodeops group3" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet4.supra.com" +description = "RPC For nodeops group4" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet5.supra.com" +description = "RPC For nodeops group5" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-wallet-mainnet.supra.com" +description = "RPC For Supra Wallet" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-suprascan-mainnet.supra.com" +description = "RPC For suprascan" +mode = "Server" + +[[allowed_origin]] +url = "http://localhost:27000" +description = "LocalNet" +mode = "Server" \ No newline at end of file diff --git a/node_management/config_migration/tests/config_v7_to_v9_expected.toml b/node_management/config_migration/tests/config_v7_to_v9_expected.toml new file mode 100644 index 00000000..420f6bab --- /dev/null +++ b/node_management/config_migration/tests/config_v7_to_v9_expected.toml @@ -0,0 +1,158 @@ +# Version: v9.1.x mainnet + +####################################### PROTOCOL PARAMETERS ####################################### + +# The below parameters are fixed for the protocol and must be agreed upon by all node operators +# at genesis. They may subsequently be updated via governance decisions. + +# Core protocol parameters. +# The below parameters are node-specific and may be configured as required by the operator. + +# The port on which the node should listen for incoming RPC requests. +bind_addr = "1.1.1.1:26000" +# If `true` then blocks will not be verified before execution. 
This value should be `false` +# unless you also control the node from which this RPC node is receiving blocks. +block_provider_is_trusted = true +# If true, all components will attempt to load their previous state from disk. Otherwise, +# all components will start in their default state. +resume = true +# The path to `supra_committees.json`. +supra_committees_config = "./xyz/supra_committees.json" +# The access tokens used to authenticate public RPC requests to this RPC node. +consensus_access_tokens = [] + +# A unique identifier for this instance of the Supra protocol. Prevents replay attacks across chains. +[chain_instance] +chain_id = 8 +# The length of an epoch in seconds. +epoch_duration_secs = 7200 +# The number of seconds that stake locked in a Stake Pool will automatically be locked up for when +# its current lockup expires, if no request is made to unlock it. +# +# 48 hours. +recurring_lockup_duration_secs = 172800 +# The number of seconds allocated for voting on governance proposals. Governance will initially be controlled by The Supra Foundation. +# +# 46 hours. +voting_duration_secs = 165600 +# Determines whether the network will start with a faucet, amongst other things. +is_testnet = false +# Wednesday, Nov 20, 2024 12:00:00.000 AM (UTC). +genesis_timestamp_microseconds = 1732060800000000 + + +######################################### NODE PARAMETERS ######################################### +[chain_state_assembler] +# Number of certified blocks stored in memory as reference to pending blocks to be executed. +# Only this amount of certified blocks are stored in memory, for the rest memo is kept. +certified_block_cache_bucket_size = 50 +# Retry interval for the sync requests for which no response yet available. +sync_retry_interval_in_secs = 1 + +[synchronization.ws] +# The websocket address of the attached validator. 
+consensus_rpc = "ws://2.2.2.2:26000" + +[synchronization.ws.certificates] +# The path to the TLS certificate for the connection with the attached validator. +cert_path = "./xyz/client_supra_certificate.pem" +# The path to the private key to be used when negotiating TLS connections. +private_key_path = "./xyz/client_supra_key.pem" +# The path to the TLS root certificate authority certificate. +root_ca_cert_path = "./xyz/ca_certificate.pem" + +# Parameters for the RPC Archive database. This database stores the indexes used to serve RPC API calls. +[database_setup.dbs.archive.rocks_db] +# The path at which the database should be created. +path = "./xyz/rpc_archive" +# Whether the database should be pruned. If `true`, data that is more than `epochs_to_retain` +# old will be deleted. +enable_pruning = true +# Whether snapshots should be taken of the database. +enable_snapshots = false + +# Parameters for the DKG database. +[database_setup.dbs.ledger.rocks_db] +# The path at which the database should be created. +path = "./xyz/rpc_ledger" +# Whether the database should be pruned. If `true`, data that is more than `epochs_to_retain` +# old will be deleted. +enable_pruning = true +# Whether snapshots should be taken of the database. +enable_snapshots = false + +# Parameters for the blockchain database. +[database_setup.dbs.chain_store.rocks_db] +# The path at which the database should be created. +path = "./xyz/rpc_store" +# Whether the database should be pruned. If `true`, data that is more than `epochs_to_retain` +# old will be deleted. +enable_pruning = true +# Whether snapshots should be taken of the database. +enable_snapshots = false + +# Parameters related to database pruning. +[database_setup.prune_config] +# Data stored more than `epochs_to_retain` ago will be pruned if `enable_pruning = true`. +epochs_to_retain = 1008 + +# Parameters for the database snapshot service. +[database_setup.snapshot_config] +# The number of snapshots to retain, including the latest. 
+depth = 2 +# The interval between snapshots in seconds. +interval_in_seconds = 1800 +# The path at which the snapshots should be stored. +path = "./xyz/snapshot" +# The number of times to retry a snapshot in the event that it fails unexpectedly. +retry_count = 3 +# The interval in seconds to wait before retrying a snapshot. +retry_interval_in_seconds = 5 + +# CORS settings for RPC API requests. +# The below settings are the default values required for use in RPC nodes run by validator node operators. +# They are optional for non-validators. +[[allowed_origin]] +url = "https://rpc-mainnet.supra.com" +description = "RPC For Supra" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet1.supra.com" +description = "RPC For nodeops group1" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet2.supra.com" +description = "RPC For nodeops group2" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet3.supra.com" +description = "RPC For nodeops group3" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet4.supra.com" +description = "RPC For nodeops group4" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-mainnet5.supra.com" +description = "RPC For nodeops group5" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-wallet-mainnet.supra.com" +description = "RPC For Supra Wallet" +mode = "Server" + +[[allowed_origin]] +url = "https://rpc-suprascan-mainnet.supra.com" +description = "RPC For suprascan" +mode = "Server" + +[[allowed_origin]] +url = "http://localhost:26000" +description = "LocalNet" +mode = "Server" \ No newline at end of file diff --git a/node_management/config_migration/tests/smr_settings_v7.1.x.toml b/node_management/config_migration/tests/smr_settings_v7.1.x.toml new file mode 100644 index 00000000..355aa86a --- /dev/null +++ b/node_management/config_migration/tests/smr_settings_v7.1.x.toml @@ -0,0 +1,167 @@ +####################################### PROTOCOL PARAMETERS 
####################################### + +# The below parameters are fixed for the protocol and must be agreed upon by all node operators +# at genesis. They may subsequently be updated via governance decisions. Paths are set relative +# to $SUPRA_HOME. + +# Core protocol parameters. +[instance] +# A unique identifier for this instance of the Supra protocol. Prevents replay attacks across chains. +chain_id = 8 +# The length of an epoch in seconds. +epoch_duration_secs = 7200 +# The number of seconds that stake locked in a Stake Pool will automatically be locked up for when +# its current lockup expires, if no request is made to unlock it. +# +# 48 hours. +recurring_lockup_duration_secs = 172800 +# The number of seconds allocated for voting on governance proposals. Governance will initially be +# controlled by The Supra Foundation. +# +# 46 hours. +voting_duration_secs = 165600 +# Determines whether the network will start with a faucet, amongst other things. +is_testnet = false +# Wednesday, Nov 20, 2024 12:00:00.000 AM (UTC). +genesis_timestamp_microseconds = 1732060800000000 + +# Parameters related to the mempool. +[mempool] +# The maximum number of milliseconds that a node will wait before proposing a batch when it has +# at least one transaction to process. +max_batch_delay_ms = 1500 +# The maximum size of a batch. If `max_batch_size_bytes` is reached before `max_batch_delay_ms` +# then a batch will be proposed immediately. +max_batch_size_bytes = 5000000 +# The amount of time that a node will wait before repeating a sync request for a batch that it +# is missing. +sync_retry_delay_ms = 2000 +# The number of signers of the related batch certificate that a node should ask for a batch +# attempting to retry a sync request. +sync_retry_nodes = 3 + +# Parameters related to the Moonshot consensus protocol. See https://arxiv.org/abs/2401.01791.
+[moonshot] +# The maximum number of milliseconds that the timestamp of a proposed block may be +# ahead of a node's local time when it attempts to vote for the block. Validators +# must wait until the timestamp of a certified block has passed before advancing to +# the next round and leaders must wait until the timestamp of the parent block has +# passed before proposing, so this limit prevents Byzantine leaders from forcing +# honest nodes to wait indefinitely by proposing blocks with timestamps that are +# arbitrarily far in the future. +block_recency_bound_ms = 500 +# Causes the node to stop producing blocks when there are no transactions to be +# processed. If all nodes set this value to `true` then the chain will not produce +# new blocks when there are no transactions to process, conserving disk space. +halt_block_production_when_no_txs = false +# The type of leader election function to use. This function generates a schedule that ensures +# that each node eventually succeeds every other. +leader_elector = "FairSuccession" +# The delay after which the block proposer will create a new block despite not having any +# payload items to propose. Denominated in ms. +max_block_delay_ms = 1250 +# The maximum number of batch availability certificates that may be included in a single +# consensus block. +max_payload_items_per_block = 50 +# The number of rounds ahead of self.round for which this node should accept Optimistic Proposals, +# Votes and Timeouts. Must be the same for all nodes. This parameter helps to limit the amount +# of memory that Byzantine nodes can consume, but the larger it is the more efficient syncing can +# become. This trade-off must be balanced. +message_recency_bound_rounds = 20 +# The delay after which the node will try to repeat sync requests for missing blocks. +# Denominated in ms. Should be the same for all nodes. 
+sync_retry_delay_ms = 2000 +# The delay after which the node will send a Timeout message for its current Moonshot round, +# measured from the start of the round. Denominated in ms. Must be the same for all nodes. +timeout_delay_ms = 5000 + +# Parameters related to the MoveVM. Primarily related to governance features. +[move_vm] +# Initially `false` until the network matures. +allow_new_validators = false +# The maximum stake that may be allocated to a Supra Validator. We are not currently doing +# stake-weighted voting, so this value does not impact our decentralization quotient. This +# may change in the future. Initially set to the total supply. +# +# Measured in Quants (1 Quant = 10^-8 SUPRA). Equal to 100_000_000_000 SUPRA. +max_stake = "10000000000000000000" +# The minimum stake required to run a Supra Validator. There is no minimum at genesis +# because The Foundation's stake is added after the stake pools are created. We will increase +# it to its intended value of 55M SUPRA via governance. New nodes will not be able to join the +# validator set before the update is made. Measured in Quants (1 Quant = 10^-8 SUPRA). +min_stake = 0 +# The number of tokens initially allocated to node operators. Tokens will be earned through block +# rewards. +operator_account_balance = 0 +# The amount of Quants to transfer from each validator owner account to the corresponding stake pool +# after it has been created, during the genesis transaction. Adding the stake after creating the +# pool allows us to ensure that The Foundation's stake is not subject to the PBO locking schedule, +# which is only intended to apply to winners of the Project Blast Off campaign. +# +# Measured in Quants (1 Quant = 10^-8 SUPRA). Equal to 55_000_000 SUPRA. +pbo_owner_stake = 5500000000000000 +# The number of seconds after `genesis_timestamp_microseconds` at which all accounts with +# allocations at genesis will be able to unlock their initial amounts. 
These amounts are stored +# in vesting contracts. +# +# Corresponds to Wednesday, Nov 20, 2024 12:00:00.000 AM (UTC). +remaining_balance_lockup_cliff_period_in_seconds = 0 +# The amount of SUPRA required to qualify as a proposer (this parameter is currently unused). +required_proposer_stake = 0 +# The annual percent yield for validators, proportional to their stake. Specified as a percentage +# with 2 decimals of precision in u64 format due to limitations in the MoveVM. The below value +# represents 12.85%. +rewards_apy_percentage = 1285 +# The percentage of staking rewards earned by Supra Foundation controlled nodes that will be paid +# to the corresponding node operators. Specified as a percentage with 2 decimals of precision in +# u64 format due to limitations in the MoveVM. The below value represents 37.74%. +validator_commission_rate_percentage = 3774 +# The percentage of new stake relative to the current total stake that can join the validator +# set or be added to existing validators within a single epoch. +voting_power_increase_limit = 33 + + +######################################### NODE PARAMETERS ######################################### + +# The below parameters are node-specific and may be configured as required by the operator. Paths +# should be specified either as absolute paths or as relative to the `supra` binary. +# +# When running the node in Docker and working with a mounted directory, the paths must be specified +# with reference to the location of the mounted directory in the Docker filesystem rather than the +# host filesystem. The default settings provided below assume the default Docker configuration +# provided by the `manage_supra_nodes.sh` script. + +[node] +# The duration in seconds that a node waits between polling its connections to its peers. +connection_refresh_timeout_sec = 2 +# If true, all components will attempt to load their previous state from disk. Otherwise, +# all components will start in their default state.
Should always be `true` for testnet and +# mainnet. +resume = true +# The path to the TLS root certificate authority certificate. +root_ca_cert_path = "./tests/ca_certificate.pem" +# The port on which to listen for connections from the associated RPC node. Each validator +# may serve at most one RPC node. +rpc_access_port = 26000 +# The path to the TLS certificate for this node. +server_cert_path = "./tests/server_supra_certificate.pem" +# The path to the private key to be used when negotiating TLS connections. +server_private_key_path = "./tests/server_supra_key.pem" + +# Parameters for the blockchain database. +[node.database_setup.dbs.chain_store.rocks_db] +# The path at which the database should be created. +path = "./tests/smr_storage" +# Whether the database should be pruned. If `true`, data that is more than `epochs_to_retain` +# old will be deleted. +enable_pruning = true + +# Parameters for the DKG database. +[node.database_setup.dbs.ledger.rocks_db] +# The path at which the database should be created. +path = "./tests/ledger_storage" + +# Parameters related to database pruning. +[node.database_setup.prune_config] +# Data stored more than `epochs_to_retain` ago will be pruned if `enable_pruning = true`. +epochs_to_retain = 84 diff --git a/node_management/config_migration/tests/smr_settings_v7_to_v9_expected.toml b/node_management/config_migration/tests/smr_settings_v7_to_v9_expected.toml new file mode 100644 index 00000000..d9c3e73f --- /dev/null +++ b/node_management/config_migration/tests/smr_settings_v7_to_v9_expected.toml @@ -0,0 +1,145 @@ +# Version: v9.1.x mainnet + +####################################### PROTOCOL PARAMETERS ####################################### + +# The below parameters are fixed for the protocol and must be agreed upon by all node operators +# at genesis. They may subsequently be updated via governance decisions. Paths are set relative +# to $SUPRA_HOME. + +# Core protocol parameters. 
+[instance] +# A unique identifier for this instance of the Supra protocol. Prevents replay attacks across chains. +chain_id = 8 +# The length of an epoch in seconds. +epoch_duration_secs = 7200 +# The number of seconds that stake locked in a Stake Pool will automatically be locked up for when +# its current lockup expires, if no request is made to unlock it. +# +# 48 hours. +recurring_lockup_duration_secs = 172800 +# The number of seconds allocated for voting on governance proposals. Governance will initially be +# controlled by The Supra Foundation. +# +# 46 hours. +voting_duration_secs = 165600 +# Determines whether the network will start with a faucet, amongst other things. +is_testnet = false +# Wednesday, Nov 20, 2024 12:00:00.000 AM (UTC). +genesis_timestamp_microseconds = 1732060800000000 + +# Parameters related to the mempool. +[mempool] +# The maximum number of milliseconds that a node will wait before proposing a batch when it has +# at least one transaction to process. +max_batch_delay_ms = 500 +# The maximum size of a batch. If `max_batch_size_bytes` is reached before `max_batch_delay_ms` +# then a batch will be proposed immediately. +max_batch_size_bytes = 5000000 +# The amount of time that a node will wait before repeating a sync request for a batch that it +# is missing. +sync_retry_delay_ms = 2000 +# The number of signers of the related batch certificate that a node should ask for a batch +# attempting to retry a sync request. +sync_retry_nodes = 3 + +# Parameters related to the Moonshot consensus protocol. See https:#arxiv.org/abs/2401.01791. +[moonshot] +# The maximum number of milliseconds that the timestamp of a proposed block may be +# ahead of a node's local time when it attempts to vote for the block. 
Validators +# must wait until the timestamp of a certified block has passed before advancing to +# the next round and leaders must wait until the timestamp of the parent block has +# passed before proposing, so this limit prevents Byzantine leaders from forcing +# honest nodes to wait indefinitely by proposing blocks with timestamps that are +# arbitrarily far in the future. +block_recency_bound_ms = 500 +# Causes the node to stop producing blocks when there are no transactions to be +# processed. If all nodes set this value to `true` then the chain will not produce +# new blocks when there are no transactions to process, conserving disk space. +halt_block_production_when_no_txs = false +# The type of leader election function to use. This function generates a schedule that ensures +# that each node eventually succeeds every other. +leader_elector = "FairSuccession" +# The delay after which the block proposer will create a new block despite not having any +# payload items to propose. Denominated in ms. +max_block_delay_ms = 1250 +# The maximum number of batch availability certificates that may be included in a single +# consensus block. +max_payload_items_per_block = 50 +# The number of rounds ahead of self.round for which this node should accept Optimistic Proposals, +# Votes and Timeouts. Must be the same for all nodes. This parameter helps to limit the amount +# of memory that Byzantine nodes can consume, but the larger it is the more efficient syncing can +# become. This trade-off must be balanced. +message_recency_bound_rounds = 1000 +# The delay after which the node will try to repeat sync requests for missing blocks. +# Denominated in ms. Should be the same for all nodes. +sync_retry_delay_ms = 1000 +# The delay after which the node will send a Timeout message for its current Moonshot round, +# measured from the start of the round. Denominated in ms. Must be the same for all nodes. 
+timeout_delay_ms = 3500 + +######################################### NODE PARAMETERS ######################################### + +# The below parameters are node-specific and may be configured as required by the operator. Paths +# should be specified either as absolute paths or as relative to the `supra` binary. +# +# When running the node in Docker and working with a mounted directory, the paths must be specified +# with reference to the location of the mounter directory in the Docker filesystem rather than the +# host filesystem. The default settings provided below assume the default Docker configuration +# provided by the `manage_supra_nodes.sh` script. + +[node] +# The duration in seconds that a node waits between polling its connections to its peers. +connection_refresh_timeout_sec = 1 +# If true, all components will attempt to load their previous state from disk. Otherwise, +# all components will start in their default state. Should always be `true` for testnet and +# mainnet. +resume = true +# The port on which to listen for connections from the associated RPC node. Each validator +# may serve at most one RPC node. +rpc_access_port = 26000 + +[node.ws_server.certificates] +# The path to the TLS root certificate authority certificate. +root_ca_cert_path = "./tests/ca_certificate.pem" +# The path to the TLS certificate for this node. +cert_path = "./tests/server_supra_certificate.pem" +# The path to the private key to be used when negotiating TLS connections. +private_key_path = "./tests/server_supra_key.pem" + +# Parameters for the blockchain database. +[node.database_setup.dbs.chain_store.rocks_db] +# The path at which the database should be created. +path = "./tests/smr_storage" +# Whether the database should be pruned. If `true`, data that is more than `epochs_to_retain` +# old will be deleted. +enable_pruning = true +# Whether snapshots should be taken of the database. +enable_snapshots = false + +# Parameters for the DKG database. 
+[node.database_setup.dbs.ledger.rocks_db] +# The path at which the database should be created. +path = "./tests/ledger_storage" +# Whether the database should be pruned. If `true`, data that is more than `epochs_to_retain` +# old will be deleted. +enable_pruning = true +# Whether snapshots should be taken of the database. +enable_snapshots = false + +# Parameters related to database pruning. +[node.database_setup.prune_config] +# Data stored more than `epochs_to_retain` ago will be pruned if `enable_pruning = true`. +epochs_to_retain = 84 + +# Parameters for the database snapshot service. +[node.database_setup.snapshot_config] +# The number of snapshots to retain, including the latest. +depth = 2 +# The interval between snapshots in seconds. +interval_in_seconds = 1800 +# The path at which the snapshots should be stored. +path = "./configs/snapshot" +# The number of times to retry a snapshot in the event that it fails unexpectedly. +retry_count = 3 +# The interval in seconds to wait before retrying a snapshot. 
+retry_interval_in_seconds = 5 diff --git a/node_management/config_migration/tests/test_rpc_migration.py b/node_management/config_migration/tests/test_rpc_migration.py new file mode 100644 index 00000000..8795ad71 --- /dev/null +++ b/node_management/config_migration/tests/test_rpc_migration.py @@ -0,0 +1,36 @@ +import os +import shutil +import tomlkit +import pytest +from rpc_config.migrate_path import run_migration as rpc_run_migration + + +@pytest.mark.parametrize( + "from_file,to_file,migrate_path", + [ + ("config_v7.1.8.toml", "config_v7_to_v9_expected.toml", "v7-v9"), + ], +) +def test_migration(tmp_path, from_file, to_file, migrate_path): + # Copy config files to temp dir + src_dir = os.path.dirname(os.path.abspath(__file__)) + from_path = os.path.join(src_dir, from_file) + to_path = os.path.join(src_dir, to_file) + tmp_from = tmp_path / from_file + tmp_to = tmp_path / to_file + shutil.copy(from_path, tmp_from) + # Run migration + import common.globals + + common.globals.ASSUME_YES = True + rpc_run_migration(migrate_path, str(tmp_from), str(tmp_to)) + # Load both files + with open(tmp_to, "r") as f: + migrated = tomlkit.parse(f.read()) + with open(to_path, "r") as f: + expected = tomlkit.parse(f.read()) + + # Compare TOML dicts + assert migrated == expected, ( + f"Migration {migrate_path} failed: {from_file} -> {to_file}" + ) diff --git a/node_management/config_migration/tests/test_smr_migration.py b/node_management/config_migration/tests/test_smr_migration.py new file mode 100644 index 00000000..4f9b0c9a --- /dev/null +++ b/node_management/config_migration/tests/test_smr_migration.py @@ -0,0 +1,36 @@ +import os +import shutil +import tomlkit +import pytest +from smr_settings.migrate_path import run_migration as smr_run_migration + + +@pytest.mark.parametrize( + "from_file,to_file,migrate_path", + [ + ("smr_settings_v7.1.x.toml", "smr_settings_v7_to_v9_expected.toml", "v7-v9"), + ], +) +def test_migration(tmp_path, from_file, to_file, migrate_path): + # Copy 
config files to temp dir + src_dir = os.path.dirname(os.path.abspath(__file__)) + from_path = os.path.join(src_dir, from_file) + to_path = os.path.join(src_dir, to_file) + tmp_from = tmp_path / from_file + tmp_to = tmp_path / to_file + shutil.copy(from_path, tmp_from) + # Run migration + import common.globals + + common.globals.ASSUME_YES = True + + smr_run_migration(migrate_path, str(tmp_from), str(tmp_to)) + # Load both files + with open(tmp_to, "r") as f: + migrated = tomlkit.parse(f.read()) + with open(to_path, "r") as f: + expected = tomlkit.parse(f.read()) + # Compare TOML dicts + assert migrated == expected, ( + f"Migration {migrate_path} failed: {from_file} -> {to_file}" + ) diff --git a/node_management/manage_supra_nodes.sh b/node_management/manage_supra_nodes.sh index 2ea178ee..7fc3dedc 100755 --- a/node_management/manage_supra_nodes.sh +++ b/node_management/manage_supra_nodes.sh @@ -222,8 +222,8 @@ function start_validator_docker_container() { --name "$CONTAINER_NAME" \ --user "${user_id}:${group_id}" \ -v "$HOST_SUPRA_HOME:/supra/configs" \ - -e "RUST_LOG=debug,sop2p=info,multistream_select=off,libp2p_swarm=off,yamux=off" \ -e "SUPRA_HOME=/supra/configs/" \ + -e "RUST_LOG=debug,sop2p=info,multistream_select=off,libp2p_swarm=off,yamux=off" \ -e "SUPRA_LOG_DIR=/supra/configs/supra_node_logs" \ -e "SUPRA_MAX_LOG_FILE_SIZE=500000000" \ -e "SUPRA_MAX_UNCOMPRESSED_LOGS=20" \ @@ -240,8 +240,8 @@ function start_rpc_docker_container() { --name "$CONTAINER_NAME" \ --user "${user_id}:${group_id}" \ -v "$HOST_SUPRA_HOME:/supra/configs" \ - -e "RUST_LOG=debug,sop2p=info,multistream_select=off,libp2p_swarm=off,yamux=off" \ -e "SUPRA_HOME=/supra/configs/" \ + -e "RUST_LOG=debug,sop2p=info,multistream_select=off,libp2p_swarm=off,yamux=off" \ -e "SUPRA_LOG_DIR=/supra/configs/rpc_node_logs" \ -e "SUPRA_MAX_LOG_FILE_SIZE=500000000" \ -e "SUPRA_MAX_UNCOMPRESSED_LOGS=20" \ @@ -512,24 +512,12 @@ function update() { #---------------------------------------------------------- 
Start ---------------------------------------------------------- -function copy_rpc_root_config_files() { - docker cp "$HOST_SUPRA_HOME"/config.toml "$CONTAINER_NAME:/supra/" - docker cp "$HOST_SUPRA_HOME"/genesis.blob "$CONTAINER_NAME:/supra/" -} - -function copy_validator_root_config_files() { - docker cp "$HOST_SUPRA_HOME"/smr_settings.toml "$CONTAINER_NAME:/supra/" - docker cp "$HOST_SUPRA_HOME"/genesis.blob "$CONTAINER_NAME:/supra/" -} - function start_rpc_node() { - copy_rpc_root_config_files start_rpc_docker_container docker exec -itd $CONTAINER_NAME /supra/rpc_node start } function start_validator_node() { - copy_validator_root_config_files start_validator_docker_container prompt_for_cli_password @@ -615,6 +603,12 @@ EOF if [ "$NETWORK" == "mainnet" ]; then export AWS_ACCESS_KEY_ID="c64bed98a85ccd3197169bf7363ce94f" export AWS_SECRET_ACCESS_KEY="0b7f15dbeef4ebe871ee8ce483e3fc8bab97be0da6a362b2c4d80f020cae9df7" + + if is_validator; then + bucket_name="mainnet-validator-snapshot" + elif is_rpc; then + bucket_name="mainnet-snapshot" + fi elif [ "$NETWORK" == "testnet" ]; then export AWS_ACCESS_KEY_ID="229502d7eedd0007640348c057869c90" export AWS_SECRET_ACCESS_KEY="799d15f4fd23c57cd0f182f2ab85a19d885887d745e2391975bb27853e2db949" diff --git a/node_management/migrate_config_and_db_mainnet_v7_to_v9.sh b/node_management/migrate_config_and_db_mainnet_v7_to_v9.sh new file mode 100755 index 00000000..348383ea --- /dev/null +++ b/node_management/migrate_config_and_db_mainnet_v7_to_v9.sh @@ -0,0 +1,202 @@ +#!/bin/bash + + +set -euo pipefail + +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" +SCRIPT_NAME="migrate_from_v7_to_v9" + +# This script is expected to be installed with `install_management_scripts.sh`, which +# creates the `.supra` directory and retrieves the `node_management` directory. 
+source "$SCRIPT_DIR/.supra/node_management/utils.sh" + +function parse_args() { + NODE_TYPE="$1" + CONTAINER_NAME="$2" + HOST_SUPRA_HOME="$3" +} + +function basic_usage() { + echo "Usage: ./$SCRIPT_NAME.sh " >&2 + echo "Parameters:" >&2 + node_type_usage + container_name_usage + host_supra_home_usage + exit 1 +} + + +function verify_rpc() { + if ! verify_container_name || ! verify_host_supra_home; then + basic_usage + fi +} + +function verify_validator() { + if ! verify_container_name || ! verify_host_supra_home; then + basic_usage + fi +} + +function verify_args() { + if [[ "$NODE_TYPE" == "rpc" ]]; then + verify_rpc + elif [[ "$NODE_TYPE" == "validator" ]]; then + verify_validator + else + basic_usage + fi +} + + +#---------------------------------------------------------- RPC ---------------------------------------------------------- + +function migrate_rpc() { + echo "Migrating RPC $CONTAINER_NAME at $HOST_SUPRA_HOME to v9" + # TODO(sc): replace with mainnet image + RPC_V9_IMAGE=asia-docker.pkg.dev/supra-devnet-misc/supra-testnet/rpc-node:v9.0.12 + RPC_V8_IMAGE=asia-docker.pkg.dev/supra-devnet-misc/supra-mainnet/rpc-node:v8.0.3 + + rpc-v8() { docker exec -it rpc-v8 /supra/rpc_node "$@"; } + rpc-v9() { docker exec -it rpc-v9 /supra/rpc_node "$@"; } + + echo "Stop the user's container if it is running." + docker stop "$CONTAINER_NAME" || : + + echo "Prepare containers needed for running migration." + # Stop+Remove rpc containers for migration if they exist. + docker rm -f rpc-v8 || : + docker rm -f rpc-v9 || : + docker container ls + + # Start rpc containers with proper env and volume mounts. 
+    docker run --name rpc-v8 \
+        -v "$HOST_SUPRA_HOME:/supra/configs" \
+        -e "SUPRA_HOME=/supra/configs/" \
+        -itd "$RPC_V8_IMAGE"
+
+    docker run --name rpc-v9 \
+        -v "$HOST_SUPRA_HOME:/supra/configs" \
+        -e "SUPRA_HOME=/supra/configs/" \
+        -itd "$RPC_V9_IMAGE"
+
+    echo "Migrate rpc config from v7 to v9"
+    # TODO(SC) to run in docker context, update path to `./configs/config.toml`
+    migrate-config rpc -p v7-v9 -f "$HOST_SUPRA_HOME/config.toml" -t "$HOST_SUPRA_HOME/config.toml"
+    # Localnet only (rewrite host paths to the docker env path). `-i.bak` works on both GNU and BSD sed; `sed -i ""` is BSD-only and fails on Linux.
+    sed -i.bak "s#${HOST_SUPRA_HOME}#configs#g" "${HOST_SUPRA_HOME}/config.toml" && rm -f -- "${HOST_SUPRA_HOME}/config.toml.bak"
+
+    # Run in docker context.
+    echo "Migrate db from v7 to v8"
+    rpc-v8 migrate-db configs/config.toml
+    echo "Migrate db from v8 to v9"
+    rpc-v9 migrate-db configs/config.toml
+
+    echo "Cleanup containers used for migration."
+    # Remove containers again because we do not need them after migration
+    docker rm -f rpc-v8 || :
+    docker rm -f rpc-v9 || :
+    docker container ls
+
+    echo "Remove snapshot and snapshots directories"
+    # Remove any existing snapshots. If we don't do this then they will start to take
+    # up a large amount of disk space during the sync.
+    # TODO(sc): WHY?
+    rm -rf -- "${HOST_SUPRA_HOME:?}/snapshot"
+    rm -rf -- "${HOST_SUPRA_HOME:?}/snapshots"
+
+    # Finished migration, and follow guide that start the node with the new image with new config and sync
+    # ------
+
+    # ./manage_supra_nodes.sh \
+    #     sync \
+    #     --exact-timestamps \
+    #     --snapshot-source testnet-archive-snapshot \
+    #     rpc \
+    #     "$HOST_SUPRA_HOME" \
+    #     testnet
+    # echo "Migration complete. Please transfer all custom settings from $v8_config_toml to "
+    # echo -n "$config_toml before starting your node."
+}
+
+#---------------------------------------------------------- Validator ----------------------------------------------------------
+
+function migrate_validator() {
+    echo "Migrating validator $CONTAINER_NAME at $HOST_SUPRA_HOME to v9"
+
+    SUPRA_V9_IMAGE=asia-docker.pkg.dev/supra-devnet-misc/supra-mainnet/validator-node:v9.0.5
+    SUPRA_V8_IMAGE=asia-docker.pkg.dev/supra-devnet-misc/supra-mainnet/validator-node:v8.0.3
+    supra-v8() { docker exec -it supra-v8 /supra/supra "$@"; }
+    supra-v9() { docker exec -it supra-v9 /supra/supra "$@"; }
+
+
+    echo "Stop the user's container if it is running."
+    docker stop "$CONTAINER_NAME" || :
+
+    echo "Prepare containers needed for running migration."
+    # Stop+Remove supra containers for migration if they exist.
+    docker rm -f supra-v8 || :
+    docker rm -f supra-v9 || :
+    docker container ls
+
+    # Start supra containers with proper env and volume mounts.
+    docker run --name supra-v8 \
+        -v "$HOST_SUPRA_HOME:/supra/configs" \
+        -e "SUPRA_HOME=/supra/configs/" \
+        -itd "$SUPRA_V8_IMAGE"
+
+    docker run --name supra-v9 \
+        -v "$HOST_SUPRA_HOME:/supra/configs" \
+        -e "SUPRA_HOME=/supra/configs/" \
+        -itd "$SUPRA_V9_IMAGE"
+
+
+
+    echo "Migrate cli profile from v7 to v8"
+    supra-v8 migrate --network mainnet
+    cp -- "$HOST_SUPRA_HOME/validator_identity.pem" "$HOST_SUPRA_HOME/node_identity.pem"
+    echo "Migrate cli profile from v8 to v9"
+    supra-v9 profile migrate
+    echo "Migrate smr_settings from v7 to v9"
+    # TODO(SC) to be run in docker context, update path to `./configs/config.toml`
+    migrate-config smr -p v7-v9 -f "$HOST_SUPRA_HOME/smr_settings.toml" -t "$HOST_SUPRA_HOME/smr_settings.toml"
+
+    # Localnet only (rewrite host paths to the docker env path). `-i.bak` works on both GNU and BSD sed; `sed -i ""` is BSD-only and fails on Linux.
+    sed -i.bak "s#${HOST_SUPRA_HOME}#configs#g" "${HOST_SUPRA_HOME}/smr_settings.toml" && rm -f -- "${HOST_SUPRA_HOME}/smr_settings.toml.bak"
+    echo "Migrate db from v7 to v9"
+    supra-v9 data migrate -p configs/smr_settings.toml
+
+
+    echo "Cleanup containers used for migration."
+ docker rm -f supra-v8 || : + docker rm -f supra-v9 || : + docker container ls + + echo "Remove snapshot and snapshots directories" + # Remove any existing snapshots. If we don't do this then they will start to take + # up a large amount of disk space during the sync. + # TODO(sc): WHY? + rm -rf "$HOST_SUPRA_HOME/snapshot" + rm -rf "$HOST_SUPRA_HOME/snapshots" + + # Finished migration, and follow guide that start the node with the new image with new config and sync + + echo "Migration complete." +} + +function main() { + if [ "$#" -lt 3 ]; then + basic_usage + fi + parse_args "$@" + verify_args + ensure_supra_home_is_absolute_path + + if [ "$NODE_TYPE" == "validator" ]; then + migrate_validator + elif [ "$NODE_TYPE" == "rpc" ]; then + migrate_rpc + fi +} + +main "$@" diff --git a/node_management/migrate_config_mainnet_v7_to_v9_docker.sh b/node_management/migrate_config_mainnet_v7_to_v9_docker.sh new file mode 100755 index 00000000..cfc6d09b --- /dev/null +++ b/node_management/migrate_config_mainnet_v7_to_v9_docker.sh @@ -0,0 +1,181 @@ +#!/bin/bash + + +set -euo pipefail + +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" +SCRIPT_NAME="migrate_from_v7_to_v9" + +# This script is expected to be installed with `install_management_scripts.sh`, which +# creates the `.supra` directory and retrieves the `node_management` directory. +source "$SCRIPT_DIR/.supra/node_management/utils.sh" + +function parse_args() { + NODE_TYPE="$1" + CONTAINER_NAME="$2" + HOST_SUPRA_HOME="$3" +} + +function basic_usage() { + echo "Usage: ./$SCRIPT_NAME.sh " >&2 + echo "Parameters:" >&2 + node_type_usage + container_name_usage + host_supra_home_usage + exit 1 +} + + +function verify_rpc() { + if ! verify_container_name || ! verify_host_supra_home; then + basic_usage + fi +} + +function verify_validator() { + if ! verify_container_name || ! 
verify_host_supra_home; then
+        basic_usage
+    fi
+}
+
+function verify_args() {
+    if [[ "$NODE_TYPE" == "rpc" ]]; then
+        verify_rpc
+    elif [[ "$NODE_TYPE" == "validator" ]]; then
+        verify_validator
+    else
+        basic_usage
+    fi
+}
+
+
+#---------------------------------------------------------- RPC ----------------------------------------------------------
+
+function migrate_rpc() {
+    echo "Migrating RPC $CONTAINER_NAME at $HOST_SUPRA_HOME to v9"
+    # TODO(sc): replace with mainnet image
+    RPC_V9_IMAGE=asia-docker.pkg.dev/supra-devnet-misc/supra-testnet/rpc-node:v9.0.12
+
+    rpc-v9() { docker exec -it rpc-v9 /supra/rpc_node "$@"; }
+
+    # NOTE: unlike the combined config+db script, this removes (not just stops) the
+    # user's container; the message below reflects the actual behavior.
+    echo "Remove the user's container if it exists."
+    docker rm -f "$CONTAINER_NAME" || :
+
+    echo "Prepare containers needed for running migration."
+    # Stop+Remove rpc containers for migration if they exist.
+    docker rm -f rpc-v9 || :
+
+    docker run --name rpc-v9 \
+        -v "$HOST_SUPRA_HOME:/supra/configs" \
+        -e "SUPRA_HOME=/supra/configs/" \
+        -itd "$RPC_V9_IMAGE"
+
+    echo "Migrate rpc config from v7 to v9"
+    # TODO(SC) to run in docker context, update path to `./configs/config.toml`
+    migrate-config rpc -p v7-v9 -f "$HOST_SUPRA_HOME/config.toml" -t "$HOST_SUPRA_HOME/config.toml"
+
+
+    echo "Cleanup containers used for migration."
+    docker rm -f rpc-v9 || :
+    docker container ls
+
+    echo "Remove snapshot and snapshots directories"
+    # Remove any existing snapshots. If we don't do this then they will start to take
+    # up a large amount of disk space during the sync.
+    # TODO(sc): WHY?
+    rm -rf -- "${HOST_SUPRA_HOME:?}/snapshot"
+    rm -rf -- "${HOST_SUPRA_HOME:?}/snapshots"
+
+    # Finished migration, and follow guide that start the node with the new image with new config and sync
+    # ------
+
+    # ./manage_supra_nodes.sh \
+    #     sync \
+    #     --exact-timestamps \
+    #     --snapshot-source testnet-archive-snapshot \
+    #     rpc \
+    #     "$HOST_SUPRA_HOME" \
+    #     testnet
+    # echo "Migration complete. Please transfer all custom settings from $v8_config_toml to "
+    # echo -n "$config_toml before starting your node."
+
+    echo "Migration rpc config complete."
+
+}
+
+#---------------------------------------------------------- Validator ----------------------------------------------------------
+
+function migrate_validator() {
+    echo "Migrating validator $CONTAINER_NAME at $HOST_SUPRA_HOME to v9"
+
+    SUPRA_V9_IMAGE=asia-docker.pkg.dev/supra-devnet-misc/supra-mainnet/validator-node:v9.0.5
+    SUPRA_V8_IMAGE=asia-docker.pkg.dev/supra-devnet-misc/supra-mainnet/validator-node:v8.0.3
+    supra-v8() { docker exec -it supra-v8 /supra/supra "$@"; }
+    supra-v9() { docker exec -it supra-v9 /supra/supra "$@"; }
+
+
+    echo "Stop the user's container if it is running."
+    docker stop "$CONTAINER_NAME" || :
+
+    echo "Prepare containers needed for running migration."
+    docker rm -f supra-v8 || :
+    docker rm -f supra-v9 || :
+
+    # Start supra containers with proper env and volume mounts.
+    docker run --name supra-v8 \
+        -v "$HOST_SUPRA_HOME:/supra/configs" \
+        -e "SUPRA_HOME=/supra/configs/" \
+        -itd "$SUPRA_V8_IMAGE"
+
+    docker run --name supra-v9 \
+        -v "$HOST_SUPRA_HOME:/supra/configs" \
+        -e "SUPRA_HOME=/supra/configs/" \
+        -itd "$SUPRA_V9_IMAGE"
+
+
+
+    echo "Migrate cli profile from v7 to v8"
+    supra-v8 migrate --network mainnet
+    cp -- "$HOST_SUPRA_HOME/validator_identity.pem" "$HOST_SUPRA_HOME/node_identity.pem"
+    echo "Migrate cli profile from v8 to v9"
+    supra-v9 profile migrate
+    echo "Migrate smr_settings from v7 to v9"
+    # TODO(SC) to be run in docker context, update path to `./configs/config.toml`
+    migrate-config smr -p v7-v9 -f "$HOST_SUPRA_HOME/smr_settings.toml" -t "$HOST_SUPRA_HOME/smr_settings.toml"
+
+    # # Localnet only (Optional: local env path is different from docker env path, need to be modified to use docker env path)
+    # sed -i "" "s#${HOST_SUPRA_HOME}#configs#g" ${HOST_SUPRA_HOME}/smr_settings.toml
+
+    echo "Cleanup containers used for migration."
+ docker rm -f supra-v8 || : + docker rm -f supra-v9 || : + docker container ls + + echo "Remove snapshot and snapshots directories" + # Remove any existing snapshots. If we don't do this then they will start to take + # up a large amount of disk space during the sync. + # TODO(sc): WHY? + rm -rf "$HOST_SUPRA_HOME/snapshot" + rm -rf "$HOST_SUPRA_HOME/snapshots" + + # Finished migration, and follow guide that start the node with the new image with new config and sync + + echo "Migration validator config complete." +} + +function main() { + if [ "$#" -lt 3 ]; then + basic_usage + fi + parse_args "$@" + verify_args + ensure_supra_home_is_absolute_path + + if [ "$NODE_TYPE" == "validator" ]; then + migrate_validator + elif [ "$NODE_TYPE" == "rpc" ]; then + migrate_rpc + fi +} + +main "$@"