From a542d6d9426bceefc809c90dfc97504f0739c2da Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 1 Sep 2023 01:20:41 +0100 Subject: [PATCH 01/28] WIP move to ruimarinho images + other changes --- .gitignore | 3 +- src/templates/Dockerfile | 66 ----------------------- src/templates/example.graphml | 36 +++++++------ src/templates/torrc | 5 +- src/templates/torrc.da | 5 ++ src/templates/warnet_entrypoint.sh | 13 +++++ src/utils/gen_dockerfiles.py | 39 ++++++++++++++ src/warnet/cli.py | 29 +++++----- src/warnet/client.py | 41 ++++++-------- src/warnet/tank.py | 64 +++++++++++++++------- src/warnet/utils.py | 43 ++++++++------- src/warnet/warnet.py | 85 +++++++++++++++++++++++------- src/warnet/warnetd.py | 63 +++++++++++++--------- 13 files changed, 285 insertions(+), 207 deletions(-) delete mode 100644 src/templates/Dockerfile create mode 100755 src/templates/warnet_entrypoint.sh create mode 100644 src/utils/gen_dockerfiles.py diff --git a/.gitignore b/.gitignore index 6133dc87a..ecc95d9c8 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,5 @@ __pycache__ .venv warnet.egg-info .python-version -.env \ No newline at end of file +.env +src/templates/Dockerfile* diff --git a/src/templates/Dockerfile b/src/templates/Dockerfile deleted file mode 100644 index 6cd2ac46e..000000000 --- a/src/templates/Dockerfile +++ /dev/null @@ -1,66 +0,0 @@ -FROM ubuntu:20.04 -ENV DEBIAN_FRONTEND=noninteractive - -ARG ARCH -ARG BITCOIN_URL -ARG BITCOIN_VERSION -ARG REPO -ARG BRANCH - -# Base requirements -RUN apt-get update && apt-get install -y \ - ccache \ - python3 \ - vim \ - build-essential \ - wget \ - tor \ - iproute2; \ - apt-get clean; - -# Download binary -RUN if [ -n "${BITCOIN_URL}" ]; then \ - wget "${BITCOIN_URL}"; \ - tar -xzf "bitcoin-${BITCOIN_VERSION}-${ARCH}-linux-gnu.tar.gz" -C /usr/local --strip-components=1; \ - fi; - -# Extra requirements to build from source -RUN if [ -n "${REPO}" ]; then \ - apt-get install -y \ - autotools-dev \ - libtool \ - automake \ - pkg-config \ - libboost-dev \ - libevent-dev \ - libdb5.3++-dev \ - libminiupnpc-dev \ - libnatpmp-dev \ - libzmq3-dev \ - libqrencode-dev \ - libsqlite3-dev \ - git; \ - apt-get clean; \ - fi; - -# Build from source -RUN if [ -n "${REPO}" ]; then \ - mkdir build; \ - cd /build; \ - git clone --depth 1 --branch "${BRANCH}" "https://github.com/${REPO}"; \ - cd /build/bitcoin; \ - ./autogen.sh; \ - ./configure \ - --disable-tests \ - --without-gui \ - --disable-bench \ - --disable-fuzz-binary \ - --enable-suppress-external-warnings; \ - make -j$(nproc); \ - make install; \ - fi; - - -# Start tor with user debian-tor and the Bitcoin Core daemon. 
-CMD gosu debian-tor tor & \ - /usr/local/bin/bitcoind --datadir=/root/.bitcoin --conf=/root/.bitcoin/bitcoin.conf diff --git a/src/templates/example.graphml b/src/templates/example.graphml index 417240e14..71169bcdd 100644 --- a/src/templates/example.graphml +++ b/src/templates/example.graphml @@ -2,54 +2,58 @@ - + - 24.0 + 22.0 uacomment=w0 - 24.0 + 22.0 uacomment=w1 - 24.0 + 22.0 uacomment=w2 - 24.0 + 22.0 uacomment=w3 - + - 25.0 + 23.0 uacomment=w4 - 25.0 + 23.0 uacomment=w5 - 25.0 + 23.0 uacomment=w6 - 25.0 + 23.0 uacomment=w7 - + - 24.1 + 22.0 uacomment=w8 - 24.1 + 22.0 uacomment=w9 - - vasild/bitcoin#relay_tx_to_priv_nets - sensitiverelayowntx=1,debugexclude=addrman,debug=sensitiverelay,debug=tor,debug=net,uacomment=sensitive_relay + 22.0 + uacomment=w10 + + + + + diff --git a/src/templates/torrc b/src/templates/torrc index 5fa8ea1ad..6360b3cda 100644 --- a/src/templates/torrc +++ b/src/templates/torrc @@ -1,8 +1,11 @@ # Common Log debug file /var/log/tor/debug.log +DataDirectory /home/debian-tor/.tor/ RunAsDaemon 1 ControlPort 9051 CookieAuthentication 1 +CookieAuthFileGroupReadable 1 +DataDirectoryGroupReadable 1 ORPort 9001 ExitPolicy accept *:* TestingTorNetwork 1 @@ -13,4 +16,4 @@ AssumeReachable 1 PathsNeededToBuildCircuits 0.25 TestingDirAuthVoteExit * TestingDirAuthVoteHSDir * -V3AuthNIntervalsValid 2 \ No newline at end of file +V3AuthNIntervalsValid 2 diff --git a/src/templates/torrc.da b/src/templates/torrc.da index 479c106c4..7d16148d5 100644 --- a/src/templates/torrc.da +++ b/src/templates/torrc.da @@ -1,11 +1,16 @@ # Common Log debug file /var/log/tor/debug.log +DataDirectory /home/debian-tor/.tor RunAsDaemon 1 ControlPort 9051 CookieAuthentication 1 +CookieAuthFileGroupReadable 1 +DataDirectoryGroupReadable 1 ORPort 9001 ExitPolicy accept *:* TestingTorNetwork 1 +# Nasty hardcode hack +Address 100.20.15.18 # Relay DirAuthority DAeesohphoox orport=9030 no-v2 v3ident=0303840D6B3AD1BDE9FC731EAA8387BD1939C29C 100.20.15.18:9030 6A68434F4CDC4664A8B129929CE4A4D39D4BC8B4 diff --git a/src/templates/warnet_entrypoint.sh b/src/templates/warnet_entrypoint.sh new file mode 100755 index 000000000..f3393d459 --- /dev/null +++ b/src/templates/warnet_entrypoint.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# Custom warnet entrypoint instructions, will be run before base image entrypoint.sh + +# bitcoin +usermod -a -G debian-tor bitcoin + +# tor +mkdir -p /home/debian-tor/.tor/keys +chown -R debian-tor:debian-tor /home/debian-tor +chown -R debian-tor:debian-tor /etc/tor +gosu debian-tor tor + +exec /entrypoint.sh bitcoind diff --git a/src/utils/gen_dockerfiles.py b/src/utils/gen_dockerfiles.py new file mode 100644 index 000000000..b5687ef16 --- /dev/null +++ b/src/utils/gen_dockerfiles.py @@ -0,0 +1,39 @@ +from templates import TEMPLATES + +# Tags +tags = [ + "23.0", + "22.0", + "0.21.1", + "0.20.1", + "0.19.1", + "0.18.1", + "0.17.1", + "0.16.3", + "0.15.1", +] + +base_url = "ruimarinho/bitcoin-core" + +dockerfile_template = """FROM {base_url}:{tag} + +RUN apt-get update && apt-get install -y --no-install-recommends \\ + python3 \\ + vim \\ + tor \\ + iproute2; \\ + apt-get clean; + +COPY tor-keys/* /home/debian-tor/.tor/keys/ +COPY warnet_entrypoint.sh /warnet_entrypoint.sh +""" + +for tag in tags: + dockerfile_content = dockerfile_template.format(base_url=base_url, tag=tag) + + with open(TEMPLATES / f"Dockerfile_{tag}", "w") as file: + file.write(dockerfile_content) + + print(f"generated Dockerfile for tag {tag}") + +print("done") diff --git a/src/warnet/cli.py b/src/warnet/cli.py index 
ac1680fcc..38cbed845 100644 --- a/src/warnet/cli.py +++ b/src/warnet/cli.py @@ -1,3 +1,4 @@ +import os import requests from typing_extensions import Annotated from typing import Optional, Any, Tuple, Dict, Union @@ -16,7 +17,6 @@ debug = typer.Typer() cli.add_typer(debug, name="debug", help="Various warnet debug commands") - def rpc(rpc_method, params: Optional[Union[Dict[str, Any], Tuple[Any, ...]]]): payload = request(rpc_method, params) response = requests.post(f"http://localhost:{WARNETD_PORT}/api", json=payload) @@ -37,7 +37,7 @@ def bcli( network: str = "warnet", ): """ - Call bitcoin-cli on in + Call bitcoin-cli on in <--network> """ try: result = rpc( @@ -104,8 +104,7 @@ def run(scenario: str): @debug.command() def generate_compose(graph_file: str, network: str = "warnet"): """ - Generate the docker-compose file for a given graph_file and return it. - Does not start the network. + Generate the docker-compose file for a given and <--network> name and return it. """ try: result = rpc("generate_compose", {"graph_file": graph_file, "network": network}) @@ -116,7 +115,7 @@ def generate_compose(graph_file: str, network: str = "warnet"): @cli.command() def start(graph_file: Path = EXAMPLE_GRAPH_FILE, network: str = "warnet"): """ - Start a warnet with topology loaded from a into [network] (default: "warnet") + Start a warnet with topology loaded from a into <--network> (default: "warnet") """ try: result = rpc("from_file", {"graph_file": str(graph_file), "network": network}) @@ -126,36 +125,36 @@ def start(graph_file: Path = EXAMPLE_GRAPH_FILE, network: str = "warnet"): @cli.command() -def stop(network: str = "warnet"): +def up(network: str = "warnet"): """ - Stop all docker containers in [network] (default: "warnet"). + Run 'docker-compose up' on a warnet named <--network> (default: "warnet"). """ try: - result = rpc("stop", {"network": network}) + result = rpc("up", {"network": network}) print(result) except Exception as e: - print(f"As we endeavored to cease operations, adversity struck: {e}") + print(f"Error creating network: {e}") @cli.command() -def remove(network: str = "warnet"): +def down(network: str = "warnet"): """ - Stop and then erase all docker containers in [network] (default: "warnet"). + Run 'docker-compose down on a warnet named <--network> (default: "warnet"). """ try: - result = rpc("remove", {"network": network}) + result = rpc("down", {"network": network}) print(result) except Exception as e: - print(f"Error removing network: {e}") + print(f"As we endeavored to cease operations, adversity struck: {e}") @cli.command() -def stop_daemon(): +def stop(): """ Stop the warnetd daemon. 
""" try: - result = rpc("stop_daemon", None) + result = rpc("stop", None) print(result) except Exception as e: print(f"As we endeavored to cease operations, adversity struck: {e}") diff --git a/src/warnet/client.py b/src/warnet/client.py index a4173c6b8..5f66a5200 100644 --- a/src/warnet/client.py +++ b/src/warnet/client.py @@ -7,6 +7,7 @@ from warnet.utils import parse_raw_messages from warnet.tank import Tank +from warnet.warnet import Warnet logger = logging.getLogger("warnet.client") @@ -14,7 +15,7 @@ def get_bitcoin_debug_log(network: str, index: int) -> str: tank = Tank.from_docker_env(network, index) subdir = "/" if tank.bitcoin_network == "main" else f"{tank.bitcoin_network}/" - data, stat = tank.container.get_archive(f"/root/.bitcoin/{subdir}debug.log") + data, stat = tank.container.get_archive(f"/home/bitcoin/.bitcoin/{subdir}debug.log") out = "" for chunk in data: out += chunk.decode() @@ -27,9 +28,11 @@ def get_bitcoin_debug_log(network: str, index: int) -> str: def get_bitcoin_cli(network: str, index: int, method: str, params=None) -> str: tank = Tank.from_docker_env(network, index) - return tank.exec( - f"bitcoin-cli {method} {' '.join(map(str, params))}" - ).output.decode() + if params: + cmd = f"bitcoin-cli {method} {' '.join(map(str, params))}" + else: + cmd = f"bitcoin-cli {method}" + return tank.exec(cmd=cmd, user="bitcoin") def get_messages(network: str, src_index: int, dst_index: int) -> List[Optional[str]]: @@ -42,14 +45,14 @@ def get_messages(network: str, src_index: int, dst_index: int) -> List[Optional[ subdir = ( "/" if src_node.bitcoin_network == "main" else f"{src_node.bitcoin_network}/" ) - exit_code, dirs = src_node.exec(f"ls /root/.bitcoin/{subdir}message_capture") + exit_code, dirs = src_node.exec(f"ls /home/bitcoin/.bitcoin/{subdir}message_capture") dirs = dirs.decode().splitlines() messages = [] for dir_name in dirs: if dst_ip in dir_name: for file, outbound in [["msgs_recv.dat", False], ["msgs_sent.dat", True]]: data, stat = src_node.container.get_archive( - f"/root/.bitcoin/{subdir}message_capture/{dir_name}/{file}" + f"/home/bitcoin/.bitcoin/{subdir}message_capture/{dir_name}/{file}" ) blob = b"" for chunk in data: @@ -84,24 +87,10 @@ def thread_stop(): threading.Thread(target=thread_stop).start() return True - -def remove_container(c): - logger.warning(f"removing container: {c.name}") - c.remove() - -def remove_network(network_name="warnet") -> bool: - def thread_remove_network(): - d = docker.from_env() - network = d.networks.get(network_name) - containers = network.containers - - with concurrent.futures.ThreadPoolExecutor() as executor: - executor.map(stop_container, containers) - - # Use a second executor to ensure all stops complete before removes - with concurrent.futures.ThreadPoolExecutor() as executor: - executor.map(remove_container, containers) - - threading.Thread(target=thread_remove_network).start() +def compose_down(network="warnet") -> bool: + """ + Run docker-compose down on a warnet + """ + wn = Warnet.from_network(network=network) + wn.docker_compose_down() return True - diff --git a/src/warnet/tank.py b/src/warnet/tank.py index 9f075a6f7..446b450a0 100644 --- a/src/warnet/tank.py +++ b/src/warnet/tank.py @@ -4,13 +4,17 @@ import docker import logging +import shutil from copy import deepcopy +from pathlib import Path +from docker.models.containers import Container from templates import TEMPLATES from warnet.utils import ( - get_architecture, + exponential_backoff, generate_ipv4_addr, sanitize_tc_netem_command, dump_bitcoin_conf, + 
SUPPORTED_TAGS, ) CONTAINER_PREFIX_BITCOIND = "tank" @@ -27,6 +31,7 @@ def __init__(self): self.version = "25.0" self.conf = "" self.conf_file = None + self.torrc_file = None self.netem = None self.rpc_port = 18443 self.rpc_user = "warnet_user" @@ -36,6 +41,7 @@ def __init__(self): self._ipv4 = None self._bitcoind_name = None self._exporter_name = None + self.config_dir = Path() def __str__(self) -> str: return (f"Tank(\n" @@ -58,11 +64,17 @@ def from_graph_node(cls, index, warnet): self.index = int(index) node = warnet.graph.nodes[index] if "version" in node: + if not "/" and "#" in self.version: + if node["version"] not in SUPPORTED_TAGS: + raise Exception(f"Unsupported version: can't be generated from Docker images: {node['version']}") self.version = node["version"] if "bitcoin_config" in node: self.conf = node["bitcoin_config"] if "tc_netem" in node: self.netem = node["tc_netem"] + self.config_dir = self.warnet.config_dir / str(self.suffix) + self.config_dir.mkdir(parents=True, exist_ok=True) + self.write_torrc() return self @classmethod @@ -103,13 +115,21 @@ def exporter_name(self): return self._exporter_name @property - def container(self): + def container(self) -> Container: + # logger.debug(f"Containers in environment: {[container.name for container in docker.from_env().containers.list()]}") + # logger.debug(f"bitcoind_name = {self.bitcoind_name}") if self._container is None: + # logger.debug(f"self._container for {self.bitcoind_name} is None") self._container = docker.from_env().containers.get(self.bitcoind_name) + # logger.debug(f"After self._container for {self.bitcoind_name} is {self._container}") return self._container - def exec(self, cmd): - return self.container.exec_run(cmd) + @exponential_backoff() + def exec(self, cmd: str, user: str = "root"): + result = self.container.exec_run(cmd=cmd, user=user) + if result.exit_code != 0: + raise Exception(f"Command failed with exit code {result.exit_code}: {result.output.decode('utf-8')}") + return result.output.decode('utf-8') def apply_network_conditions(self): if self.netem is None: @@ -149,12 +169,20 @@ def write_bitcoin_conf(self, base_bitcoin_conf): conf[self.bitcoin_network].append(("rpcport", self.rpc_port)) conf_file = dump_bitcoin_conf(conf) - path = self.warnet.tmpdir / f"bitcoin.conf.{self.suffix}" + path = self.config_dir / f"bitcoin.conf" logger.info(f"Wrote file {path}") with open(path, "w") as file: file.write(conf_file) self.conf_file = path + def write_torrc(self): + tor_node_type = 'torrc' if self.index != 0 else 'torrc.da' + src_tor_conf_file = TEMPLATES / tor_node_type + + dest_path = self.config_dir / "torrc" + shutil.copyfile(src_tor_conf_file, dest_path) + self.torrc_file = dest_path + def add_services(self, services): assert self.index is not None assert self.conf_file is not None @@ -162,10 +190,11 @@ def add_services(self, services): # Setup bitcoind, either release binary or build from source if "/" and "#" in self.version: # it's a git branch, building step is necessary + # TODO: broken repo, branch = self.version.split("#") build = { - "context": ".", - "dockerfile": str(TEMPLATES / "Dockerfile"), + "context": str(TEMPLATES), + "dockerfile": str(TEMPLATES / "Dockerfile_custom_build"), "args": { "REPO": repo, "BRANCH": branch, @@ -173,35 +202,30 @@ def add_services(self, services): } else: # assume it's a release version, get the binary - arch = get_architecture() build = { - "context": ".", - "dockerfile": str(TEMPLATES / "Dockerfile"), - "args": { - "ARCH": arch, - "BITCOIN_VERSION": self.version, - 
"BITCOIN_URL": f"https://bitcoincore.org/bin/bitcoin-core-{self.version}/bitcoin-{self.version}-{arch}-linux-gnu.tar.gz", - }, + "context": str(TEMPLATES), + "dockerfile": str(TEMPLATES / f"Dockerfile_{self.version}"), } # Add the bitcoind service services[self.bitcoind_name] = { "container_name": self.bitcoind_name, "build": build, + "entrypoint": "/warnet_entrypoint.sh", "volumes": [ - f"{self.conf_file}:/root/.bitcoin/bitcoin.conf", - f"{TEMPLATES / ('torrc' if self.index != 0 else 'torrc.da')}:/etc/tor/torrc" + f"{self.conf_file}:/home/bitcoin/.bitcoin/bitcoin.conf", + f"{self.torrc_file}:/etc/tor/torrc", ], "networks": { self.docker_network: { "ipv4_address": f"{self.ipv4}", } }, + "labels": { + "warnet": "tank" + }, "privileged": True, } - if self.index == 0: - services[self.bitcoind_name]["volumes"].append( - f"{TEMPLATES / 'tor-keys'}:/root/.tor/keys") # Add the prometheus data exporter in a neighboring container services[self.exporter_name] = { diff --git a/src/warnet/utils.py b/src/warnet/utils.py index d515a77f0..e746be343 100644 --- a/src/warnet/utils.py +++ b/src/warnet/utils.py @@ -4,16 +4,28 @@ import os import random import re -import subprocess import sys import time from io import BytesIO +from pathlib import Path from test_framework.p2p import MESSAGEMAP from test_framework.messages import ser_uint256 logger = logging.getLogger("utils") +SUPPORTED_TAGS = [ + "23.0", + "22.0", + "0.21.1", + "0.20.1", + "0.19.1", + "0.18.1", + "0.17.1", + "0.16.3", + "0.15.1", +] + def exponential_backoff(max_retries=5, base_delay=1, max_delay=32): """ @@ -33,12 +45,13 @@ def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: - logger.error(f"rpc error:\n\t{e}") + error_msg = str(e).replace('\n', ' ').replace('\t', ' ') + logger.error(f"rpc error: {error_msg}") retries += 1 if retries == max_retries: raise e delay = min(base_delay * (2**retries), max_delay) - logger.warning(f"retry in {delay} seconds...") + logger.warning(f"exponential_backoff: retry in {delay} seconds...") time.sleep(delay) return wrapper @@ -46,21 +59,6 @@ def wrapper(*args, **kwargs): return decorator -def get_architecture(): - """ - Get the architecture of the machine. - - :return: The architecture of the machine or None if an error occurred - """ - result = subprocess.run(["uname", "-m"], stdout=subprocess.PIPE) - arch = result.stdout.decode("utf-8").strip() - if arch == "arm64": - arch = "aarch64" - if arch is None: - raise Exception("Failed to detect architecture.") - return arch - - def generate_ipv4_addr(subnet): """ Generate a valid random IPv4 address within the given subnet. 
@@ -318,3 +316,12 @@ def parse_raw_messages(blob, outbound): msg_dict["body"] = to_jsonable(msg) messages.append(msg_dict) return messages + + +def gen_config_dir(network: str) -> Path: + """ + Determine a config dir based on network name + """ + xdg_config = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config")) + config_dir = Path(xdg_config) / "warnet" / network + return config_dir diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py index ffdec4815..8961ed5bc 100644 --- a/src/warnet/warnet.py +++ b/src/warnet/warnet.py @@ -5,34 +5,37 @@ import docker import logging import networkx +import shutil import subprocess import yaml from pathlib import Path -from tempfile import mkdtemp from templates import TEMPLATES from typing import List from warnet.tank import Tank -from warnet.utils import parse_bitcoin_conf +from warnet.utils import parse_bitcoin_conf, gen_config_dir logger = logging.getLogger("Warnet") TMPDIR_PREFIX = "warnet_tmp_" +logging.getLogger("docker.utils.config").setLevel(logging.WARNING) +logging.getLogger("docker.auth").setLevel(logging.WARNING) class Warnet: - def __init__(self): - self.tmpdir: Path = Path(mkdtemp(prefix=TMPDIR_PREFIX)) + + def __init__(self, config_dir): + self.config_dir: Path = config_dir self.docker = docker.from_env() self.bitcoin_network:str = "regtest" self.docker_network:str = "warnet" self.subnet: str = "100.0.0.0/8" self.graph = None + self.graph_name = "graph.graphml" self.tanks: List[Tank] = [] - logger.info(f"Created Warnet with temp directory {self.tmpdir}") def __str__(self) -> str: tanks_str = ',\n'.join([str(tank) for tank in self.tanks]) return (f"Warnet(\n" - f"\tTemp Directory: {self.tmpdir}\n" + f"\tTemp Directory: {self.config_dir}\n" f"\tBitcoin Network: {self.bitcoin_network}\n" f"\tDocker Network: {self.docker_network}\n" f"\tSubnet: {self.subnet}\n" @@ -42,23 +45,36 @@ def __str__(self) -> str: f")") @classmethod - def from_graph_file(cls, graph_file: str, network: str = "warnet"): - self = cls() + def from_graph_file(cls, graph_file: str, config_dir: Path, network: str = "warnet"): + self = cls(config_dir) + destination = self.config_dir / self.graph_name + destination.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(graph_file, destination) self.docker_network = network self.graph = networkx.read_graphml(graph_file, node_type=int) self.tanks_from_graph() + logger.info(f"Created Warnet with temp directory {self.config_dir}") return self @classmethod def from_graph(cls, graph): - self = cls() + self = cls(Path()) self.graph = graph self.tanks_from_graph() + logger.info(f"Created Warnet with temp directory {self.config_dir}") + return self + + @classmethod + def from_network(cls, config_dir: Path = Path(), network: str = "warnet"): + self = cls(config_dir) + self.config_dir = gen_config_dir(network) + self.graph = networkx.read_graphml(Path(self.config_dir / self.graph_name), node_type=int) + self.tanks_from_graph() return self @classmethod - def from_docker_env(cls, network_name): - self = cls() + def from_docker_env(cls, config_dir, network_name): + self = cls(config_dir) self.docker_network = network_name index = 0 while index <= 999999: @@ -96,14 +112,47 @@ def connect_edges(self): src_tank = self.tanks[int(src)] dst_ip = self.tanks[dst].ipv4 logger.info(f"Using `addnode` to connect tanks {src} to {dst}") - src_tank.exec(f"bitcoin-cli addpeeraddress {dst_ip} 18444") + cmd = f"bitcoin-cli addpeeraddress {dst_ip} 18444" + src_tank.exec(cmd=cmd, user="bitcoin") + + def docker_compose_build_up(self): + command = 
["docker-compose", "-p", self.docker_network, "up", "-d", "--build"] + try: + with subprocess.Popen( + command, + cwd=str(self.config_dir), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) as process: + for line in process.stdout: + logger.info(line.decode().rstrip()) + except Exception as e: + logger.error( + f"An error occurred while executing `{' '.join(command)}` in {self.config_dir}: {e}" + ) def docker_compose_up(self): - command = ["docker-compose", "-p", "warnet", "up", "-d", "--build"] + command = ["docker-compose", "-p", self.docker_network, "up", "-d"] + try: + with subprocess.Popen( + command, + cwd=str(self.config_dir), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) as process: + for line in process.stdout: + logger.info(line.decode().rstrip()) + except Exception as e: + logger.error( + f"An error occurred while executing `{' '.join(command)}` in {self.config_dir}: {e}" + ) + + def docker_compose_down(self): + command = ["docker-compose", "down"] try: with subprocess.Popen( command, - cwd=str(self.tmpdir), + cwd=str(self.config_dir), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) as process: @@ -111,7 +160,7 @@ def docker_compose_up(self): logger.info(line.decode().rstrip()) except Exception as e: logger.error( - f"An error occurred while executing `{' '.join(command)}` in {self.tmpdir}: {e}" + f"An error occurred while executing `{' '.join(command)}` in {self.config_dir}: {e}" ) def write_docker_compose(self): @@ -137,7 +186,7 @@ def write_docker_compose(self): "container_name": "prometheus", "ports": ["9090:9090"], "volumes": [ - f"{self.tmpdir / 'prometheus.yml'}:/etc/prometheus/prometheus.yml" + f"{self.config_dir / 'prometheus.yml'}:/etc/prometheus/prometheus.yml" ], "command": ["--config.file=/etc/prometheus/prometheus.yml"], "networks": [self.docker_network], @@ -157,7 +206,7 @@ def write_docker_compose(self): "networks": [self.docker_network], } - docker_compose_path = self.tmpdir / "docker-compose.yml" + docker_compose_path = self.config_dir / "docker-compose.yml" try: with open(docker_compose_path, "w") as file: yaml.dump(compose, file) @@ -192,7 +241,7 @@ def write_prometheus_config(self): for tank in self.tanks: tank.add_scrapers(config["scrape_configs"]) - prometheus_path = self.tmpdir / "prometheus.yml" + prometheus_path = self.config_dir / "prometheus.yml" try: with open(prometheus_path, "w") as file: yaml.dump(config, file) diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index add63fdcf..f0a8e38d8 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -18,9 +18,9 @@ get_bitcoin_cli, get_bitcoin_debug_log, get_messages, - stop_network, - remove_network, + compose_down, ) +from warnet.utils import gen_config_dir WARNETD_PORT = 9276 continue_running = True @@ -54,13 +54,13 @@ @jsonrpc.method("bcli") -def bcli(node: int, method: str, params: list[str] = [], network: str = "warnet"): +def bcli(node: int, method: str, params: list[str] = [], network: str = "warnet") -> str: """ Call bitcoin-cli on in [network] """ try: result = get_bitcoin_cli(network, node, method, params) - return result + return str(result) except Exception as e: raise Exception(f"{e}") @@ -133,19 +133,39 @@ def run(scenario: str, network: str = "warnet") -> str: return f"Exception {e}" +@jsonrpc.method("up") +def up(network: str = "warnet") -> str: + config_dir = gen_config_dir(network) + wn = Warnet.from_network(config_dir, network) + def thread_start(wn): + try: + wn.docker_compose_up() + wn.apply_network_conditions() + wn.connect_edges() + 
logger.info(f"Resumed warnet named '{network}' from config dir {wn.config_dir}") + except Exception as e: + logger.error(f"Exception {e}") + + threading.Thread(target=lambda: thread_start(wn)).start() + return f"Resuming warnet..." + + @jsonrpc.method() def from_file(graph_file: str, network: str = "warnet") -> str: """ Run a warnet with topology loaded from a """ - wn = Warnet.from_graph_file(graph_file, network) + config_dir = gen_config_dir(network) + if config_dir.exists(): + return f"Config dir {config_dir} already exists, not overwriting existing warnet" + wn = Warnet.from_graph_file(graph_file, config_dir, network) def thread_start(wn): try: wn.write_bitcoin_confs() wn.write_docker_compose() wn.write_prometheus_config() - wn.docker_compose_up() + wn.docker_compose_build_up() wn.apply_network_conditions() wn.connect_edges() logger.info(f"Created warnet named '{network}' from graph file {graph_file}") @@ -161,42 +181,33 @@ def generate_compose(graph_file: str, network: str = "warnet") -> str: """ Generate the docker compose file for a graph file and return import """ - wn = Warnet.from_graph_file(graph_file, network) + config_dir = gen_config_dir(network) + if config_dir.exists(): + return f"Config dir {config_dir} already exists, not overwriting existing warnet" + wn = Warnet.from_graph_file(graph_file, config_dir, network) wn.write_bitcoin_confs() wn.write_docker_compose() - docker_compose_path = wn.tmpdir / "docker-compose.yml" + docker_compose_path = wn.config_dir / "docker-compose.yml" with open(docker_compose_path, "r") as f: return f.read() -@jsonrpc.method("stop") -def stop(network: str = "warnet") -> str: +@jsonrpc.method("down") +def down(network: str = "warnet") -> str: """ Stop all docker containers in . """ try: - _ = stop_network(network) + _ = compose_down(network) return "Stopping warnet" except Exception as e: return f"Exception {e}" -@jsonrpc.method("remove") -def remove(network: str = "warnet") -> str: - """ - Stop and then erase all docker containers in . - """ - try: - remove_network(network) - return "Stopping and wiping warnet" - except Exception as e: - return f"Exception {e}" - - -@jsonrpc.method("stop_daemon") -def stop_daemon() -> str: +@jsonrpc.method("stop") +def stop() -> str: """ - Stop the daemon. + Stop warnetd. """ os.kill(os.getppid(), signal.SIGTERM) return "Stopping daemon..." 
From 03ef58a5090d7e069239747be0d3b2f78b1a3511 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 1 Sep 2023 14:55:37 +0100 Subject: [PATCH 02/28] fixups to scenarios and custom builds --- src/scenarios/tx_flood.py | 1 - src/templates/example.graphml | 14 +++++++------- src/utils/gen_dockerfiles.py | 16 ++-------------- src/warnet/tank.py | 13 ++++++------- src/warnet/test_framework_bridge.py | 4 +++- src/warnet/warnet.py | 5 +++-- src/warnet/warnetd.py | 15 +++++++++------ 7 files changed, 30 insertions(+), 38 deletions(-) diff --git a/src/scenarios/tx_flood.py b/src/scenarios/tx_flood.py index 91a5da8f7..224e099e2 100755 --- a/src/scenarios/tx_flood.py +++ b/src/scenarios/tx_flood.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - from warnet.test_framework_bridge import WarnetTestFramework from scenarios.utils import ensure_miner diff --git a/src/templates/example.graphml b/src/templates/example.graphml index 71169bcdd..c43ef86f6 100644 --- a/src/templates/example.graphml +++ b/src/templates/example.graphml @@ -45,15 +45,15 @@ 22.0 uacomment=w9 - - 22.0 - uacomment=w10 - - - - + + + + + vasild/bitcoin#relay_tx_to_priv_nets + sensitiverelayowntx=1,debugexclude=addrman,debug=sensitiverelay,debug=tor,debug=net,uacomment=sensitive_relay + diff --git a/src/utils/gen_dockerfiles.py b/src/utils/gen_dockerfiles.py index b5687ef16..c73a2d6fd 100644 --- a/src/utils/gen_dockerfiles.py +++ b/src/utils/gen_dockerfiles.py @@ -1,17 +1,5 @@ from templates import TEMPLATES - -# Tags -tags = [ - "23.0", - "22.0", - "0.21.1", - "0.20.1", - "0.19.1", - "0.18.1", - "0.17.1", - "0.16.3", - "0.15.1", -] +from warnet.utils import SUPPORTED_TAGS base_url = "ruimarinho/bitcoin-core" @@ -28,7 +16,7 @@ COPY warnet_entrypoint.sh /warnet_entrypoint.sh """ -for tag in tags: +for tag in SUPPORTED_TAGS: dockerfile_content = dockerfile_template.format(base_url=base_url, tag=tag) with open(TEMPLATES / f"Dockerfile_{tag}", "w") as file: diff --git a/src/warnet/tank.py b/src/warnet/tank.py index 446b450a0..f5c468bbb 100644 --- a/src/warnet/tank.py +++ b/src/warnet/tank.py @@ -7,6 +7,7 @@ import shutil from copy import deepcopy from pathlib import Path +from docker.api import service from docker.models.containers import Container from templates import TEMPLATES from warnet.utils import ( @@ -116,12 +117,8 @@ def exporter_name(self): @property def container(self) -> Container: - # logger.debug(f"Containers in environment: {[container.name for container in docker.from_env().containers.list()]}") - # logger.debug(f"bitcoind_name = {self.bitcoind_name}") if self._container is None: - # logger.debug(f"self._container for {self.bitcoind_name} is None") self._container = docker.from_env().containers.get(self.bitcoind_name) - # logger.debug(f"After self._container for {self.bitcoind_name} is {self._container}") return self._container @exponential_backoff() @@ -186,6 +183,7 @@ def write_torrc(self): def add_services(self, services): assert self.index is not None assert self.conf_file is not None + services[self.bitcoind_name] = {} # Setup bitcoind, either release binary or build from source if "/" and "#" in self.version: @@ -206,12 +204,13 @@ def add_services(self, services): "context": str(TEMPLATES), "dockerfile": str(TEMPLATES / f"Dockerfile_{self.version}"), } + # Use entrypoint for derived build, but not for compiled build + services[self.bitcoind_name].update({"entrypoint": "/warnet_entrypoint.sh"}) # Add the bitcoind service - services[self.bitcoind_name] = { + services[self.bitcoind_name].update({ "container_name": 
self.bitcoind_name, "build": build, - "entrypoint": "/warnet_entrypoint.sh", "volumes": [ f"{self.conf_file}:/home/bitcoin/.bitcoin/bitcoin.conf", f"{self.torrc_file}:/etc/tor/torrc", @@ -225,7 +224,7 @@ def add_services(self, services): "warnet": "tank" }, "privileged": True, - } + }) # Add the prometheus data exporter in a neighboring container services[self.exporter_name] = { diff --git a/src/warnet/test_framework_bridge.py b/src/warnet/test_framework_bridge.py index 533eb4d20..82e60159d 100644 --- a/src/warnet/test_framework_bridge.py +++ b/src/warnet/test_framework_bridge.py @@ -33,7 +33,7 @@ def run_test(self): # the original methods from BitcoinTestFramework def setup(self): - warnet = Warnet.from_docker_env("warnet") + warnet = Warnet.from_docker_env(self.options.network) for i, tank in enumerate(warnet.tanks): ip = tank.ipv4 logger.info(f"Adding TestNode {i} from {tank.bitcoind_name} with IP {ip}") @@ -157,6 +157,8 @@ def parse_args(self): help="set a random seed for deterministically reproducing a previous test run") parser.add_argument("--timeout-factor", dest="timeout_factor", default=1, help="adjust test timeouts by a factor. Setting it to 0 disables all timeouts") + parser.add_argument("--network", dest="network", default="warnet", + help="Designate which warnet this should run on (default: warnet)") self.add_options(parser) # Running TestShell in a Jupyter notebook causes an additional -f argument diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py index 8961ed5bc..4269e8e23 100644 --- a/src/warnet/warnet.py +++ b/src/warnet/warnet.py @@ -73,7 +73,8 @@ def from_network(cls, config_dir: Path = Path(), network: str = "warnet"): return self @classmethod - def from_docker_env(cls, config_dir, network_name): + def from_docker_env(cls, network_name): + config_dir = gen_config_dir(network_name) self = cls(config_dir) self.docker_network = network_name index = 0 @@ -111,7 +112,7 @@ def connect_edges(self): (src, dst) = edge src_tank = self.tanks[int(src)] dst_ip = self.tanks[dst].ipv4 - logger.info(f"Using `addnode` to connect tanks {src} to {dst}") + logger.info(f"Using `addpeeraddress` to connect tanks {src} to {dst}") cmd = f"bitcoin-cli addpeeraddress {dst_ip} 18444" src_tank.exec(cmd=cmd, user="bitcoin") diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index f0a8e38d8..e95a43626 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -120,16 +120,19 @@ def run(scenario: str, network: str = "warnet") -> str: """ Run from the Warnet Test Framework """ + base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + scenario_path = os.path.join(base_dir, "scenarios", f"{scenario}.py") + + if not os.path.exists(scenario_path): + return f"Scenario {scenario} not found at {scenario_path}." + try: - # TODO: should handle network argument - dir_path = os.path.dirname(os.path.realpath(__file__)) - mod_path = os.path.join(dir_path, "..", "scenarios", f"{sys.argv[2]}.py") - run_cmd = [sys.executable, mod_path] + sys.argv[3:] + run_cmd = [sys.executable, scenario_path] + [f"--network={network}"] + logger.debug(f"Running {run_cmd}") subprocess.Popen(run_cmd, shell=False) - # TODO: We could here use python-prctl to give the background process - # a name prefixed with "warnet"? Might only work on linux... return f"Running scenario {scenario} in the background..." 
except Exception as e: + logger.error(f"Exception occurred while running the scenario: {e}") return f"Exception {e}" From ad180b7d0f12d65caad1133d29c292c663a4a3b3 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 1 Sep 2023 21:02:30 +0100 Subject: [PATCH 03/28] dedicated Tor DA container --- src/templates/Dockerfile_tor_da | 19 +++++ src/templates/example.graphml | 14 ++-- src/templates/tor-entrypoint.sh | 0 src/templates/tor-keys/authority_certificate | 66 +++++++-------- src/templates/tor-keys/authority_identity_key | 78 +++++++++--------- src/templates/tor-keys/authority_signing_key | 50 +++++------ .../tor-keys/ed25519_master_id_public_key | Bin 64 -> 0 bytes .../tor-keys/ed25519_master_id_secret_key | Bin 96 -> 0 bytes src/templates/tor-keys/ed25519_signing_cert | Bin 172 -> 0 bytes .../tor-keys/ed25519_signing_secret_key | Bin 96 -> 0 bytes src/templates/tor-keys/secret_id_key | 15 ---- src/templates/tor-keys/secret_onion_key | 15 ---- src/templates/tor-keys/secret_onion_key_ntor | Bin 96 -> 0 bytes src/templates/torrc | 2 +- src/templates/torrc.da | 6 +- src/templates/warnet_entrypoint.sh | 2 +- src/warnet/tank.py | 35 ++++---- src/warnet/warnet.py | 27 ++++-- src/warnet/warnetd.py | 5 +- 19 files changed, 168 insertions(+), 166 deletions(-) create mode 100644 src/templates/Dockerfile_tor_da create mode 100644 src/templates/tor-entrypoint.sh delete mode 100644 src/templates/tor-keys/ed25519_master_id_public_key delete mode 100644 src/templates/tor-keys/ed25519_master_id_secret_key delete mode 100644 src/templates/tor-keys/ed25519_signing_cert delete mode 100644 src/templates/tor-keys/ed25519_signing_secret_key delete mode 100644 src/templates/tor-keys/secret_id_key delete mode 100644 src/templates/tor-keys/secret_onion_key delete mode 100644 src/templates/tor-keys/secret_onion_key_ntor diff --git a/src/templates/Dockerfile_tor_da b/src/templates/Dockerfile_tor_da new file mode 100644 index 000000000..e07497642 --- /dev/null +++ b/src/templates/Dockerfile_tor_da @@ -0,0 +1,19 @@ +FROM alpine:latest + +RUN apk add --no-cache tor + +RUN addgroup -S debian-tor && adduser -S debian-tor -G debian-tor + +RUN mkdir -p /home/debian-tor/.tor/keys +RUN chown -R debian-tor:debian-tor /home/debian-tor +RUN mkdir -p /var/log/tor +RUN chown -R debian-tor:debian-tor /var/log/tor + +COPY tor-keys /home/debian-tor/.tor/keys +RUN chown -R debian-tor:debian-tor /home/debian-tor/.tor/keys +COPY torrc.da /etc/tor/torrc + +EXPOSE 9050 + +USER debian-tor +CMD ["tor", "-f", "/etc/tor/torrc"] diff --git a/src/templates/example.graphml b/src/templates/example.graphml index c43ef86f6..71169bcdd 100644 --- a/src/templates/example.graphml +++ b/src/templates/example.graphml @@ -45,15 +45,15 @@ 22.0 uacomment=w9 - - - - - - vasild/bitcoin#relay_tx_to_priv_nets - sensitiverelayowntx=1,debugexclude=addrman,debug=sensitiverelay,debug=tor,debug=net,uacomment=sensitive_relay + 22.0 + uacomment=w10 + + + + + diff --git a/src/templates/tor-entrypoint.sh b/src/templates/tor-entrypoint.sh new file mode 100644 index 000000000..e69de29bb diff --git a/src/templates/tor-keys/authority_certificate b/src/templates/tor-keys/authority_certificate index d94c6b8f6..5106d9ab2 100644 --- a/src/templates/tor-keys/authority_certificate +++ b/src/templates/tor-keys/authority_certificate @@ -1,46 +1,46 @@ dir-key-certificate-version 3 -dir-address 172.18.0.3:9030 -fingerprint 0303840D6B3AD1BDE9FC731EAA8387BD1939C29C -dir-key-published 2023-08-25 19:02:32 -dir-key-expires 2024-08-25 19:02:32 +dir-address 100.20.15.18:9030 +fingerprint 
6892AEA34F17542F4E85E77AF2DF6C9F6C108798 +dir-key-published 2023-09-01 20:09:16 +dir-key-expires 2024-09-01 20:09:16 dir-identity-key -----BEGIN RSA PUBLIC KEY----- -MIIBigKCAYEA3jGqtZFeyIBSUKJzmh8dxP9wvR5HHxUVFmGUhWiJ7Ur36kAniNRZ -Kokrm3PMr8ptv4u7YpAWlMsNdikGsROeTLQaJaYOsiqSvP7lCQkVMQmqznmAnoHF -MIe68G26M5UhMP6r8N3BU3Y138AZcuQfr5NJ3BaZg0fvqlWBbLhUzwUH9++5L8RG -QILw8by8lmaRjd1vsnRCRo4j3vULk1C5Po4B2fCHN3L6I9ikK+jmEM/Iz82aeUWR -r6SDX+M1FkyxYeab2pqXL+E7VsvmfEhmZVebMS3C84/OOYG16rUqXTFpVFRoPvNm -jWajvU1ELSbLqYqt+ktstrLKF0ghJQheBH5xW/agAC5KEpjCrhDzym9309pahxhz -W+v4tlzFDWqiguxB9HgCH0cBoQTB8VFun781vzSUPwK0F4Q3vTu8wVsmhV6oi38S -OJNx02EG19cdx4SWEhDaeeOLOjKXa7NexqahiucoQrjFrcrBqjV0ghuR1H639jZT -wTVRc62AkzHxAgMBAAE= +MIIBigKCAYEAo9VyTKk227UQvVmdw4zSlov1UAPL7psRwmZ+kxa14ngClyX0y1K4 +Txj8/5hiTsQ54UOOe4ORLMX5OBOfG3Eg4JhsneFhJdu+jJznZZWA2A343SOgNxF9 +VbC3dshWjZUbYfQvAq7XDhWsFeLnIe/K3aGihUDtqk5IEF/dVQHG9lEQoJfYSmcm +0kylHVkF8lwE4x6/Y4mq14ECq7MHkKrTiGbZHfAAbm2N0T18zJxcJ9lS2MZU3A8+ +Nyfzady8VCqiNzlIJRQUojviGD0FdATAlOox9PepYHAW22V+D9YvSM5EtFk/22vM +hwnH9Q0a5FiZYWp2ubiZo+cNfrT6ccb9OSZAfju8Za/GCfuQhKYTFlMfk2laX34V +AUDdq6OSyn0c9eyGjHPRHi/bikEXSV8GK6IceM/aOKxndB5iVxeArQsUzvTR9CeR +633bPczmDeOJhStYE4CdWtpdczZct3gXKLP1sLLcOvvriZlz7Pyle5JL9Mlb6z1i +MPp/hqANGfMhAgMBAAE= -----END RSA PUBLIC KEY----- dir-signing-key -----BEGIN RSA PUBLIC KEY----- -MIIBCgKCAQEA7lYqoruSLa5+1N/47ojihMsWKCORqttlgHPc0QxSRbcxKfoeCZ2C -8FG+CSMy3c9k5GPBZ+pLrtBXFiEXJslyAkYysWYCTkI5bEpDmR1CzWVpGlciF2oC -ZH2x357KN/RlH22+T0tE2nnrGmwPulMoH72HTfB91a9dRm91+tk9uANKrGzidL4U -qsJK7r8IXzBR9RY8k3Ro6FJdnM1VbUAdEAKBwIfOxo0RJVcsZ9e+RdyOPERMP2dt -a+kYnv+ovY/xQZrP61GfqkqgNjEIs9xL7KsZzKerXn+2/cpxfzfcnawrgMd3EyCU -vYDMEHfSAFYPV2Xg/p0rZ5wDDFPwWHMOAQIDAQAB +MIIBCgKCAQEAmCH9KN37GbmztOVJNNaFSjt46uWUEGTap0GaewCJVhjuDTHvU6TA +bzSZadWhczYwulNnYGVBJD1J4qdA+ImrzxAgyIyuDmuJADWqlosCA46LK+5H2Uri +wTsJAxXDqmE0boWca1vXcF5Gwz07hjtXHMrbGZxMjqx6YT89jRfXob8AFXPknSpB ++BG3OaziNLf0MbMlpGlQEHHBpxj9O4myWMhMH+zrMY2MR6y8i1tEEiTqysqQIM0b +kUxIeHkoMUNxR6J1ucS4wmko9b1MVsjW8sEPCDAWDtm1OVOcdqvnjq7MEUHinynT +3ENZCNU5dB8c8pQ+ie2SouM5jF7vUyzSIQIDAQAB -----END RSA PUBLIC KEY----- dir-key-crosscert -----BEGIN ID SIGNATURE----- -s9Cy2FEH59iPYCLT5Ls59KP7BGW18tEI7seyM3Ps9pp4W8tv6XemIWr9Hffh2a2n -uP1o0jkbmOAWb48kWx11NqJDK8J/Qankfd1NXQoNJHZPFaVFj6yACdLmYvFv2Jak -JicW3PlSMx8kRrCt3gaZTpbcdV6pxVVak0yE99aJ1x1Ty83hBun7CsRI3sAbJVSb -ly05DcwNbnvMNDXOZJrGJeHgcfRR384usT67PjPpKJQ6R8vtTOi4WBh9DURsNVWJ -GEtz9mkSersA5IFlg05b47Ggvq7Yw1vlud1BLC6q3pGCkBJ6CcrouRvmQbfV/vDD -Mm21NTV29/qKwXvFBL50Aw== +focD/7ogTIuymfG1I7CPrNB5bXmeKqReXOuW5dAoPTU85feyp9J25SfoBFHs3y8N +o47Igf2mPmSrM+fV23xbFwp6GbBwQXkpIH4RSks3+mEBUSaqjvzf+C/Yx3Bf7TDO +zBpbmS7xOjA9eX2F1h2aQJGHgp2i5fBFzCiWRvTd6LLKqdwdbXTjUEPel6XyPfiQ +Io0VcSQr8qTTU+xsvLN4GxnANqnUyNcV75QOb0RPLpUy7qa53en6LnkqsruvXGd6 +RMF5AMK9e001gquMM6xBNwmU922MbQkYgbB4ljC6gCziP0ueYXTlThA68N9Gkncz +9xRSLZLHhBAO/veK3Xo83Q== -----END ID SIGNATURE----- dir-key-certification -----BEGIN SIGNATURE----- -xdnab0RdhKJWnRJHVF1OHSFnGG4KDgc5nae34TKlxpUuKOmST0XlJRFWcPib8zOV -uaqTLeVuyCuil8kXw5iHwJUzdr3x9kwcFGFmA/JeUV7ANDhvsU6wxMCx7iHHkIBe -f89ArUQdej1dPQFD4igElHVA3my7w0o67ZOcEYCW4sthOaS+0m8Cv5i1wL1p11K8 -eB20JtbWO1u60FI3Q01vO79P30D2mTgLSiNuDKaxvWFFRziCLvDMZLHCAtHlOkVR -HWQpAL6TKs8k2XIqUAuPWBhzR8ZtZB+FOQpHZ+Ol2ccB1jPOpg2LdHe8sWuNFeeZ -y6CEDh8xfw5yuWFOYv3+TH+pPdzZm6InWe+C7fmmvfOc+XV0kpy6Tn6mnOdgqZc0 -V8Rh/+2IlLxWZJMEliPAuhvPQByERL7mPudcnmoBOi+XK2orxWOVRBb0pgviu2kw -gvWGoXBlZ04fYZyu3Sq9wvgL04lhIZAQ0DXhju0JCUydILh2s9r4igM/eFk4PFOb +Pn6h6+5bB3yTqByA203cZLbVtLo+G9BB0/ADnRnl4B027QcR6e5PYwP/9dxSP4WC 
+jo76EEBHd1CYQUZ7pvNI4tBBMGMIO57FNebDNfpZygDR6m3F0AjwcuZhM12FRvHK +XP0XMlRidk0AfAVxVgG9UmYvzO/pvG7O5RbUi+wRzTH04ogMmImH5AbW4zDQ9u89 +uU3HoC/2DN8qADyyn64gXkHKS9Tc44qiTdre5uyK6/FSniuZs7TnDvocleJunKhI +SRc8Ricp3o2HhYaADIPVqvyaCrWqII5o9JwI+75woE3GWsbPrHVSi3OYXJY2hyxG +glbEhsaAATVWUQeJQ8vd9h+Rir4xeE3bJnP3LYsDWm406v8WpH3y4AztOz7FuWTA ++/l6is9S2UAqnmxhurrVFrBq6hf7mA+lS/cXRPSu2xzCIkV5D0qIYL8vHzLfKk6A +2MgG8nH2KSHFe8FpP7JkjwDwv3+U9YVPg/Ky7s0tBLw4vD7qbYG8L4pNH3BqGtpi -----END SIGNATURE----- diff --git a/src/templates/tor-keys/authority_identity_key b/src/templates/tor-keys/authority_identity_key index 72b8c5d38..c67d03a40 100644 --- a/src/templates/tor-keys/authority_identity_key +++ b/src/templates/tor-keys/authority_identity_key @@ -1,41 +1,41 @@ -----BEGIN ENCRYPTED PRIVATE KEY----- -MIIHKjAcBgoqhkiG9w0BDAEDMA4ECAdHyrhMiH3IAgIIAASCBwg1gnpfu0TI3Yj8 -aKynzSp2cTwzl/PuUbLqntC8EcAZpvZ0gy1UbovqwRZoRvy4EzQMMPhzLizXMzK/ -hUVBojXgtqhLsmm5l1891I5iJYjgp9K4HoJ7J1FexJxlmM9KnoYN6I+7gUO5BEI4 -sq02mDeBkZiBREREe7Aau05vG2akgUCtt94HYibN4Nsl2I5SlN9f2/+MUrlsY0YV -IKnTJb7BXiZGeRGLtDqe1fnCwJTuB7iXu+Xfq53GiDAHp9u0/LikjJvZ7zqy+tej -bItVJuCQFfT4B9TRj7mEIxzPo1HQdJdmaHFkPnZ+gNrq3JopMwhY0oX6mut/I+wW -axFmY9GomsV5mPz6dp8mINzux2rxUzhEiA+2cN4xqE5sBDhUetYzZbcyzF/WiBKD -mxZS3ZLyiEaZkg+PcH6oW6FQ7/cghX7/RXUbfAsuuq39SH206vUb/jfOZ55vvJJZ -6YFXvdXJuTGEd+Zh5AzPpk5uEOCGZ4MnuMVwJlU3NKBEktvqM081b3pSLTYh/+RY -3nlc2qM/6+Y+kcdHdUKrSxIlGYUcMk1BT6BYblcCrothtBlYzhtg4iO55ry6se4A -u5N5veepSfXmc9CUOcXzAeTAdPZZo0tfNq+R4TrMw3eKjUrt9/SzLgDIGUp91CYG -h079bsTe8bw4U5rrrFkXxuXqaaNlcfaK6cj9Y390CdSLSjCIOgdXqE4G8MYg0+Vh -TyZVRtVOAOB+FJInrnFJUtR/2r6Bick4iPuF9+mvYsInGWOfBJQo/jRSUKBw879c -VbAZM4oTL2iNnOSvdHYF2GDcyBl4zlJ+AjwF4KVABVYLtzf1vZ4uQLxae/Je/dVi -NTJ5fVSLyCzTISZwWXGWtweTkZDaXcPQdjtD/O1s+Dd8uFM17XtrWK9K5+hauSAW -N32u1MZ17c3dHJC6e8xLI4sJUnz+EnYD4UgRzJI1kzDpHzc6ihpgtAwlanfpIQLl -5q0pCj6+BBNrXtAPaOj76CwCGMYVj8aWUV8rkvN2OJ4vKFaymCNoKoP593MO3xC7 -gP4iZ6QzkBPmFl5HthXK77G2VtrqZkDv4Rjj5XC790Tk3FFgteUSQoFuyi2R0aNY -4jUVDfAVf56EXj0916anYktjVvqH5/nMI+qhJjoFbzJ37+MdqU3/w3bUbROW4110 -qVc/xGfinJ6Cam8h9sMLYcHOvHhCcVU49CTYr/06rzCr9oGf8vVRMv3C3DwxRpPF -pIszgohFOJhJWv++YdFPKgG7Er5XXyqDI5Z6NMBkDuUb0i/VLd89udwKdC6Y5+Rl -mCYsPkd7tb1Cm7U9h+J+Bb28xXvJMEv89r9W7iLAz91wMGrJtqYAxp5p+zQ37nMu -vT31i57AfS54vTfu70NUVJ6mc42S0O9IcSMBRT8DpdDLHyLKB5HQ+GLqT7hJzkdq -KqBYTbU8Q/zCs1qaio5DFgXTJf6lMC482GAKBRmFQ9i00dY4n5QT1SUwBojsonqO -CyQjNnChaRe+cZYXiPGM82sFZWelFngZrJ0tTEwkVJBCijN+X0U7R9nCPlFVvbbz -paAKs2j9p2RZQmJgMsnokwtdLlE4WZo1WAvMAP821Br6hDKp1UYYIJR26KhCa4sX -VWjSqB/JZGYk54s83X4DD7JvCAP+RkIcHJi8T0vstseTcoynat7yfgkMIWM6MGjP -t4+ClLp2I8teDdK7oIF8cTn9HsABovlIVyQwUv3TGnZJMM6TLfaWPb5NMxkuGEUg -G1TyAMXHyZrqsylmOiLUpKp5+6Z+zxNQtbMCVBxjjQ/N03clXI/CyDin/n1cCHbR -8jq9+NmFqhB9e8QAPdlaB8i3ZPbJyLBSHs5iar/XSMrKZOY2ST11nhFgKO+eGHtG -QcKEvporkwQWj7lPAA1vJqjvfHNBQl1V4zrcvwiHSp5qY4+ewSJI/tIirCU3+N5B -I3+WXeu+BfX6Wtrb1QuQO6O3pwHfdqXiIdWbYlalXFJVvcNUf1Rb7bfqI5HuEbof -qvQ7Hu1Y3hDi/cXte9y+hkS+/rVFYHvaJAvkknQl4WHw49XZQiI8a5YoLH4kYkwe -A7aYI24oibPtKQ2c9jXrFAetwffp0AWfEWkt/ehxIFwr0M8yIL5C9Xf0YbGcrYD9 -Fs2GazVdvf+9btBn6QsV1uKKjDDCIwJozeIYlntHxw1bHlV5gChGIQtHeJaAbDr1 -9VTRJKf2+FJnccbX17yDY62i0qJ4QH0rur3sVlJwGO4nFeMRKyEGhDVd5pAs/AUs -bD2or3QeZZ8Xfg4gQzi+3Ltcq0b07cS4QruYVhGVrznMvYbbmoTaFYveji236Xgb -Cuj+FFwZEYxoJ6I6PTARGBcwEBgr9DvuLJdC3cMIWDvpg/d0dLWrT5+kHLHQNfrK -C+jRpkqKVlhe2pvFAOA= +MIIHKjAcBgoqhkiG9w0BDAEDMA4ECB5bzKuqoWFjAgIIAASCBwikNfm7AMpe0787 +in2HNwCLUqZBrQGk9LLeRuF5lVBrdYrcSdSbvLY76+/veMcvYw/TMyAyNvRBSJ4H +NnR9xkqaCofXbWYx9V0UqwFqcSNcrE3u+DVFXrE1lv9KGMATGhx2ozDRCbACdXgk 
+1tBX3tdJ03SJVR8oN/0O6Tkk2813EQEipUF7IxanOzLZ7SwxloqvY4EHrYW+724+ +cdOvR7R7vJlXlZKmqA7RmgDYhHJDdBfMNUjmUUZh2dYEXsCR/3DeLvK7mcOeTCI6 +u5+C1XdjraPyaNXgHQ5ydkwCJ5kW8wiwa82JOS/Z5xEWYCUK7bx0sn8h92djkRli +fOfzwvvqlTe93Dd4iAIzn7qKhr7wrKgPyvZZLN3BfOnumNsxeCwr3nrvwexxx2nv +L+pY3kwLwxU24sU6f+a+gl7iUfEZP0MEM9YmE4ke7LVa4Br3dPXtTe24Rl5yH6hI +SeX5VVOS8HH+wCuj/QMkp5zk4YZgWy7ypjTSId/q+5+iXq+A4/n2RVPb8fye5hpk +k49DOQD2fRurh/xuvysS51s0w0shDj92z2OELKbIR6z67ebhMnbU+G86+yjQC2HR +PavUcGA+aVJMe1ZjWuiovWGCy3bLqGsM9HHkKYeLpXRx3ciHQkD3aCztpiyYLR/Z +IOeOpsjGXAI1KhbrSNVrW89CyFaVhp6c33hobKQkd0eNfBswBAvjd4h7xwCgUVas +gBlzdzkq2+RVSGtRNnViFT9hMq6oFpbXbjKS9gPzLrfX1VP6mxuk3nLEHQyJNfZ3 +Dj9oQN+mY+kXqxPw5ZjSrWNBZtmkGqHIpCsuAgH3oeuLE9jA+/OqxVlWJDSPAXhv +1k+YaeuPrriI61BHJ268YoRvoKHWV2ABqWAuE7VdILbyZjcHDdvgUtTui6+GJjkL +igCXhMQSdfOAv4JJ555na7n0rJ7pvv4zZBZDdaVxGTocy0jfqzMZGX+0bx5tVZGw +ilHm2DZP4H9QhrqcU4jV0SNC6LowZDv+WRT6I/kz9o1bjnxC2r4kYn759GvMjL7v +B6tMkeo/ajwgpWVaHYIqWpSPaBhENNI9eO24rv8ZeXTIfKt6Kr+NzWdWUsaicClo +kkkvznAgHxFgW9MJ31hAXpRSm8WlhZiPS0CBXChBPaW9MCFuHw/E1ez7xIrWzMnI +cSj7pO8DJG8TRovwB8IlGr7JLvyGfr/RSV+ORXVm3rfj98MaCqEReRdbp8jd+CwB +yEKf600gIi0V7pH+CaHNrHQ5lnT4YooKPEa3tvqW743aLrzlWrhT/Ng5S884cmYR +0To7G8Ap9yPbddEDQh+xFmos7c8FsOsPL+BtQAs6eXDY+2D/9m1oFXb0VrIfN4v4 +vGIi4+39FARUk+Qe6ek2JQIEZgZTh8Sw95PKmiRumqPW1DRXTONArwi7NJjIu6j1 +fl5QDldCi4fNeIkz9C8LrrW3zT90GudSCbuX0IPRdnB9Uf41kxzGogK1atzCQzH9 +JfVrWg0/OeiR0Tc+pLvhUrI8zd9xOPyRNKgy2S84OjQ2c+wFsEEfEkEU1kw7UmJX +DZD9D0Mt0sxebNLzAQQCLAe4aMVhkb7mHEKbxNLzIFXnPNIB/6p2tqfQZ8YYEZCP +pYLK9OSkV6fblVmACtmNnKMd/sVPlAbyjDb2XlbWU+1QO6MwcaJQSCvA/lQ5ZcWV +5a5sePxYJngUg2BvNuMc544VnaLtmsn1Qkx9R2IRc/wQudgCXtCCeF5ceVqgFUVK +Py1Gu9MukZnjDNxeyx1fJ98OxKPU2V5Fdo4ILo1J6RTAlgo/6qamfCOW2HI8pH81 +d3HmmKitLWzVfAfySMES5iFv7y8HfRV9IHW+w/CD/WWgW2Mc/9Z4sNhS09/mruZz +OPcfFba6CRfWPvcEMJgUsqU39EY6oJenqySv9vJbxbmM8z/Xh8bJ+zLO1wEeNFAO +K2HDZWt77dKoX/qh1oZHH9o/CDfl0sgikU+waKhgI+C09doMj7TMnoqqGR4NMI7N +naHrgA2cnlAWJ6rBRXLjRe/lytiq7jouLv7n4pKqyYG90R+o4dXUshlOG+4qNh2U +TWbecBknykO89xxLqjuo//qsWMyUw4ACzy1Ph1D53butm9TofdfWzCRzaQL0jbVh +LAqsAcTAKd3HSlSQ4Kuha9Bi6Tu0ByHsDFRUKh39nqOqG5UomL7RtH4REjZgnOxO +yE3VYvU8DfIC1oX1X+evGuCVF4bn/KbLuL6xVKciCOuClJsrjsCeee0emkEkJ6fY +SuZs6jpY2Xh1Pj22+Yi1xRW53HyVHOMrNMw5WnnxchdrdRwI09yfrrLAGpA9niC0 +k9Yu1HhVf4HXEy4R9p26Cs/Q54Cj7LSFbgaZ5MS5ydhK22dTseAPr1LuzOalqyGu +VAChjq6AkD6H/dVmifs= -----END ENCRYPTED PRIVATE KEY----- diff --git a/src/templates/tor-keys/authority_signing_key b/src/templates/tor-keys/authority_signing_key index f1347b5d2..48410dbc2 100644 --- a/src/templates/tor-keys/authority_signing_key +++ b/src/templates/tor-keys/authority_signing_key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA7lYqoruSLa5+1N/47ojihMsWKCORqttlgHPc0QxSRbcxKfoe -CZ2C8FG+CSMy3c9k5GPBZ+pLrtBXFiEXJslyAkYysWYCTkI5bEpDmR1CzWVpGlci -F2oCZH2x357KN/RlH22+T0tE2nnrGmwPulMoH72HTfB91a9dRm91+tk9uANKrGzi -dL4UqsJK7r8IXzBR9RY8k3Ro6FJdnM1VbUAdEAKBwIfOxo0RJVcsZ9e+RdyOPERM -P2dta+kYnv+ovY/xQZrP61GfqkqgNjEIs9xL7KsZzKerXn+2/cpxfzfcnawrgMd3 -EyCUvYDMEHfSAFYPV2Xg/p0rZ5wDDFPwWHMOAQIDAQABAoIBAGzVrjyHmPYBU+uT -p1IN8hqfvqtvqqox68Tfo7tWfA9X2zyG+cZ8RfnF6hi0GRtPBIyCvSPdyte2Tb5O -OAs/PC+rrKRzp6Pi++Pmeb3mrcWrhHZBydCVWShYeaxbD47DrJXQgHInNfbqa6Cy -LfBw3sa3vpypsFyu6tIqPd7h6xwmivPwL6JY3lAx+opednsRJM02CfTdbvHPy+iu -WPbtMngc0Q5sAgHp3m00/FHK7uv1kpdwxMvKKO8MjOgbscg8OvosJG+WQlmO2Z3d -pF3es3ZZHMe8V1ujIjPtzxA6c6lFNj5ngvP9Ebzsa5UlVXRrxgFjymTx0EHrCwXp -rgGZSQECgYEA/sxkQDCQo1MGY62sbMFQ6byjQsbMIoyPzZL1stccuzb0kffi6DjX -+Wss17+Rmi6AIpX5urElIsjhfSMx/0mhVDWhvH+VkIZJXy9ApQeNapFO1ZxuqyY3 
-9CTUMZ7C+/X88qBY84GGzJZq8VhDrOVjzWUTjfofNUErA3uSteW95RECgYEA73Xm -tjXEnVPdckgjbTbmq7N+TM9J5gcv9sub09j0Ckjy5UKXbsU98pWeVmmuXf7FrLH5 -IY0sRomg2MS460cS8x17die28b3Ze/nmeR0rxW+LFQWn+0jG7a+nYV2vVDlUFyal -b+PSr7aqV2z2TUcTQYAXWpTo8k/H8XOUQoVT2fECgYBWjipUH+tSgCbAf8P+IeGR -tVIkohHAfs1vAUhT231YH8APQg0j5AOCKCVQmTQmEzvWA+qUwA6kdWccMnOOjH2w -dJOfzBuLwAJ2wj+MkDc7B2enav+xRkdluYkX3h3Qp+yoo030UoDvSP3FRL2go0A3 -CuoMpfYqChxJj5X0ErpicQKBgQDMMpAw5RzqxG9cWuPK6DxAQun6+fEcdRiTrgEN -4D4K3zYyBl04Gn6+9JN3QNtuUCmlIIc/8zDEXeyJrQe7qnogUw3LLga1kp15ORsu -QI/o5zuFC7O5dtAcM+PaBZfTjKeGrm/5QB+Ts4MC1moab/edrWs81SroJtxrq5HM -QAHHIQKBgFXc3hS3HC9CCBYg3WTygbPeT6J44lJBA8Gnx6VFUu8eSeOhI9mPXOI5 -bshun75tPO8TnT6q9+qWkZlvZu1omCA6JbGAcZ6N/MVn62UUxmLuoA1pImlKpNlZ -rNU94D6piYA9U/EOjfJNez4A5JMoLQ3PFH4tF41jzVsJNrowoYob +MIIEowIBAAKCAQEAmCH9KN37GbmztOVJNNaFSjt46uWUEGTap0GaewCJVhjuDTHv +U6TAbzSZadWhczYwulNnYGVBJD1J4qdA+ImrzxAgyIyuDmuJADWqlosCA46LK+5H +2UriwTsJAxXDqmE0boWca1vXcF5Gwz07hjtXHMrbGZxMjqx6YT89jRfXob8AFXPk +nSpB+BG3OaziNLf0MbMlpGlQEHHBpxj9O4myWMhMH+zrMY2MR6y8i1tEEiTqysqQ +IM0bkUxIeHkoMUNxR6J1ucS4wmko9b1MVsjW8sEPCDAWDtm1OVOcdqvnjq7MEUHi +nynT3ENZCNU5dB8c8pQ+ie2SouM5jF7vUyzSIQIDAQABAoIBACPHBob4YWek5Q7b +R1KeSp1xDBhN4nrShRuFkaNwtluhpEZFIpAMtFbSy2t42q3CJkaDe9HPLjksEraz +d9ObaOTa2aLgdsScZI1Akbwyt8gHW1J7CLmg41/nFtD7atckYhTF5knXQPzDpyee +PGPui6eRS2Kj/9sj0+oP2dVXMJZwfCVRIQLHX1KQg0n23v2EnTKUhQeQ/XpOeQtr +7ywy5PGyIOLahqa1P5xyWTgRUMQGU1OpPgcclVG6kcW1EGA1gmkYggEoonocdCoL +dMXpSC6n72tUopxJzkCZL03PbfNSaMD1zDCoMHpbkNqDVnOqE8J8w4PbjT4Nfpls +MY+kVZMCgYEAto9QCjNtFA6jvSJiczdx2VrIqhf/rCVbX7zXbZcvK0JMwQC7naO9 +8J8JCZRLVKUhy5BzRyUzf8CjohZyQUgsHS4dXEAbEt5/9UKUb2PVNZNpNgiwyBS0 +43jiR8Sk6O6VdPgPukdyhBYpRtc6MUjUNjKzZnC6DsVJLafhqoiRUjcCgYEA1VUv +pijruhVy1FZKGANzdF82j/orpsUxWeLMcXOJOVod66ApXQONR4jhytwcs+YMBwCp +dZuTjjTVvMsal3X6aKC1jqWgc8HtodkvnBbvpHaA8pTHHlMZeV9APe6ABK8O+5Jf +YCLS6B53O4Jzed0iXb3ifQyePYVf1aVgCzuKMmcCgYBqZ4FpUKwFArtg44eqS+ip +nQZUTYSRm3x0gqa1k2bEufHulDTGHsf4VVl5Idv+S/k0os7t6rrnfSpisN6LOjus +DlLCkAK3rsO5+cJ2YiPFBFRQUCRpmdgMtUp+NhlAQr8N2/+evUH7xsbsKUobq600 +obx6ur1XGpoaaD1ZnhvFZQKBgBCcbHRktU4tA17zRRuvkRrbmxUE5sc6jYgxN7gR +xfqYQMIrG21prGOIjgRMtVNwszkMXSWSMqAdDxN/QRfQEOp6sQhlbyP0UruKfMGL +q6dDrvC0kviSgeZIe2kXHBLBSspd8F08+O0oCiKxGqq/hECGW49fHyctLnQlR2A4 +h587AoGBAKXXGEwOSE96Cx0rJhC5sol2/mwM66831kcnAEoXIjb/EoB63jJWDZCZ +S3niNj85FNQLZAafmulxZPOlXq4+pXlE02YefBJBvEWf3zuh8ZToz519CYNuJZ9K +HBktvpst3yMZ1rz3esJPJRT8ZgrBXMZtoAJ1oTMIJRLkeIA4x4/j -----END RSA PRIVATE KEY----- diff --git a/src/templates/tor-keys/ed25519_master_id_public_key b/src/templates/tor-keys/ed25519_master_id_public_key deleted file mode 100644 index 48d0380d067533a65596edc2c001f2efda6f1da6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 64 zcmcDuRY*-SGBq`{EHl(CC{4=AOtw-esVqn}P_VUSU|?wa;=?yfU3z2d&4}(xwKC5? 
UmT%2|VAISsHPx~5-^F8n0Jgjtn*aa+ diff --git a/src/templates/tor-keys/ed25519_master_id_secret_key b/src/templates/tor-keys/ed25519_master_id_secret_key deleted file mode 100644 index a4649aa104ac7267abf61cb76ff616e70a7ce0af..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 96 zcmV-m0H6OoJs@RdGBq_ZId(BEb7f<4Wpp|qba`-PFd#iW0002!q4`P^LyxKfJYKBS z8Pd6O^F5d7Loyu=aZBN(0`_OH()79AOIFQB(YwbgLr@m#j&#)eRy;uEuYK57tSU0{ Cu_@gE diff --git a/src/templates/tor-keys/ed25519_signing_cert b/src/templates/tor-keys/ed25519_signing_cert deleted file mode 100644 index aeb02572e11d9023da510064e4e1219a682cc74b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 172 zcmV;d08{@xJs@RdGBq_ZId(BEV`Xx5Iv{j;aAhP-%IEyfm_$FnJJPDlX&AOrw{^h^$#CmXSa*;tFxeH`cUceQKa zJcA0AWkGrV(Z-H>Q~XQ8c(Io`sH6QnjZx#t;srmTMItzyrO=+EUG4bBS4`^MWyQiP a+2w$?c-oO+fQF=YG>WSIs9tLmjJpLPR8DjN diff --git a/src/templates/tor-keys/ed25519_signing_secret_key b/src/templates/tor-keys/ed25519_signing_secret_key deleted file mode 100644 index a6b78b1c9389de61a1ffdf7e0a3aad38cdd77255..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 96 zcmV-m0H6OoJs@RdGBq_ZId(BEb7f<4Wpp|qba`-PG$1`a0001hO2otXIFdQfi(Iu( zl{Y^$aGiBkBP0OIU*??_rdedSTEKNU&fV5tQaPu@bb)T$+Ibty7hK0uRMNcWjflm! C`YA8~ diff --git a/src/templates/tor-keys/secret_id_key b/src/templates/tor-keys/secret_id_key deleted file mode 100644 index 12255d84f..000000000 --- a/src/templates/tor-keys/secret_id_key +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXAIBAAKBgQDfv1F98ex0d95PYzSN8MbMHl2cYNnmQQEORNBQvygxojI9D7Pt -Yrj9f3fWJbphzhRYXZL3doxbZ5BVZJkGPKPwxFijxfzj7ExUyqvwUU9II9KIyygF -xnEk1DLkZKIlfmoEBb53MjzD32igzVZ0BtkZoZw/CSTD1rYT4gX1hryzAwIDAQAB -AoGALSeFIK+0EoUmXksdDhArboIqTibVkrxHpEOK7uDFEc8z6FLE8wFbZ/1Th+ox -RQ+712F7CWnpRYEPBcy5fSK7yriXYgYSnqzTuIGEnQ1MPUdDRNTk5OrS5vlU0G9o -g8xvttJq/5qckvaL7sdmtbrNHpROnWj3LsU84r/6VJlwytECQQDz2J8Acb2iuwUc -ZxsYcVwbYRw8Xfx+Lq1yaRBcc++SNZFJ0xuhqnia/Rga8aHUlXHSvaFjxd5aUcqz -4gfKhxJdAkEA6uY+gFCQe+x9c969QugrX/WfWCI5mY67brjgiBC6TEvr+fBZ3nSw -FuC6JP16VJXGXNc6NDSDJnVligVV1c9E3wJAMOfJO/WT6wdHRyMGtChIKNWZLCJN -eaEy+DdKKMC308MamIvWht4BwlPwebbslg5C/dk6fSA6MDMnTFyQ43WjoQJBANjq -1jzYKRTHRvKBdnUawTNUN1nEATz0udF9mJsOF3vtgHTGp7buOs5lqIAQM694wD1r -46vh8lrJfoL+ek1/ojECQFZOvdd8nBh6JU98A/zbmlPQqoQ/3qhb5H9WNFAL8PAs -u9vkLoJ0UDM9gxQUJelHEutG63n9oDwuCpeoriiRF4s= ------END RSA PRIVATE KEY----- diff --git a/src/templates/tor-keys/secret_onion_key b/src/templates/tor-keys/secret_onion_key deleted file mode 100644 index 57aa15ede..000000000 --- a/src/templates/tor-keys/secret_onion_key +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQCbSRxFx7y5rAtONk6YUGf5jCSMufLZKuuako+72KA8KxpeW5AY -pCPhlAOvGm4QZGoRRq3Sj3hdCF9eu9SVsOhuHYymhgIeLhcFahlACbl7EVLT39NW -WTHlgJBB4E5d2yUK1J8MMxRkzTxjEn/ReRaexQS69Ns8CIFkAm9HUmJ9SwIDAQAB -AoGABifhzQpb+uhNEm6osvUNCiF6GXINpHpFGL2SoRm3UGtNAkyS68cl/P1HIBvm -HRQ9+C1bGqvBU77POQwtIzn+HL+TQwOC4sKqh1h+g3ObYzEnV2EsO2d1IZBj6Gc1 -y3G59nSJAM7jdTvat04O3ojMR1LxaiGSg0SVhWEmjYx86BECQQDL9XoagMhF4Xe+ -pA8CyZrZtH0Iy/fzKvUgpHk23pjLy9H2LItvbQFj8L+hDWn2Fx06OfxGtv2LBK5y -YHzqc+/VAkEAwuhPbE2s3HRrRykgQ42naAvcNXdAFGKs00MCfd0U7CFunwroCsWH -WL8Cjc74l2nqEYStb//mSiuEnz07OiponwJAUQ7oTiwsbAlli7Cr/pGdJzSiFKLr -kgrajPSQ8AubcjM46LtfVxNdYBjIL/uixxj8lq2A46HKNjvQEMjkHFGjWQJANSkN -uJl7A2oRdIhID4TMWzmY51BOPwEm6DxHqrKpKTp5JJHh9kYoA1GdzbXc4dd2iy6n -63tdgW9bLd+SjuvQOwJAGCvD2rMN2KpnjRq/wBDkLxU9ss4xVQ2f4G6J8XF1gcuQ -6tBebDcc4Rw5KmGZMAoLyqLity44+qDgrxvWgQZFdg== 
------END RSA PRIVATE KEY----- diff --git a/src/templates/tor-keys/secret_onion_key_ntor b/src/templates/tor-keys/secret_onion_key_ntor deleted file mode 100644 index 90eb769a8f8dcfcde34f094a89c84ad35619450d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 96 zcmcDuRY*26H8r#>Gqh63&&$luQ?RvVK!A#w-9=9uZ%XAhUwx5~I{8uO&&}PdnfDo_ u#)f>`e7~lkeJ{_cPY str: @jsonrpc.method("up") def up(network: str = "warnet") -> str: - config_dir = gen_config_dir(network) - wn = Warnet.from_network(config_dir, network) + wn = Warnet.from_network(network=network, tanks=False) def thread_start(wn): try: wn.docker_compose_up() + # Update warnet from docker here to get ip addresses + wn = Warnet.from_docker_env(network) wn.apply_network_conditions() wn.connect_edges() logger.info(f"Resumed warnet named '{network}' from config dir {wn.config_dir}") From e80e0f24623216e001cdab2628ff1d5fb5eb7ed8 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 1 Sep 2023 23:00:36 +0100 Subject: [PATCH 04/28] include custom build dockerfile --- .gitignore | 2 +- src/templates/Dockerfile_custom_build | 75 +++++++++++++++++++++++++++ src/utils/gen_dockerfiles.py | 7 ++- 3 files changed, 79 insertions(+), 5 deletions(-) create mode 100644 src/templates/Dockerfile_custom_build diff --git a/.gitignore b/.gitignore index ecc95d9c8..891f24271 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,4 @@ __pycache__ warnet.egg-info .python-version .env -src/templates/Dockerfile* +src/templates/Dockerfile_[0-9]* diff --git a/src/templates/Dockerfile_custom_build b/src/templates/Dockerfile_custom_build new file mode 100644 index 000000000..66dae8cff --- /dev/null +++ b/src/templates/Dockerfile_custom_build @@ -0,0 +1,75 @@ +FROM debian:bookworm-slim + +ENV DEBIAN_FRONTEND=noninteractive + +ARG REPO +ARG BRANCH + +# Base requirements +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + # + # general packages + # + ccache \ + ca-certificates \ + python3 \ + vim \ + build-essential \ + wget \ + tor \ + iproute2 \ + gosu \ + # + # For building bitcoin core + # + autotools-dev \ + libtool \ + automake \ + pkg-config \ + libboost-dev \ + libevent-dev \ + libdb5.3++-dev \ + libminiupnpc-dev \ + libnatpmp-dev \ + libzmq3-dev \ + libqrencode-dev \ + libsqlite3-dev \ + git \ + && apt-get clean + +# Build from source +RUN if [ -n "${REPO}" ]; then \ + mkdir build; \ + cd /build; \ + git clone --depth 1 --branch "${BRANCH}" "https://github.com/${REPO}"; \ + cd /build/bitcoin; \ + ./autogen.sh; \ + ./configure \ + --disable-tests \ + --without-gui \ + --disable-bench \ + --disable-fuzz-binary \ + --enable-suppress-external-warnings; \ + make -j$(nproc); \ + make install; \ + fi + +# Create bitcoin and debian-tor users +RUN groupadd -r bitcoin && useradd -r -m -g bitcoin bitcoin + + +# Tor setup +RUN mkdir -p /home/debian-tor/.tor/keys \ + && chown -R debian-tor:debian-tor /home/debian-tor \ + && chown -R debian-tor:debian-tor /etc/tor + +COPY tor-keys/* /home/debian-tor/.tor/keys/ + +# Bitcoind setup +RUN usermod -a -G debian-tor bitcoin +RUN mkdir -p /home/bitcoin/.bitcoin \ + && chown -R bitcoin:bitcoin /home/bitcoin + +CMD gosu debian-tor tor & \ + gosu bitcoin /usr/local/bin/bitcoind --datadir=/home/bitcoin/.bitcoin --conf=/home/bitcoin/.bitcoin/bitcoin.conf diff --git a/src/utils/gen_dockerfiles.py b/src/utils/gen_dockerfiles.py index c73a2d6fd..76efe8bd3 100644 --- a/src/utils/gen_dockerfiles.py +++ b/src/utils/gen_dockerfiles.py @@ -6,11 +6,10 @@ dockerfile_template = """FROM {base_url}:{tag} 
RUN apt-get update && apt-get install -y --no-install-recommends \\ - python3 \\ - vim \\ tor \\ - iproute2; \\ - apt-get clean; + iproute2 \\ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* COPY tor-keys/* /home/debian-tor/.tor/keys/ COPY warnet_entrypoint.sh /warnet_entrypoint.sh From 33de2f0e1093441008d6f909431723af4ce36afc Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sat, 2 Sep 2023 20:14:25 +0100 Subject: [PATCH 05/28] add fork-monitor service --- src/templates/bitcoin.conf | 1 + src/templates/fork_observer_config.toml | 31 +++++++++++++++++++++++++ src/warnet/tank.py | 11 +++++++++ src/warnet/warnet.py | 15 ++++++++++-- 4 files changed, 56 insertions(+), 2 deletions(-) create mode 100644 src/templates/fork_observer_config.toml diff --git a/src/templates/bitcoin.conf b/src/templates/bitcoin.conf index fbe6d397c..e18a76e19 100644 --- a/src/templates/bitcoin.conf +++ b/src/templates/bitcoin.conf @@ -14,6 +14,7 @@ debugexclude=rand # RPC rpcallowip=0.0.0.0/0 rpcbind=0.0.0.0 +rest=1 # for forkmonitor # Wallet fallbackfee=0.00001000 # P2P diff --git a/src/templates/fork_observer_config.toml b/src/templates/fork_observer_config.toml new file mode 100644 index 000000000..7b5421de7 --- /dev/null +++ b/src/templates/fork_observer_config.toml @@ -0,0 +1,31 @@ +# fork-observer base configuration file + +# Database path of the key value store. Will be created if non-existing. +database_path = "db" + +# path to the location of the static www files +www_path = "./www" + +# Interval for checking for new blocks +query_interval = 10 + +# Webserver listen address +address = "0.0.0.0:2323" + +# Custom footer for the site. +footer_html = """ +
+
+ Warnet fork-observer +
+
+ """
+
+[[networks]]
+id = 0xDEADBE
+name = "Warnet"
+description = "A custom regtest network"
+min_fork_height = 0
+max_interesting_heights = 5
+
+
diff --git a/src/warnet/tank.py b/src/warnet/tank.py
index 27206bf1d..030568e5e 100644
--- a/src/warnet/tank.py
+++ b/src/warnet/tank.py
@@ -73,6 +73,17 @@ def from_graph_node(cls, index, warnet):
             self.conf = node["bitcoin_config"]
         if "tc_netem" in node:
             self.netem = node["tc_netem"]
+        with open(self.warnet.fork_observer_config, "a") as f:
+            f.write(f'''
+                [[networks.nodes]]
+                id = {self.index}
+                name = "Node {self.index}"
+                description = "Warnet tank {self.index}"
+                rpc_host = "{self.ipv4}"
+                rpc_port = {self.rpc_port}
+                rpc_user = "{self.rpc_user}"
+                rpc_password = "{self.rpc_password}"
+''')
         self.config_dir = self.warnet.config_dir / str(self.suffix)
         self.config_dir.mkdir(parents=True, exist_ok=True)
         self.write_torrc()
diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py
index 57828b56f..a71c9ba30 100644
--- a/src/warnet/warnet.py
+++ b/src/warnet/warnet.py
@@ -18,6 +18,7 @@ TMPDIR_PREFIX = "warnet_tmp_"
 TOR_DOCKERFILE = "Dockerfile_tor_da"
 TOR_DA_IP = "100.20.15.18"
+FO_CONF_NAME = "fork_observer_config.toml"
 logging.getLogger("docker.utils.config").setLevel(logging.WARNING)
 logging.getLogger("docker.auth").setLevel(logging.WARNING)
@@ -26,6 +27,7 @@ class Warnet:
     def __init__(self, config_dir):
         self.config_dir: Path = config_dir
+        self.config_dir.mkdir(parents=True, exist_ok=True)
         self.docker = docker.from_env()
         self.bitcoin_network:str = "regtest"
         self.docker_network:str = "warnet"
@@ -33,6 +35,9 @@ def __init__(self, config_dir):
         self.graph = None
         self.graph_name = "graph.graphml"
         self.tanks: List[Tank] = []
+        self.fork_observer_config = self.config_dir / FO_CONF_NAME
+        logger.info(f"copying config {TEMPLATES / FO_CONF_NAME} to {self.fork_observer_config}")
+        shutil.copy(TEMPLATES / FO_CONF_NAME, self.fork_observer_config)
     def __str__(self) -> str:
         tanks_str = ',\n'.join([str(tank) for tank in self.tanks])
@@ -59,7 +64,7 @@ def from_graph_file(cls, graph_file: str, config_dir: Path, network: str = "warn
         return self
     @classmethod
-    def from_graph(cls, graph, tanks=True):
+    def from_graph(cls, graph):
         self = cls(Path())
         self.graph = graph
         self.tanks_from_graph()
@@ -210,7 +215,6 @@ def write_docker_compose(self):
             "volumes": ["grafana-storage:/var/lib/grafana"],
             "networks": [self.docker_network],
         }
-        # Add Tor service
         compose["services"]["tor"] = {
             "build": {
                 "context": str(TEMPLATES),
@@ -223,6 +227,13 @@ def write_docker_compose(self):
                 }
             },
         }
+        compose["services"]["fork-observer"] = {
+            "image": "b10c/fork-observer:latest",
+            "container_name": "fork-observer",
+            "ports": ["12323:2323"],
+            "volumes": [f"{self.fork_observer_config}:/app/config.toml"],
+            "networks": [self.docker_network],
+        }

         docker_compose_path = self.config_dir / "docker-compose.yml"
         try:

From ba769bddd66de5b4d875bebe1508977f17d874ce Mon Sep 17 00:00:00 2001
From: willcl-ark
Date: Sat, 2 Sep 2023 22:26:49 +0100
Subject: [PATCH 06/28] move to Click

Gives more flexibility than Typer, can now run `warcli rpc 1 -netinfo 4`
which was a pain in Typer...
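As a minimal illustration of the Click pattern this commit relies on (a sketch for this message only, not code taken from the diff; it assumes nothing beyond the `click` package), the combination of `ignore_unknown_options` with a variadic argument is what lets flag-like tokens such as `-netinfo` fall through to bitcoin-cli instead of being rejected by the CLI parser:

```python
# Sketch of the pass-through pattern; the echo stands in for the real
# JSON-RPC forwarding that warcli performs.
import click


@click.group()
def cli():
    pass


@cli.command(context_settings={"ignore_unknown_options": True})
@click.argument("node", type=int)
@click.argument("method", type=str, nargs=-1)
def rpc(node, method):
    # With ignore_unknown_options, "-netinfo 4" is not parsed as options of
    # this command; it is captured by the variadic METHOD argument, so
    # `warcli rpc 1 -netinfo 4` can be forwarded to the node verbatim.
    click.echo(f"node={node}: bitcoin-cli {' '.join(method)}")


if __name__ == "__main__":
    cli()
```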
--- pyproject.toml | 16 ++--- src/warnet/cli.py | 143 +++++++++++++++++++++++++++++++----------- src/warnet/client.py | 4 +- src/warnet/warnetd.py | 42 ++++++++----- 4 files changed, 144 insertions(+), 61 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 68088f729..ada5aa6ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,22 +13,22 @@ classifiers = [ "Programming Language :: Python :: 3", ] dependencies = [ + "click==8.1.7", "docker==6.1.3", - "networkx==3.1", - "PyYAML==6.0.1", - "typer[all]==0.9.0", + "flask==2.3.3", + "Flask-JSONRPC==2.2.2", + "gunicorn==21.2.0", "jsonschema", "jsonrpcserver==5.0.3", "jsonrpcclient==4.0.0", - "gunicorn==21.2.0", - "flask==2.3.3", - "Flask-JSONRPC==2.2.2", + "networkx==3.1", + "PyYAML==6.0.1", ] dynamic = ["version"] [project.scripts] -warnetd = "warnet.warnetd:run_gunicorn" -warnet = "warnet.cli:cli" +warnet = "warnet.warnetd:run_gunicorn" +warcli = "warnet.cli:cli" [tool.black] line-length = 88 diff --git a/src/warnet/cli.py b/src/warnet/cli.py index 38cbed845..cacb99285 100644 --- a/src/warnet/cli.py +++ b/src/warnet/cli.py @@ -1,23 +1,19 @@ -import os import requests -from typing_extensions import Annotated from typing import Optional, Any, Tuple, Dict, Union from pathlib import Path from jsonrpcclient import Ok, parse, request -import typer +import click from rich import print from templates import TEMPLATES +from warnet import warnet from warnet.warnetd import WARNETD_PORT EXAMPLE_GRAPH_FILE = TEMPLATES / "example.graphml" -cli = typer.Typer() -debug = typer.Typer() -cli.add_typer(debug, name="debug", help="Various warnet debug commands") -def rpc(rpc_method, params: Optional[Union[Dict[str, Any], Tuple[Any, ...]]]): +def rpc_call(rpc_method, params: Optional[Union[Dict[str, Any], Tuple[Any, ...]]]): payload = request(rpc_method, params) response = requests.post(f"http://localhost:{WARNETD_PORT}/api", json=payload) parsed = parse(response.json()) @@ -29,45 +25,115 @@ def rpc(rpc_method, params: Optional[Union[Dict[str, Any], Tuple[Any, ...]]]): raise Exception(parsed.message) -@cli.command() -def bcli( - node: int, - method: str, - params: Annotated[Optional[list[str]], typer.Argument()] = None, - network: str = "warnet", -): +@click.group() +def cli(): + pass + +@click.group(name="debug") +def debug(): + """Debug commands""" +cli.add_command(debug) + +@click.group(name="scenarios") +def scenarios(): + """Scenario commands""" +cli.add_command(scenarios) + +@click.group(name="network") +def network(): + """Network commands""" +cli.add_command(network) + + +@cli.command(name="help") +@click.argument("command", required=False, default=None) +@click.pass_context +def help_command(ctx, command): + """ + Display help information for the given command. + If no command is given, display help for the main CLI. 
+ """ + if command is None: + # Display help for the main CLI + print(ctx.parent.get_help()) + return + + # Fetch the command object + cmd_obj = cli.get_command(ctx, command) + + if cmd_obj is None: + print(f"Unknown command: {command}") + return + + # Extract only the relevant help information (excluding the initial usage line) + help_info = cmd_obj.get_help(ctx).split("\n", 1)[-1].strip() + + # Extract the arguments of the command + arguments = [param.human_readable_name.upper() for param in cmd_obj.params if isinstance(param, click.Argument)] + + # Determine the correct usage string based on whether the command has subcommands + if isinstance(cmd_obj, click.Group) and cmd_obj.list_commands(ctx): + usage_str = f"Usage: warnet {command} [OPTIONS] COMMAND [ARGS...]\n\n{help_info}" + else: + args_str = " ".join(arguments) + usage_str = f"Usage: warnet {command} [OPTIONS] {args_str}\n\n{help_info}" + + print(usage_str) + +cli.add_command(help_command) + + +@cli.command(context_settings={"ignore_unknown_options": True}) +@click.argument('node', type=int) +@click.argument('method', type=str, nargs=-1) # this will capture all remaining arguments +@click.option('--params', type=str, multiple=True, default=()) +@click.option('--network', default='warnet', show_default=True) +def rpc(node, method, params, network): """ Call bitcoin-cli on in <--network> """ + if len(method) > 2: + raise click.BadArgumentUsage("You can provide at most two arguments for 'method'.") + + # Convert tuple to space-separated string + method_str = " ".join(method) + try: - result = rpc( + result = rpc_call( "bcli", - {"network": network, "node": node, "method": method, "params": params}, + {"network": network, "node": node, "method": method_str, "params": params}, ) print(result) except Exception as e: - print(f"bitcoin-cli {method} {params} failed on node {node}:\n{e}") + print(f"bitcoin-cli {method_str} {params} failed on node {node}:\n{e}") @cli.command() -def debug_log(node: int, network: str = "warnet"): +@click.argument('node', type=int, required=True) +@click.option('--network', default='warnet', show_default=True) +def debug_log(node, network): """ - Fetch the Bitcoin Core debug log from in + Fetch the Bitcoin Core debug log from in [network] """ try: - result = rpc("debug_log", {"node": node, "network": network}) + result = rpc_call("debug_log", {"node": node, "network": network}) print(result) except Exception as e: print(f"In our pursuit of knowledge from node {node}, we were thwarted: {e}") @cli.command() -def messages(node_a: int, node_b: int, network: str = "warnet"): +@click.argument('node_a', type=int, required=True) +@click.argument('node_b', type=int, required=True) +@click.option('--network', default='warnet', show_default=True) +def messages(node_a, node_b, network): """ Fetch messages sent between and in """ + import logging + logging.warning(f"got args: {node_a}, {node_b}, {network}") try: - result = rpc( + result = rpc_call( "messages", {"network": network, "node_a": node_a, "node_b": node_b} ) print(result) @@ -77,72 +143,79 @@ def messages(node_a: int, node_b: int, network: str = "warnet"): ) -@cli.command() +@scenarios.command() def list(): """ List available scenarios in the Warnet Test Framework """ try: - result = rpc("list", None) + result = rpc_call("list", None) print(result) except Exception as e: print(f"Error listing scenarios: {e}") -@cli.command() -def run(scenario: str): +@scenarios.command() +@click.argument('scenario', type=str) +def run(scenario): """ Run from the Warnet Test 
Framework """ try: - res = rpc("run", {"scenario": scenario}) + res = rpc_call("run", {"scenario": scenario}) print(res) except Exception as e: print(f"Error running scenario: {e}") @debug.command() +@click.argument('graph_file', type=str) +@click.option('--network', default='warnet', show_default=True) def generate_compose(graph_file: str, network: str = "warnet"): """ Generate the docker-compose file for a given and <--network> name and return it. """ try: - result = rpc("generate_compose", {"graph_file": graph_file, "network": network}) + result = rpc_call("generate_compose", {"graph_file": graph_file, "network": network}) print(result) except Exception as e: print(f"Error generating compose: {e}") -@cli.command() +@network.command() +@click.argument('graph_file', default=EXAMPLE_GRAPH_FILE, type=click.Path()) +@click.option('--network', default='warnet', show_default=True) def start(graph_file: Path = EXAMPLE_GRAPH_FILE, network: str = "warnet"): """ Start a warnet with topology loaded from a into <--network> (default: "warnet") """ try: - result = rpc("from_file", {"graph_file": str(graph_file), "network": network}) + result = rpc_call("from_file", {"graph_file": str(graph_file), "network": network}) print(result) except Exception as e: print(f"Error creating network: {e}") -@cli.command() +@network.command() +@click.option('--network', default='warnet', show_default=True) def up(network: str = "warnet"): """ Run 'docker-compose up' on a warnet named <--network> (default: "warnet"). """ try: - result = rpc("up", {"network": network}) + result = rpc_call("up", {"network": network}) print(result) except Exception as e: print(f"Error creating network: {e}") -@cli.command() +@network.command() +@click.option('--network', default='warnet', show_default=True) def down(network: str = "warnet"): """ Run 'docker-compose down on a warnet named <--network> (default: "warnet"). """ try: - result = rpc("down", {"network": network}) + result = rpc_call("down", {"network": network}) print(result) except Exception as e: print(f"As we endeavored to cease operations, adversity struck: {e}") @@ -154,7 +227,7 @@ def stop(): Stop the warnetd daemon. 
""" try: - result = rpc("stop", None) + result = rpc_call("stop", None) print(result) except Exception as e: print(f"As we endeavored to cease operations, adversity struck: {e}") diff --git a/src/warnet/client.py b/src/warnet/client.py index 5f66a5200..2dfc4c659 100644 --- a/src/warnet/client.py +++ b/src/warnet/client.py @@ -45,8 +45,8 @@ def get_messages(network: str, src_index: int, dst_index: int) -> List[Optional[ subdir = ( "/" if src_node.bitcoin_network == "main" else f"{src_node.bitcoin_network}/" ) - exit_code, dirs = src_node.exec(f"ls /home/bitcoin/.bitcoin/{subdir}message_capture") - dirs = dirs.decode().splitlines() + dirs = src_node.exec(f"ls /home/bitcoin/.bitcoin/{subdir}message_capture") + dirs = dirs.splitlines() messages = [] for dir_name in dirs: if dst_ip in dir_name: diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index b65ceb37e..e15883749 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -66,35 +66,45 @@ def bcli(node: int, method: str, params: list[str] = [], network: str = "warnet" @jsonrpc.method("debug_log") -def debug_log(network: str, node: int): +def debug_log(network: str, node: int) -> str: """ Fetch the Bitcoin Core debug log from """ try: result = get_bitcoin_debug_log(network, node) - return result + return str(result) except Exception as e: raise Exception(f"{e}") @jsonrpc.method("messages") -def messages(network: str, node_a: int, node_b: int): +def messages(network: str, node_a: int, node_b: int) -> str: """ Fetch messages sent between and . """ try: messages = get_messages(network, node_a, node_b) - out = "" - for m in messages: - timestamp = datetime.utcfromtimestamp(m["time"] / 1e6).strftime( - "%Y-%m-%d %H:%M:%S" - ) - direction = ">>>" if m["outbound"] else "<<<" - body = "" - if "body" in m: - body = m["body"] - out = out + f"{timestamp} {direction} {m['msgtype']} {body}\n" - return out + if not messages: + return f"No messages found between {node_a} and {node_b}" + + # Convert each message dictionary to a string representation + messages_str_list = [] + for message in messages: + timestamp = datetime.utcfromtimestamp(message["time"] / 1e6).strftime("%Y-%m-%d %H:%M:%S") + direction = ">>>" if message.get("outbound", False) else "<<<" + msgtype = message.get("msgtype", "") + + # Handle the body dictionary in a special way + body_dict = message.get("body", {}) + body_str = ', '.join(f"{key}: {value}" for key, value in body_dict.items()) + + messages_str_list.append(f"{timestamp} {direction} {msgtype} {body_str}") + + # Join all message strings with newlines + result_str = '\n'.join(messages_str_list) + + return result_str + except Exception as e: raise Exception(f"{e}") @@ -222,7 +232,7 @@ def run_gunicorn(): Run the RPC server using gunicorn WSGI HTTP server """ parser = argparse.ArgumentParser(description='Run the Warnet RPC server.') - parser.add_argument('--no-daemon', default=False, action='store_true', help='Run server in the foreground instead of daemon mode.') + parser.add_argument('--daemon', default=False, action='store_true', help='Run server in the background.') args = parser.parse_args() command = [ @@ -236,7 +246,7 @@ def run_gunicorn(): ] # If in daemon mode, log to file and add daemon argument - if not args.no_daemon: + if args.daemon: command.extend([ "--daemon", "--access-logfile", From d4cdb2e7d13f4bb5867bcb2e2943726759adf07b Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sat, 2 Sep 2023 23:31:55 +0100 Subject: [PATCH 07/28] add force option to start to override dir exists --- src/warnet/cli.py | 5 
+++-- src/warnet/warnetd.py | 8 ++++++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/warnet/cli.py b/src/warnet/cli.py index cacb99285..5d2bd2740 100644 --- a/src/warnet/cli.py +++ b/src/warnet/cli.py @@ -183,13 +183,14 @@ def generate_compose(graph_file: str, network: str = "warnet"): @network.command() @click.argument('graph_file', default=EXAMPLE_GRAPH_FILE, type=click.Path()) +@click.option('--force', default=False, is_flag=True, type=bool) @click.option('--network', default='warnet', show_default=True) -def start(graph_file: Path = EXAMPLE_GRAPH_FILE, network: str = "warnet"): +def start(graph_file: Path = EXAMPLE_GRAPH_FILE, force: bool = False, network: str = "warnet"): """ Start a warnet with topology loaded from a into <--network> (default: "warnet") """ try: - result = rpc_call("from_file", {"graph_file": str(graph_file), "network": network}) + result = rpc_call("from_file", {"graph_file": str(graph_file), "force": force, "network": network}) print(result) except Exception as e: print(f"Error creating network: {e}") diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index e15883749..a81eb5916 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -2,6 +2,7 @@ import logging import os import pkgutil +import shutil import signal import subprocess import sys @@ -165,13 +166,16 @@ def thread_start(wn): @jsonrpc.method() -def from_file(graph_file: str, network: str = "warnet") -> str: +def from_file(graph_file: str, force: bool = False, network: str = "warnet") -> str: """ Run a warnet with topology loaded from a """ config_dir = gen_config_dir(network) if config_dir.exists(): - return f"Config dir {config_dir} already exists, not overwriting existing warnet" + if force: + shutil.rmtree(config_dir) + else: + return f"Config dir {config_dir} already exists, not overwriting existing warnet without --force" wn = Warnet.from_graph_file(graph_file, config_dir, network) def thread_start(wn): From 86124be3348518ba19b27cf33bd9ef6bbd04cf6d Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 3 Sep 2023 19:56:29 +0100 Subject: [PATCH 08/28] update README with revised commands --- README.md | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index aea898e8b..ddd7c9f0a 100644 --- a/README.md +++ b/README.md @@ -123,7 +123,7 @@ warnetd > [!NOTE] > `warnetd` also accepts a `--no-debug` option which prevents daemonization -Once `warnetd` is running it can be interacted with using the cli tool `warnet`. +Once `warnetd` is running it can be interacted with using the cli tool `warcli`. Run `warnet --help` to see a list of possible commands. All `warnet` commands accept a `--network` option, which allows you to specify the warnet you want to control. @@ -132,7 +132,17 @@ This is set by default to `--network="warnet"` to simplify default operation. To start an example warnet, with your venv active, run the following command to use the default graph and network: ```bash -warnet start +warcli start +``` + +To see available commands use: + +```bash +# All commands help +warcli help + +# Sub-command help +warcli help networks ``` Each container is a node as described in the graph, along with various data exporters and a demo grafana dashboard. @@ -150,31 +160,31 @@ See `/src/scenarios` for examples of how these can be written. 
To see available scenarios (loaded from the default directory):

```bash
-warnet list
+warcli scenarios list
```

-Once a scenarios is selected it can be run with `warnet run [--network=warnet]`, e.g.:
+Once a scenario is selected it can be run with `warcli scenarios run [--network=warnet]`, e.g.:

```bash
# Command one node to generate a wallet and fill 100 blocks with 100 txs each
-warnet run tx-flood.py
+warcli scenarios run tx-flood.py
```

-This will run the run the scenario in the background until it exits, or is killed by the user.
+This will run the scenario in the background until it exits or is killed by the user.

### Stopping

-Currently the warnet can be stopped, or stopped and removed, but **not** stopped, persisted and restarted.
+Currently the warnet can be stopped, but it can **not** yet be stopped, persisted and resumed.
Persisting the warnet during a stoppage is WIP.

-To stop the warnet, or remove it (which first stops, then deletes the containers):
+To stop the warnet and the warnetd daemon:

```bash
# stop but retain containers
-warnet stop
+warcli network down

-# stop and erase containers
-warnet wipe
+# stop warnetd
+warcli stop
```

## Remote / Cloud Deployment

From d4737fd0cff748be95311f2603d6aaa6c1f945a2 Mon Sep 17 00:00:00 2001
From: willcl-ark
Date: Sun, 3 Sep 2023 21:59:04 +0100
Subject: [PATCH 09/28] Add bind9 dns seed

---
 src/templates/Dockerfile_bind9 |  5 +++
 src/templates/dns-seed.zone    | 14 +++++++++
 src/templates/named.conf.local |  4 +++
 src/warnet/cli.py              | 14 +++++++++
 src/warnet/warnet.py           | 57 ++++++++++++++++++++++++++++++++--
 src/warnet/warnetd.py          | 17 ++++++++++
 6 files changed, 109 insertions(+), 2 deletions(-)
 create mode 100644 src/templates/Dockerfile_bind9
 create mode 100644 src/templates/dns-seed.zone
 create mode 100644 src/templates/named.conf.local

diff --git a/src/templates/Dockerfile_bind9 b/src/templates/Dockerfile_bind9
new file mode 100644
index 000000000..082261a2d
--- /dev/null
+++ b/src/templates/Dockerfile_bind9
@@ -0,0 +1,5 @@
+FROM ubuntu/bind9:9.16-20.04_beta
+COPY dns-seed.zone /etc/bind/dns-seed.zone
+COPY named.conf.local /etc/bind/named.conf.local
+
+CMD ["/usr/sbin/named", "-g", "-c", "/etc/bind/named.conf"]
diff --git a/src/templates/dns-seed.zone b/src/templates/dns-seed.zone
new file mode 100644
index 000000000..710b7b2dc
--- /dev/null
+++ b/src/templates/dns-seed.zone
@@ -0,0 +1,14 @@
+;
+; BIND data file for warnet dns seeder service
+;
+dns-seed. 300 IN SOA dns-seed. admin.warnet.com. (
+        2023082401 ; Serial
+        3600       ; Refresh
+        1800       ; Retry
+        604800     ; Expire
+        86400      ; Minimum TTL
+)
+dns-seed. 300 IN NS dns-seed.
+; following line likely needs to be changed to the container ip address
+dns-seed.
300 IN A 127.0.0.1 + diff --git a/src/templates/named.conf.local b/src/templates/named.conf.local new file mode 100644 index 000000000..c76093d72 --- /dev/null +++ b/src/templates/named.conf.local @@ -0,0 +1,4 @@ +zone "dns-seed" { + type master; + file "/etc/bind/dns-seed.zone"; +}; diff --git a/src/warnet/cli.py b/src/warnet/cli.py index 5d2bd2740..d95f04199 100644 --- a/src/warnet/cli.py +++ b/src/warnet/cli.py @@ -181,6 +181,20 @@ def generate_compose(graph_file: str, network: str = "warnet"): except Exception as e: print(f"Error generating compose: {e}") +@debug.command() +@click.argument('graph_file', type=str) +@click.option('--network', default='warnet', show_default=True) +def update_dns_seed(graph_file: Path = EXAMPLE_GRAPH_FILE, network: str = "warnet"): + """ + Update the dns seed database using a graph file + """ + try: + result = rpc_call("update_dns_seeder", {"graph_file": str(graph_file), "network": network}) + print(result) + except Exception as e: + print(f"Error updating dns seed addresses: {e}") + + @network.command() @click.argument('graph_file', default=EXAMPLE_GRAPH_FILE, type=click.Path()) @click.option('--force', default=False, is_flag=True, type=bool) diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py index a71c9ba30..2684d325f 100644 --- a/src/warnet/warnet.py +++ b/src/warnet/warnet.py @@ -15,10 +15,10 @@ from warnet.utils import parse_bitcoin_conf, gen_config_dir logger = logging.getLogger("Warnet") -TMPDIR_PREFIX = "warnet_tmp_" TOR_DOCKERFILE = "Dockerfile_tor_da" TOR_DA_IP = "100.20.15.18" FO_CONF_NAME = "fork_observer_config.toml" +ZONE_FILE_NAME = "dns-seed.zone" logging.getLogger("docker.utils.config").setLevel(logging.WARNING) logging.getLogger("docker.auth").setLevel(logging.WARNING) @@ -96,6 +96,10 @@ def from_docker_env(cls, network_name): break return self + @property + def zone_file_path(self): + return self.config_dir / ZONE_FILE_NAME + def tanks_from_graph(self): for node_id in self.graph.nodes(): if int(node_id) != len(self.tanks): @@ -116,6 +120,39 @@ def apply_network_conditions(self): for tank in self.tanks: tank.apply_network_conditions() + + def generate_zone_file_from_tanks(self): + records_list = [f"seed.dns-seed. 300 IN A {tank.ipv4}" for tank in self.tanks] + content = [] + with open(str(TEMPLATES / ZONE_FILE_NAME), 'r') as f: + content = [line.rstrip() for line in f] + + # TODO: Really we should also read active SOA value from dns-seed, and increment from there + + content.extend(records_list) + # Join the content into a single string and escape single quotes for echoing + content_str = '\n'.join(content).replace("'", "'\\''") + with open(self.config_dir / ZONE_FILE_NAME, 'w') as f: + f.write(content_str) + + def apply_zone_file(self): + """ + Sync the dns seed list served by dns-seed with currently active Tanks. 
+ """ + seeder = self.docker.containers.get("dns-seed") + + # Read the content from the generated zone file + with open(self.config_dir / ZONE_FILE_NAME, 'r') as f: + content_str = f.read().replace("'", "'\\''") + + # Overwrite all existing content + result = seeder.exec_run(f"sh -c 'echo \"{content_str}\" > /etc/bind/dns-seed.zone'") + logging.debug(f"result of updating {ZONE_FILE_NAME}: {result}") + + # Reload that single zone only + seeder.exec_run("rndc reload dns-seed") + + def connect_edges(self): for edge in self.graph.edges(): (src, dst) = edge @@ -173,7 +210,7 @@ def docker_compose_down(self): f"An error occurred while executing `{' '.join(command)}` in {self.config_dir}: {e}" ) - def write_docker_compose(self): + def write_docker_compose(self, dns=True): compose = { "version": "3.8", "networks": { @@ -234,6 +271,22 @@ def write_docker_compose(self): "volumes": [f"{self.fork_observer_config}:/app/config.toml"], "networks": [self.docker_network], } + if dns: + compose["services"]["dns-seed"] = { + "container_name": "dns-seed", + "ports": ["15353:53/udp", "15353:53/tcp"], + "build": { + "context": ".", + "dockerfile": str(TEMPLATES / "Dockerfile_bind9"), + }, + "networks": [ + "warnet" + ], + } + # Copy to tmpdir for dockerfile. Using volume means changes on container reflect on template + shutil.copy(str(TEMPLATES / 'dns-seed.zone'), self.config_dir) + shutil.copy(str(TEMPLATES / 'named.conf.local'), self.config_dir) + docker_compose_path = self.config_dir / "docker-compose.yml" try: diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index a81eb5916..a3ac3fbf2 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -184,6 +184,8 @@ def thread_start(wn): wn.write_docker_compose() wn.write_prometheus_config() wn.docker_compose_build_up() + wn.generate_zone_file_from_tanks() + wn.apply_zone_file() wn.apply_network_conditions() wn.connect_edges() logger.info(f"Created warnet named '{network}' from graph file {graph_file}") @@ -194,6 +196,21 @@ def thread_start(wn): return f"Starting warnet network named '{network}' with the following parameters:\n{wn}" +@jsonrpc.method() +def update_dns_seeder(graph_file: str, network: str = "warnet") -> str: + try: + config_dir = gen_config_dir(network) + wn = Warnet.from_graph_file(graph_file, config_dir, network) + wn.generate_zone_file_from_tanks() + wn.apply_zone_file() + with open(wn.zone_file_path, 'r') as f: + zone_file = f.read() + + return f"DNS seeder update using zone file:\n{zone_file}" + except Exception as e: + return f"DNS seeder not updated due to exception: {e}" + + @jsonrpc.method() def generate_compose(graph_file: str, network: str = "warnet") -> str: """ From ce7bdfabc9b48481567f835e926c96600de7c341 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 3 Sep 2023 22:19:56 +0100 Subject: [PATCH 10/28] Move services into services dir --- src/services/__init__.py | 0 src/services/base_service.py | 11 +++++ src/services/dns_seed.py | 24 +++++++++++ src/services/fork_observer.py | 16 +++++++ src/services/grafana.py | 15 +++++++ src/services/node_exporter.py | 13 ++++++ src/services/prometheus.py | 18 ++++++++ src/services/tor.py | 22 ++++++++++ src/warnet/warnet.py | 80 +++++++++-------------------------- 9 files changed, 139 insertions(+), 60 deletions(-) create mode 100644 src/services/__init__.py create mode 100644 src/services/base_service.py create mode 100644 src/services/dns_seed.py create mode 100644 src/services/fork_observer.py create mode 100644 src/services/grafana.py create mode 100644 
src/services/node_exporter.py create mode 100644 src/services/prometheus.py create mode 100644 src/services/tor.py diff --git a/src/services/__init__.py b/src/services/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/services/base_service.py b/src/services/base_service.py new file mode 100644 index 000000000..35040b148 --- /dev/null +++ b/src/services/base_service.py @@ -0,0 +1,11 @@ +from pathlib import Path + + +class BaseService: + def __init__(self, docker_network, config_dir=Path()): + self.docker_network = docker_network + self.config_dir = config_dir + self.service = {} + + def get_service(self): + return self.service diff --git a/src/services/dns_seed.py b/src/services/dns_seed.py new file mode 100644 index 000000000..f9b337b35 --- /dev/null +++ b/src/services/dns_seed.py @@ -0,0 +1,24 @@ +from .base_service import BaseService +import shutil + + +PORT = 15353 + + +class DnsSeed(BaseService): + def __init__(self, docker_network, templates, config_dir): + super().__init__(docker_network) + self.docker_network = docker_network + self.templates = templates + self.service = { + "container_name": "dns-seed", + "ports": [f"{PORT}:53/udp", f"{PORT}:53/tcp"], + "build": { + "context": ".", + "dockerfile": str(self.templates / "Dockerfile_bind9"), + }, + "networks": [self.docker_network], + } + # Copy files for dockerfile + shutil.copy(str(self.templates / "dns-seed.zone"), config_dir) + shutil.copy(str(self.templates / "named.conf.local"), config_dir) diff --git a/src/services/fork_observer.py b/src/services/fork_observer.py new file mode 100644 index 000000000..f6e90be83 --- /dev/null +++ b/src/services/fork_observer.py @@ -0,0 +1,16 @@ +from .base_service import BaseService + +PORT = 12323 + + +class ForkObserver(BaseService): + def __init__(self, docker_network, fork_observer_config): + super().__init__(docker_network) + self.fork_observer_config = fork_observer_config + self.service = { + "image": "b10c/fork-observer:latest", + "container_name": "fork-observer", + "ports": [f"{PORT}:2323"], + "volumes": [f"{self.fork_observer_config}:/app/config.toml"], + "networks": [self.docker_network], + } diff --git a/src/services/grafana.py b/src/services/grafana.py new file mode 100644 index 000000000..e82a0c1d7 --- /dev/null +++ b/src/services/grafana.py @@ -0,0 +1,15 @@ +from .base_service import BaseService + +PORT = 3000 + + +class Grafana(BaseService): + def __init__(self, docker_network): + super().__init__(docker_network) + self.service = { + "image": "grafana/grafana:latest", + "container_name": "grafana", + "ports": [f"3000:{PORT}"], + "volumes": ["grafana-storage:/var/lib/grafana"], + "networks": [self.docker_network], + } diff --git a/src/services/node_exporter.py b/src/services/node_exporter.py new file mode 100644 index 000000000..0a493bf49 --- /dev/null +++ b/src/services/node_exporter.py @@ -0,0 +1,13 @@ +from .base_service import BaseService + + +class NodeExporter(BaseService): + def __init__(self, docker_network): + super().__init__(docker_network) + self.service = { + "image": "prom/node-exporter:latest", + "container_name": "node-exporter", + "volumes": ["/proc:/host/proc:ro", "/sys:/host/sys:ro", "/:/rootfs:ro"], + "command": ["--path.procfs=/host/proc", "--path.sysfs=/host/sys"], + "networks": [self.docker_network], + } diff --git a/src/services/prometheus.py b/src/services/prometheus.py new file mode 100644 index 000000000..f49bef9d3 --- /dev/null +++ b/src/services/prometheus.py @@ -0,0 +1,18 @@ +from .base_service import BaseService + +PORT = 
9090 + + +class Prometheus(BaseService): + def __init__(self, docker_network, config_dir): + super().__init__(docker_network, config_dir) + self.service = { + "image": "prom/prometheus:latest", + "container_name": "prometheus", + "ports": [f"{PORT}:9090"], + "volumes": [ + f"{self.config_dir / 'prometheus.yml'}:/etc/prometheus/prometheus.yml" + ], + "command": ["--config.file=/etc/prometheus/prometheus.yml"], + "networks": [self.docker_network], + } diff --git a/src/services/tor.py b/src/services/tor.py new file mode 100644 index 000000000..0f7077e3a --- /dev/null +++ b/src/services/tor.py @@ -0,0 +1,22 @@ +from .base_service import BaseService + +DOCKERFILE = "Dockerfile_tor_da" +DIRECTORY_AUTHORITY_IP = "100.20.15.18" + + +class Tor(BaseService): + def __init__(self, docker_network, templates): + super().__init__(docker_network) + self.templates = templates + self.service = { + "build": { + "context": str(self.templates), + "dockerfile": DOCKERFILE, + }, + "container_name": "tor", + "networks": { + self.docker_network: { + "ipv4_address": DIRECTORY_AUTHORITY_IP, + } + }, + } diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py index 2684d325f..257d12e87 100644 --- a/src/warnet/warnet.py +++ b/src/warnet/warnet.py @@ -11,12 +11,17 @@ from pathlib import Path from templates import TEMPLATES from typing import List + +from services.prometheus import Prometheus +from services.node_exporter import NodeExporter +from services.grafana import Grafana +from services.tor import Tor +from services.fork_observer import ForkObserver +from services.dns_seed import DnsSeed from warnet.tank import Tank from warnet.utils import parse_bitcoin_conf, gen_config_dir logger = logging.getLogger("Warnet") -TOR_DOCKERFILE = "Dockerfile_tor_da" -TOR_DA_IP = "100.20.15.18" FO_CONF_NAME = "fork_observer_config.toml" ZONE_FILE_NAME = "dns-seed.zone" logging.getLogger("docker.utils.config").setLevel(logging.WARNING) @@ -227,65 +232,20 @@ def write_docker_compose(self, dns=True): for tank in self.tanks: tank.add_services(compose["services"]) - # Add global services - compose["services"]["prometheus"] = { - "image": "prom/prometheus:latest", - "container_name": "prometheus", - "ports": ["9090:9090"], - "volumes": [ - f"{self.config_dir / 'prometheus.yml'}:/etc/prometheus/prometheus.yml" - ], - "command": ["--config.file=/etc/prometheus/prometheus.yml"], - "networks": [self.docker_network], - } - compose["services"]["node-exporter"] = { - "image": "prom/node-exporter:latest", - "container_name": "node-exporter", - "volumes": ["/proc:/host/proc:ro", "/sys:/host/sys:ro", "/:/rootfs:ro"], - "command": ["--path.procfs=/host/proc", "--path.sysfs=/host/sys"], - "networks": [self.docker_network], - } - compose["services"]["grafana"] = { - "image": "grafana/grafana:latest", - "container_name": "grafana", - "ports": ["3000:3000"], - "volumes": ["grafana-storage:/var/lib/grafana"], - "networks": [self.docker_network], - } - compose["services"]["tor"] = { - "build": { - "context": str(TEMPLATES), - "dockerfile": TOR_DOCKERFILE, - }, - "container_name": "tor", - "networks": { - self.docker_network: { - "ipv4_address": TOR_DA_IP, - } - }, - } - compose["services"]["fork-observer"] = { - "image": "b10c/fork-observer:latest", - "container_name": "fork-observer", - "ports": ["12323:2323"], - "volumes": [f"{self.fork_observer_config}:/app/config.toml"], - "networks": [self.docker_network], - } + # Initialize services and add them to the compose + services = [ + Prometheus(self.docker_network, self.config_dir), + 
NodeExporter(self.docker_network), + Grafana(self.docker_network), + Tor(self.docker_network, TEMPLATES), + ForkObserver(self.docker_network, self.fork_observer_config), + ] if dns: - compose["services"]["dns-seed"] = { - "container_name": "dns-seed", - "ports": ["15353:53/udp", "15353:53/tcp"], - "build": { - "context": ".", - "dockerfile": str(TEMPLATES / "Dockerfile_bind9"), - }, - "networks": [ - "warnet" - ], - } - # Copy to tmpdir for dockerfile. Using volume means changes on container reflect on template - shutil.copy(str(TEMPLATES / 'dns-seed.zone'), self.config_dir) - shutil.copy(str(TEMPLATES / 'named.conf.local'), self.config_dir) + services.append(DnsSeed(self.docker_network, TEMPLATES, self.config_dir)) + + for service_obj in services: + service_name = service_obj.__class__.__name__.lower() + compose["services"][service_name] = service_obj.get_service() docker_compose_path = self.config_dir / "docker-compose.yml" From e51d5a076185b0ac2a1d6c674196946fac68d13d Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 3 Sep 2023 22:34:10 +0100 Subject: [PATCH 11/28] minor formatting fixups --- src/warnet/utils.py | 1 + src/warnet/warnetd.py | 13 ++++++------- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/warnet/utils.py b/src/warnet/utils.py index e746be343..0fb3a7e39 100644 --- a/src/warnet/utils.py +++ b/src/warnet/utils.py @@ -8,6 +8,7 @@ import time from io import BytesIO from pathlib import Path + from test_framework.p2p import MESSAGEMAP from test_framework.messages import ser_uint256 diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index a3ac3fbf2..bfe916c52 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -24,26 +24,25 @@ from warnet.utils import gen_config_dir WARNETD_PORT = 9276 -continue_running = True app = Flask(__name__) jsonrpc = JSONRPC(app, "/api") # Determine the log file path based on XDG_STATE_HOME -xdg_state_home = os.environ.get( +_xdg_state_home = os.environ.get( "XDG_STATE_HOME", os.path.join(os.environ["HOME"], ".local", "state") ) -log_file_path = os.path.join(xdg_state_home, "warnet", "warnet.log") +LOG_FILE_PATH = os.path.join(_xdg_state_home, "warnet", "warnet.log") # Ensure the directory exists -os.makedirs(os.path.dirname(log_file_path), exist_ok=True) +os.makedirs(os.path.dirname(LOG_FILE_PATH), exist_ok=True) # Configure root logger logging.basicConfig( level=logging.DEBUG, handlers=[ RotatingFileHandler( - log_file_path, maxBytes=16_000_000, backupCount=3, delay=True + LOG_FILE_PATH, maxBytes=16_000_000, backupCount=3, delay=True ) ], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", @@ -271,9 +270,9 @@ def run_gunicorn(): command.extend([ "--daemon", "--access-logfile", - log_file_path, + LOG_FILE_PATH, "--error-logfile", - log_file_path, + LOG_FILE_PATH, ]) subprocess.run(command) From 4bf48220b9a9b826f63ad635860ca53b58995470 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sun, 3 Sep 2023 22:35:05 +0100 Subject: [PATCH 12/28] black-ify everything --- pyproject.toml | 1 + src/warnet/cli.py | 85 +++++++++----- src/warnet/client.py | 3 + src/warnet/tank.py | 70 ++++++----- src/warnet/test_framework_bridge.py | 174 ++++++++++++++++++++-------- src/warnet/utils.py | 2 +- src/warnet/warnet.py | 66 ++++++----- src/warnet/warnetd.py | 65 +++++++---- 8 files changed, 307 insertions(+), 159 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ada5aa6ac..51313a203 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,6 +13,7 @@ classifiers = [ "Programming Language :: Python :: 3", ] 
dependencies = [ + "black==23.7.0", "click==8.1.7", "docker==6.1.3", "flask==2.3.3", diff --git a/src/warnet/cli.py b/src/warnet/cli.py index d95f04199..fcacee0cf 100644 --- a/src/warnet/cli.py +++ b/src/warnet/cli.py @@ -29,19 +29,28 @@ def rpc_call(rpc_method, params: Optional[Union[Dict[str, Any], Tuple[Any, ...]] def cli(): pass + @click.group(name="debug") def debug(): """Debug commands""" + + cli.add_command(debug) + @click.group(name="scenarios") def scenarios(): """Scenario commands""" + + cli.add_command(scenarios) + @click.group(name="network") def network(): """Network commands""" + + cli.add_command(network) @@ -69,31 +78,42 @@ def help_command(ctx, command): help_info = cmd_obj.get_help(ctx).split("\n", 1)[-1].strip() # Extract the arguments of the command - arguments = [param.human_readable_name.upper() for param in cmd_obj.params if isinstance(param, click.Argument)] + arguments = [ + param.human_readable_name.upper() + for param in cmd_obj.params + if isinstance(param, click.Argument) + ] # Determine the correct usage string based on whether the command has subcommands if isinstance(cmd_obj, click.Group) and cmd_obj.list_commands(ctx): - usage_str = f"Usage: warnet {command} [OPTIONS] COMMAND [ARGS...]\n\n{help_info}" + usage_str = ( + f"Usage: warnet {command} [OPTIONS] COMMAND [ARGS...]\n\n{help_info}" + ) else: args_str = " ".join(arguments) usage_str = f"Usage: warnet {command} [OPTIONS] {args_str}\n\n{help_info}" - + print(usage_str) + cli.add_command(help_command) @cli.command(context_settings={"ignore_unknown_options": True}) -@click.argument('node', type=int) -@click.argument('method', type=str, nargs=-1) # this will capture all remaining arguments -@click.option('--params', type=str, multiple=True, default=()) -@click.option('--network', default='warnet', show_default=True) +@click.argument("node", type=int) +@click.argument( + "method", type=str, nargs=-1 +) # this will capture all remaining arguments +@click.option("--params", type=str, multiple=True, default=()) +@click.option("--network", default="warnet", show_default=True) def rpc(node, method, params, network): """ Call bitcoin-cli on in <--network> """ if len(method) > 2: - raise click.BadArgumentUsage("You can provide at most two arguments for 'method'.") + raise click.BadArgumentUsage( + "You can provide at most two arguments for 'method'." 
+ ) # Convert tuple to space-separated string method_str = " ".join(method) @@ -109,8 +129,8 @@ def rpc(node, method, params, network): @cli.command() -@click.argument('node', type=int, required=True) -@click.option('--network', default='warnet', show_default=True) +@click.argument("node", type=int, required=True) +@click.option("--network", default="warnet", show_default=True) def debug_log(node, network): """ Fetch the Bitcoin Core debug log from in [network] @@ -123,14 +143,15 @@ def debug_log(node, network): @cli.command() -@click.argument('node_a', type=int, required=True) -@click.argument('node_b', type=int, required=True) -@click.option('--network', default='warnet', show_default=True) +@click.argument("node_a", type=int, required=True) +@click.argument("node_b", type=int, required=True) +@click.option("--network", default="warnet", show_default=True) def messages(node_a, node_b, network): """ Fetch messages sent between and in """ import logging + logging.warning(f"got args: {node_a}, {node_b}, {network}") try: result = rpc_call( @@ -156,7 +177,7 @@ def list(): @scenarios.command() -@click.argument('scenario', type=str) +@click.argument("scenario", type=str) def run(scenario): """ Run from the Warnet Test Framework @@ -169,49 +190,59 @@ def run(scenario): @debug.command() -@click.argument('graph_file', type=str) -@click.option('--network', default='warnet', show_default=True) +@click.argument("graph_file", type=str) +@click.option("--network", default="warnet", show_default=True) def generate_compose(graph_file: str, network: str = "warnet"): """ Generate the docker-compose file for a given and <--network> name and return it. """ try: - result = rpc_call("generate_compose", {"graph_file": graph_file, "network": network}) + result = rpc_call( + "generate_compose", {"graph_file": graph_file, "network": network} + ) print(result) except Exception as e: print(f"Error generating compose: {e}") + @debug.command() -@click.argument('graph_file', type=str) -@click.option('--network', default='warnet', show_default=True) +@click.argument("graph_file", type=str) +@click.option("--network", default="warnet", show_default=True) def update_dns_seed(graph_file: Path = EXAMPLE_GRAPH_FILE, network: str = "warnet"): """ Update the dns seed database using a graph file """ try: - result = rpc_call("update_dns_seeder", {"graph_file": str(graph_file), "network": network}) + result = rpc_call( + "update_dns_seeder", {"graph_file": str(graph_file), "network": network} + ) print(result) except Exception as e: print(f"Error updating dns seed addresses: {e}") @network.command() -@click.argument('graph_file', default=EXAMPLE_GRAPH_FILE, type=click.Path()) -@click.option('--force', default=False, is_flag=True, type=bool) -@click.option('--network', default='warnet', show_default=True) -def start(graph_file: Path = EXAMPLE_GRAPH_FILE, force: bool = False, network: str = "warnet"): +@click.argument("graph_file", default=EXAMPLE_GRAPH_FILE, type=click.Path()) +@click.option("--force", default=False, is_flag=True, type=bool) +@click.option("--network", default="warnet", show_default=True) +def start( + graph_file: Path = EXAMPLE_GRAPH_FILE, force: bool = False, network: str = "warnet" +): """ Start a warnet with topology loaded from a into <--network> (default: "warnet") """ try: - result = rpc_call("from_file", {"graph_file": str(graph_file), "force": force, "network": network}) + result = rpc_call( + "from_file", + {"graph_file": str(graph_file), "force": force, "network": network}, + ) print(result) except 
Exception as e: print(f"Error creating network: {e}") @network.command() -@click.option('--network', default='warnet', show_default=True) +@click.option("--network", default="warnet", show_default=True) def up(network: str = "warnet"): """ Run 'docker-compose up' on a warnet named <--network> (default: "warnet"). @@ -224,7 +255,7 @@ def up(network: str = "warnet"): @network.command() -@click.option('--network', default='warnet', show_default=True) +@click.option("--network", default="warnet", show_default=True) def down(network: str = "warnet"): """ Run 'docker-compose down on a warnet named <--network> (default: "warnet"). diff --git a/src/warnet/client.py b/src/warnet/client.py index 2dfc4c659..e5f0f426c 100644 --- a/src/warnet/client.py +++ b/src/warnet/client.py @@ -72,10 +72,12 @@ def stop_container(c): logger.info(f"stopping container: {c.name}") c.stop() + def stop_network(network="warnet") -> bool: """ Stop all containers in the network in parallel using a background thread """ + def thread_stop(): d = docker.from_env() network_obj = d.networks.get(network) @@ -87,6 +89,7 @@ def thread_stop(): threading.Thread(target=thread_stop).start() return True + def compose_down(network="warnet") -> bool: """ Run docker-compose down on a warnet diff --git a/src/warnet/tank.py b/src/warnet/tank.py index 030568e5e..b1495ad77 100644 --- a/src/warnet/tank.py +++ b/src/warnet/tank.py @@ -45,14 +45,16 @@ def __init__(self): self.config_dir = Path() def __str__(self) -> str: - return (f"Tank(\n" - f"\tIndex: {self.index}\n" - f"\tVersion: {self.version}\n" - f"\tConf: {self.conf}\n" - f"\tConf File: {self.conf_file}\n" - f"\tNetem: {self.netem}\n" - f"\tIPv4: {self._ipv4}\n" - f"\t)") + return ( + f"Tank(\n" + f"\tIndex: {self.index}\n" + f"\tVersion: {self.version}\n" + f"\tConf: {self.conf}\n" + f"\tConf File: {self.conf_file}\n" + f"\tNetem: {self.netem}\n" + f"\tIPv4: {self._ipv4}\n" + f"\t)" + ) @classmethod def from_graph_node(cls, index, warnet): @@ -67,14 +69,17 @@ def from_graph_node(cls, index, warnet): if "version" in node: if not "/" and "#" in self.version: if node["version"] not in SUPPORTED_TAGS: - raise Exception(f"Unsupported version: can't be generated from Docker images: {node['version']}") + raise Exception( + f"Unsupported version: can't be generated from Docker images: {node['version']}" + ) self.version = node["version"] if "bitcoin_config" in node: self.conf = node["bitcoin_config"] if "tc_netem" in node: self.netem = node["tc_netem"] with open(self.warnet.fork_observer_config, "a") as f: - f.write(f''' + f.write( + f""" [[networks.nodes]] id = {self.index} name = "Node {self.index}" @@ -83,7 +88,8 @@ def from_graph_node(cls, index, warnet): rpc_port = {self.rpc_port} rpc_user = "{self.rpc_user}" rpc_password = "{self.rpc_password}" -''') +""" + ) self.config_dir = self.warnet.config_dir / str(self.suffix) self.config_dir.mkdir(parents=True, exist_ok=True) self.write_torrc() @@ -133,8 +139,10 @@ def container(self) -> Container: def exec(self, cmd: str, user: str = "root"): result = self.container.exec_run(cmd=cmd, user=user) if result.exit_code != 0: - raise Exception(f"Command failed with exit code {result.exit_code}: {result.output.decode('utf-8')}") - return result.output.decode('utf-8') + raise Exception( + f"Command failed with exit code {result.exit_code}: {result.output.decode('utf-8')}" + ) + return result.output.decode("utf-8") def apply_network_conditions(self): if self.netem is None: @@ -181,7 +189,7 @@ def write_bitcoin_conf(self, base_bitcoin_conf): 
self.conf_file = path def write_torrc(self): - src_tor_conf_file = TEMPLATES / 'torrc' + src_tor_conf_file = TEMPLATES / "torrc" dest_path = self.config_dir / "torrc" shutil.copyfile(src_tor_conf_file, dest_path) @@ -214,23 +222,23 @@ def add_services(self, services): services[self.bitcoind_name].update({"entrypoint": "/warnet_entrypoint.sh"}) # Add the bitcoind service - services[self.bitcoind_name].update({ - "container_name": self.bitcoind_name, - "build": build, - "volumes": [ - f"{self.conf_file}:/home/bitcoin/.bitcoin/bitcoin.conf", - f"{self.torrc_file}:/etc/tor/torrc_original", - ], - "networks": { - self.docker_network: { - "ipv4_address": f"{self.ipv4}", - } - }, - "labels": { - "warnet": "tank" - }, - "privileged": True, - }) + services[self.bitcoind_name].update( + { + "container_name": self.bitcoind_name, + "build": build, + "volumes": [ + f"{self.conf_file}:/home/bitcoin/.bitcoin/bitcoin.conf", + f"{self.torrc_file}:/etc/tor/torrc_original", + ], + "networks": { + self.docker_network: { + "ipv4_address": f"{self.ipv4}", + } + }, + "labels": {"warnet": "tank"}, + "privileged": True, + } + ) # Add the prometheus data exporter in a neighboring container # services[self.exporter_name] = { diff --git a/src/warnet/test_framework_bridge.py b/src/warnet/test_framework_bridge.py index 82e60159d..5d85fb768 100644 --- a/src/warnet/test_framework_bridge.py +++ b/src/warnet/test_framework_bridge.py @@ -9,13 +9,10 @@ from test_framework.test_framework import ( BitcoinTestFramework, TMPDIR_PREFIX, - TestStatus + TestStatus, ) from test_framework.test_node import TestNode -from test_framework.util import ( - get_rpc_proxy, - PortSeed -) +from test_framework.util import get_rpc_proxy, PortSeed from warnet.warnet import Warnet @@ -39,7 +36,7 @@ def setup(self): logger.info(f"Adding TestNode {i} from {tank.bitcoind_name} with IP {ip}") node = TestNode( i, - "", # datadir path + "", # datadir path chain=tank.bitcoin_network, rpchost=ip, timewait=60, @@ -105,7 +102,7 @@ def setup(self): random.seed(seed) self.log.info("PRNG seed is: {}".format(seed)) - self.log.debug('Setting up network thread') + self.log.debug("Setting up network thread") self.network_thread = NetworkThread() self.network_thread.start() @@ -119,64 +116,143 @@ def setup(self): self.success = TestStatus.PASSED - def parse_args(self): previous_releases_path = "" parser = argparse.ArgumentParser(usage="%(prog)s [options]") - parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true", - help="Leave bitcoinds and test.* datadir on exit or error") - parser.add_argument("--nosandbox", dest="nosandbox", default=False, action="store_true", - help="Don't use the syscall sandbox") - parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true", - help="Don't stop bitcoinds after the test execution") - parser.add_argument("--cachedir", dest="cachedir", default=None, - help="Directory for caching pregenerated datadirs (default: %(default)s)") - parser.add_argument("--tmpdir", dest="tmpdir", default=None, - help="Root directory for datadirs") - parser.add_argument("-l", "--loglevel", dest="loglevel", default="DEBUG", - help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") - parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true", - help="Print out all RPC calls as they are made") - parser.add_argument("--portseed", dest="port_seed", default=0, - help="The seed to use for assigning port numbers (default: current process id)") - parser.add_argument("--previous-releases", dest="prev_releases", default=None, action="store_true", - help="Force test of previous releases (default: %(default)s)") - parser.add_argument("--coveragedir", dest="coveragedir", default=None, - help="Write tested RPC commands into this directory") - parser.add_argument("--configfile", dest="configfile", default=None, - help="Location of the test framework config file (default: %(default)s)") - parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", - help="Attach a python debugger if test fails") - parser.add_argument("--usecli", dest="usecli", default=False, action="store_true", - help="use bitcoin-cli instead of RPC for all commands") - parser.add_argument("--perf", dest="perf", default=False, action="store_true", - help="profile running nodes with perf for the duration of the test") - parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true", - help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown. valgrind 3.14 or later required.") - parser.add_argument("--randomseed", default=0x7761726e6574, # "warnet" ascii - help="set a random seed for deterministically reproducing a previous test run") - parser.add_argument("--timeout-factor", dest="timeout_factor", default=1, - help="adjust test timeouts by a factor. Setting it to 0 disables all timeouts") - parser.add_argument("--network", dest="network", default="warnet", - help="Designate which warnet this should run on (default: warnet)") + parser.add_argument( + "--nocleanup", + dest="nocleanup", + default=False, + action="store_true", + help="Leave bitcoinds and test.* datadir on exit or error", + ) + parser.add_argument( + "--nosandbox", + dest="nosandbox", + default=False, + action="store_true", + help="Don't use the syscall sandbox", + ) + parser.add_argument( + "--noshutdown", + dest="noshutdown", + default=False, + action="store_true", + help="Don't stop bitcoinds after the test execution", + ) + parser.add_argument( + "--cachedir", + dest="cachedir", + default=None, + help="Directory for caching pregenerated datadirs (default: %(default)s)", + ) + parser.add_argument( + "--tmpdir", dest="tmpdir", default=None, help="Root directory for datadirs" + ) + parser.add_argument( + "-l", + "--loglevel", + dest="loglevel", + default="DEBUG", + help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.", + ) + parser.add_argument( + "--tracerpc", + dest="trace_rpc", + default=False, + action="store_true", + help="Print out all RPC calls as they are made", + ) + parser.add_argument( + "--portseed", + dest="port_seed", + default=0, + help="The seed to use for assigning port numbers (default: current process id)", + ) + parser.add_argument( + "--previous-releases", + dest="prev_releases", + default=None, + action="store_true", + help="Force test of previous releases (default: %(default)s)", + ) + parser.add_argument( + "--coveragedir", + dest="coveragedir", + default=None, + help="Write tested RPC commands into this directory", + ) + parser.add_argument( + "--configfile", + dest="configfile", + default=None, + help="Location of the test framework config file (default: %(default)s)", + ) + parser.add_argument( + "--pdbonfailure", + dest="pdbonfailure", + default=False, + action="store_true", + help="Attach a python debugger if test fails", + ) + parser.add_argument( + "--usecli", + dest="usecli", + default=False, + action="store_true", + help="use bitcoin-cli instead of RPC for all commands", + ) + parser.add_argument( + "--perf", + dest="perf", + default=False, + action="store_true", + help="profile running nodes with perf for the duration of the test", + ) + parser.add_argument( + "--valgrind", + dest="valgrind", + default=False, + action="store_true", + help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown. valgrind 3.14 or later required.", + ) + parser.add_argument( + "--randomseed", + default=0x7761726E6574, # "warnet" ascii + help="set a random seed for deterministically reproducing a previous test run", + ) + parser.add_argument( + "--timeout-factor", + dest="timeout_factor", + default=1, + help="adjust test timeouts by a factor. 
Setting it to 0 disables all timeouts", + ) + parser.add_argument( + "--network", + dest="network", + default="warnet", + help="Designate which warnet this should run on (default: warnet)", + ) self.add_options(parser) # Running TestShell in a Jupyter notebook causes an additional -f argument # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument # source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168 - parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1") + parser.add_argument( + "-f", "--fff", help="a dummy argument to fool ipython", default="1" + ) self.options = parser.parse_args() if self.options.timeout_factor == 0: self.options.timeout_factor = 99999 - self.options.timeout_factor = self.options.timeout_factor or (4 if self.options.valgrind else 1) + self.options.timeout_factor = self.options.timeout_factor or ( + 4 if self.options.valgrind else 1 + ) self.options.previous_releases_path = previous_releases_path config = configparser.ConfigParser() if self.options.configfile is not None: config.read_file(open(self.options.configfile)) - config["environment"] = { - "PACKAGE_BUGREPORT": "" - } + config["environment"] = {"PACKAGE_BUGREPORT": ""} self.config = config diff --git a/src/warnet/utils.py b/src/warnet/utils.py index 0fb3a7e39..f4c00c26a 100644 --- a/src/warnet/utils.py +++ b/src/warnet/utils.py @@ -46,7 +46,7 @@ def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: - error_msg = str(e).replace('\n', ' ').replace('\t', ' ') + error_msg = str(e).replace("\n", " ").replace("\t", " ") logger.error(f"rpc error: {error_msg}") retries += 1 if retries == max_retries: diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py index 257d12e87..cfa9efec7 100644 --- a/src/warnet/warnet.py +++ b/src/warnet/warnet.py @@ -29,35 +29,40 @@ class Warnet: - def __init__(self, config_dir): self.config_dir: Path = config_dir self.config_dir.mkdir(parents=True, exist_ok=True) self.docker = docker.from_env() - self.bitcoin_network:str = "regtest" - self.docker_network:str = "warnet" + self.bitcoin_network: str = "regtest" + self.docker_network: str = "warnet" self.subnet: str = "100.0.0.0/8" self.graph = None self.graph_name = "graph.graphml" self.tanks: List[Tank] = [] self.fork_observer_config = self.config_dir / FO_CONF_NAME - logger.info(f"copying config {TEMPLATES / FO_CONF_NAME} to {self.fork_observer_config}") + logger.info( + f"copying config {TEMPLATES / FO_CONF_NAME} to {self.fork_observer_config}" + ) shutil.copy(TEMPLATES / FO_CONF_NAME, self.fork_observer_config) def __str__(self) -> str: - tanks_str = ',\n'.join([str(tank) for tank in self.tanks]) - return (f"Warnet(\n" - f"\tTemp Directory: {self.config_dir}\n" - f"\tBitcoin Network: {self.bitcoin_network}\n" - f"\tDocker Network: {self.docker_network}\n" - f"\tSubnet: {self.subnet}\n" - f"\tGraph: {self.graph}\n" - f"\tTanks: [\n{tanks_str}\n" - f"\t]\n" - f")") + tanks_str = ",\n".join([str(tank) for tank in self.tanks]) + return ( + f"Warnet(\n" + f"\tTemp Directory: {self.config_dir}\n" + f"\tBitcoin Network: {self.bitcoin_network}\n" + f"\tDocker Network: {self.docker_network}\n" + f"\tSubnet: {self.subnet}\n" + f"\tGraph: {self.graph}\n" + f"\tTanks: [\n{tanks_str}\n" + f"\t]\n" + f")" + ) @classmethod - def from_graph_file(cls, graph_file: str, config_dir: Path, network: str = "warnet"): + def from_graph_file( + cls, graph_file: 
str, config_dir: Path, network: str = "warnet" + ): self = cls(config_dir) destination = self.config_dir / self.graph_name destination.parent.mkdir(parents=True, exist_ok=True) @@ -77,10 +82,14 @@ def from_graph(cls, graph): return self @classmethod - def from_network(cls, config_dir: Path = Path(), network: str = "warnet", tanks=True): + def from_network( + cls, config_dir: Path = Path(), network: str = "warnet", tanks=True + ): self = cls(config_dir) self.config_dir = gen_config_dir(network) - self.graph = networkx.read_graphml(Path(self.config_dir / self.graph_name), node_type=int) + self.graph = networkx.read_graphml( + Path(self.config_dir / self.graph_name), node_type=int + ) if tanks: self.tanks_from_graph() return self @@ -89,7 +98,9 @@ def from_network(cls, config_dir: Path = Path(), network: str = "warnet", tanks= def from_docker_env(cls, network_name): config_dir = gen_config_dir(network_name) self = cls(config_dir) - self.graph = networkx.read_graphml(Path(self.config_dir / self.graph_name), node_type=int) + self.graph = networkx.read_graphml( + Path(self.config_dir / self.graph_name), node_type=int + ) self.docker_network = network_name index = 0 while index <= 999999: @@ -125,19 +136,20 @@ def apply_network_conditions(self): for tank in self.tanks: tank.apply_network_conditions() - def generate_zone_file_from_tanks(self): - records_list = [f"seed.dns-seed. 300 IN A {tank.ipv4}" for tank in self.tanks] + records_list = [ + f"seed.dns-seed. 300 IN A {tank.ipv4}" for tank in self.tanks + ] content = [] - with open(str(TEMPLATES / ZONE_FILE_NAME), 'r') as f: + with open(str(TEMPLATES / ZONE_FILE_NAME), "r") as f: content = [line.rstrip() for line in f] # TODO: Really we should also read active SOA value from dns-seed, and increment from there content.extend(records_list) # Join the content into a single string and escape single quotes for echoing - content_str = '\n'.join(content).replace("'", "'\\''") - with open(self.config_dir / ZONE_FILE_NAME, 'w') as f: + content_str = "\n".join(content).replace("'", "'\\''") + with open(self.config_dir / ZONE_FILE_NAME, "w") as f: f.write(content_str) def apply_zone_file(self): @@ -147,17 +159,18 @@ def apply_zone_file(self): seeder = self.docker.containers.get("dns-seed") # Read the content from the generated zone file - with open(self.config_dir / ZONE_FILE_NAME, 'r') as f: + with open(self.config_dir / ZONE_FILE_NAME, "r") as f: content_str = f.read().replace("'", "'\\''") # Overwrite all existing content - result = seeder.exec_run(f"sh -c 'echo \"{content_str}\" > /etc/bind/dns-seed.zone'") + result = seeder.exec_run( + f"sh -c 'echo \"{content_str}\" > /etc/bind/dns-seed.zone'" + ) logging.debug(f"result of updating {ZONE_FILE_NAME}: {result}") # Reload that single zone only seeder.exec_run("rndc reload dns-seed") - def connect_edges(self): for edge in self.graph.edges(): (src, dst) = edge @@ -247,7 +260,6 @@ def write_docker_compose(self, dns=True): service_name = service_obj.__class__.__name__.lower() compose["services"][service_name] = service_obj.get_service() - docker_compose_path = self.config_dir / "docker-compose.yml" try: with open(docker_compose_path, "w") as file: diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index bfe916c52..073607e8a 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -25,9 +25,6 @@ WARNETD_PORT = 9276 -app = Flask(__name__) -jsonrpc = JSONRPC(app, "/api") - # Determine the log file path based on XDG_STATE_HOME _xdg_state_home = os.environ.get( "XDG_STATE_HOME", 
os.path.join(os.environ["HOME"], ".local", "state") @@ -49,12 +46,16 @@ ) # Disable urllib3.connectionpool logging logging.getLogger("urllib3.connectionpool").setLevel(logging.CRITICAL) - logger = logging.getLogger("warnetd") +app = Flask(__name__) +jsonrpc = JSONRPC(app, "/api") + @jsonrpc.method("bcli") -def bcli(node: int, method: str, params: list[str] = [], network: str = "warnet") -> str: +def bcli( + node: int, method: str, params: list[str] = [], network: str = "warnet" +) -> str: """ Call bitcoin-cli on in [network] """ @@ -86,22 +87,24 @@ def messages(network: str, node_a: int, node_b: int) -> str: messages = get_messages(network, node_a, node_b) if not messages: return f"No messages found between {node_a} and {node_b}" - + # Convert each message dictionary to a string representation messages_str_list = [] for message in messages: - timestamp = datetime.utcfromtimestamp(message["time"] / 1e6).strftime("%Y-%m-%d %H:%M:%S") + timestamp = datetime.utcfromtimestamp(message["time"] / 1e6).strftime( + "%Y-%m-%d %H:%M:%S" + ) direction = ">>>" if message.get("outbound", False) else "<<<" msgtype = message.get("msgtype", "") - + # Handle the body dictionary in a special way body_dict = message.get("body", {}) - body_str = ', '.join(f"{key}: {value}" for key, value in body_dict.items()) + body_str = ", ".join(f"{key}: {value}" for key, value in body_dict.items()) messages_str_list.append(f"{timestamp} {direction} {msgtype} {body_str}") # Join all message strings with newlines - result_str = '\n'.join(messages_str_list) + result_str = "\n".join(messages_str_list) return result_str @@ -149,6 +152,7 @@ def run(scenario: str, network: str = "warnet") -> str: @jsonrpc.method("up") def up(network: str = "warnet") -> str: wn = Warnet.from_network(network=network, tanks=False) + def thread_start(wn): try: wn.docker_compose_up() @@ -156,7 +160,9 @@ def thread_start(wn): wn = Warnet.from_docker_env(network) wn.apply_network_conditions() wn.connect_edges() - logger.info(f"Resumed warnet named '{network}' from config dir {wn.config_dir}") + logger.info( + f"Resumed warnet named '{network}' from config dir {wn.config_dir}" + ) except Exception as e: logger.error(f"Exception {e}") @@ -187,7 +193,9 @@ def thread_start(wn): wn.apply_zone_file() wn.apply_network_conditions() wn.connect_edges() - logger.info(f"Created warnet named '{network}' from graph file {graph_file}") + logger.info( + f"Created warnet named '{network}' from graph file {graph_file}" + ) except Exception as e: logger.error(f"Exception {e}") @@ -202,7 +210,7 @@ def update_dns_seeder(graph_file: str, network: str = "warnet") -> str: wn = Warnet.from_graph_file(graph_file, config_dir, network) wn.generate_zone_file_from_tanks() wn.apply_zone_file() - with open(wn.zone_file_path, 'r') as f: + with open(wn.zone_file_path, "r") as f: zone_file = f.read() return f"DNS seeder update using zone file:\n{zone_file}" @@ -217,7 +225,9 @@ def generate_compose(graph_file: str, network: str = "warnet") -> str: """ config_dir = gen_config_dir(network) if config_dir.exists(): - return f"Config dir {config_dir} already exists, not overwriting existing warnet" + return ( + f"Config dir {config_dir} already exists, not overwriting existing warnet" + ) wn = Warnet.from_graph_file(graph_file, config_dir, network) wn.write_bitcoin_confs() wn.write_docker_compose() @@ -251,8 +261,13 @@ def run_gunicorn(): """ Run the RPC server using gunicorn WSGI HTTP server """ - parser = argparse.ArgumentParser(description='Run the Warnet RPC server.') - 
parser.add_argument('--daemon', default=False, action='store_true', help='Run server in the background.') + parser = argparse.ArgumentParser(description="Run the Warnet RPC server.") + parser.add_argument( + "--daemon", + default=False, + action="store_true", + help="Run server in the background.", + ) args = parser.parse_args() command = [ @@ -267,14 +282,16 @@ def run_gunicorn(): # If in daemon mode, log to file and add daemon argument if args.daemon: - command.extend([ - "--daemon", - "--access-logfile", - LOG_FILE_PATH, - "--error-logfile", - LOG_FILE_PATH, - ]) - + command.extend( + [ + "--daemon", + "--access-logfile", + LOG_FILE_PATH, + "--error-logfile", + LOG_FILE_PATH, + ] + ) + subprocess.run(command) From 1d90aea7ce4d7c2ae9bdaea69d4f72e33cefe284 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 4 Sep 2023 13:01:41 +0100 Subject: [PATCH 13/28] fixups to container naming and service classes --- src/services/dns_seed.py | 9 +++++--- src/warnet/tank.py | 32 ++++++++++++++++------------- src/warnet/test_framework_bridge.py | 2 +- src/warnet/utils.py | 20 ++++++++++++++++++ src/warnet/warnet.py | 25 +++++++++++++++++----- 5 files changed, 65 insertions(+), 23 deletions(-) diff --git a/src/services/dns_seed.py b/src/services/dns_seed.py index f9b337b35..d318194b3 100644 --- a/src/services/dns_seed.py +++ b/src/services/dns_seed.py @@ -3,6 +3,9 @@ PORT = 15353 +DNS_SEED_NAME = "dns-seed" +ZONE_FILE_NAME = "dns-seed.zone" +NAMED_CONF_NAME = "named.conf.local" class DnsSeed(BaseService): @@ -11,7 +14,7 @@ def __init__(self, docker_network, templates, config_dir): self.docker_network = docker_network self.templates = templates self.service = { - "container_name": "dns-seed", + "container_name": DNS_SEED_NAME, "ports": [f"{PORT}:53/udp", f"{PORT}:53/tcp"], "build": { "context": ".", @@ -20,5 +23,5 @@ def __init__(self, docker_network, templates, config_dir): "networks": [self.docker_network], } # Copy files for dockerfile - shutil.copy(str(self.templates / "dns-seed.zone"), config_dir) - shutil.copy(str(self.templates / "named.conf.local"), config_dir) + shutil.copy(str(self.templates / ZONE_FILE_NAME), config_dir) + shutil.copy(str(self.templates / NAMED_CONF_NAME), config_dir) diff --git a/src/warnet/tank.py b/src/warnet/tank.py index b1495ad77..fdaba8086 100644 --- a/src/warnet/tank.py +++ b/src/warnet/tank.py @@ -40,7 +40,7 @@ def __init__(self): self._container = None self._suffix = None self._ipv4 = None - self._bitcoind_name = None + self._container_name = None self._exporter_name = None self.config_dir = Path() @@ -118,10 +118,12 @@ def ipv4(self): return self._ipv4 @property - def bitcoind_name(self): - if self._bitcoind_name is None: - self._bitcoind_name = f"{CONTAINER_PREFIX_BITCOIND}_{self.suffix}" - return self._bitcoind_name + def container_name(self): + if self._container_name is None: + self._container_name = ( + f"{self.docker_network}_{CONTAINER_PREFIX_BITCOIND}_{self.suffix}" + ) + return self._container_name @property def exporter_name(self): @@ -132,7 +134,7 @@ def exporter_name(self): @property def container(self) -> Container: if self._container is None: - self._container = docker.from_env().containers.get(self.bitcoind_name) + self._container = docker.from_env().containers.get(self.container_name) return self._container @exponential_backoff() @@ -150,7 +152,7 @@ def apply_network_conditions(self): if not sanitize_tc_netem_command(self.netem): logger.warning( - f"Not applying unsafe tc-netem conditions to container {self.bitcoind_name}: `{self.netem}`" + f"Not 
applying unsafe tc-netem conditions to container {self.container_name}: `{self.netem}`" ) return @@ -158,11 +160,11 @@ def apply_network_conditions(self): rcode, result = self.exec(self.netem) if rcode == 0: logger.info( - f"Successfully applied network conditions to {self.bitcoind_name}: `{self.netem}`" + f"Successfully applied network conditions to {self.container_name}: `{self.netem}`" ) else: logger.error( - f"Error applying network conditions to {self.bitcoind_name}: `{self.netem}` ({result})" + f"Error applying network conditions to {self.container_name}: `{self.netem}` ({result})" ) def write_bitcoin_conf(self, base_bitcoin_conf): @@ -198,7 +200,7 @@ def write_torrc(self): def add_services(self, services): assert self.index is not None assert self.conf_file is not None - services[self.bitcoind_name] = {} + services[self.container_name] = {} # Setup bitcoind, either release binary or build from source if "/" and "#" in self.version: @@ -219,12 +221,14 @@ def add_services(self, services): "dockerfile": str(TEMPLATES / f"Dockerfile_{self.version}"), } # Use entrypoint for derived build, but not for compiled build - services[self.bitcoind_name].update({"entrypoint": "/warnet_entrypoint.sh"}) + services[self.container_name].update( + {"entrypoint": "/warnet_entrypoint.sh"} + ) # Add the bitcoind service - services[self.bitcoind_name].update( + services[self.container_name].update( { - "container_name": self.bitcoind_name, + "container_name": self.container_name, "build": build, "volumes": [ f"{self.conf_file}:/home/bitcoin/.bitcoin/bitcoin.conf", @@ -257,7 +261,7 @@ def add_services(self, services): def add_scrapers(self, scrapers): scrapers.append( { - "job_name": self.bitcoind_name, + "job_name": self.container_name, "scrape_interval": "5s", "static_configs": [{"targets": [f"{self.exporter_name}:9332"]}], } diff --git a/src/warnet/test_framework_bridge.py b/src/warnet/test_framework_bridge.py index 5d85fb768..ffd06511d 100644 --- a/src/warnet/test_framework_bridge.py +++ b/src/warnet/test_framework_bridge.py @@ -33,7 +33,7 @@ def setup(self): warnet = Warnet.from_docker_env(self.options.network) for i, tank in enumerate(warnet.tanks): ip = tank.ipv4 - logger.info(f"Adding TestNode {i} from {tank.bitcoind_name} with IP {ip}") + logger.info(f"Adding TestNode {i} from {tank.container_name} with IP {ip}") node = TestNode( i, "", # datadir path diff --git a/src/warnet/utils.py b/src/warnet/utils.py index f4c00c26a..7a1a298c0 100644 --- a/src/warnet/utils.py +++ b/src/warnet/utils.py @@ -1,4 +1,5 @@ import functools +import inspect import ipaddress import logging import os @@ -326,3 +327,22 @@ def gen_config_dir(network: str) -> Path: xdg_config = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config")) config_dir = Path(xdg_config) / "warnet" / network return config_dir + + +def bubble_exception_str(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + func_name = inspect.currentframe().f_code.co_name + local_vars = inspect.currentframe().f_locals + # Filter out the 'self' variable from the local_vars + context_str = ", ".join( + f"{k}={v}" for k, v in local_vars.items() if k != "self" + ) + raise Exception( + f"Exception in function '{func_name}' with context ({context_str}): {str(e)}" + ) + + return wrapper diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py index cfa9efec7..bf62e8992 100644 --- a/src/warnet/warnet.py +++ b/src/warnet/warnet.py @@ -17,13 +17,12 @@ from services.grafana import Grafana from 
services.tor import Tor from services.fork_observer import ForkObserver -from services.dns_seed import DnsSeed +from services.dns_seed import DnsSeed, ZONE_FILE_NAME, DNS_SEED_NAME from warnet.tank import Tank -from warnet.utils import parse_bitcoin_conf, gen_config_dir +from warnet.utils import parse_bitcoin_conf, gen_config_dir, bubble_exception_str -logger = logging.getLogger("Warnet") +logger = logging.getLogger("warnet") FO_CONF_NAME = "fork_observer_config.toml" -ZONE_FILE_NAME = "dns-seed.zone" logging.getLogger("docker.utils.config").setLevel(logging.WARNING) logging.getLogger("docker.auth").setLevel(logging.WARNING) @@ -60,6 +59,7 @@ def __str__(self) -> str: ) @classmethod + @bubble_exception_str def from_graph_file( cls, graph_file: str, config_dir: Path, network: str = "warnet" ): @@ -74,6 +74,7 @@ def from_graph_file( return self @classmethod + @bubble_exception_str def from_graph(cls, graph): self = cls(Path()) self.graph = graph @@ -82,6 +83,7 @@ def from_graph(cls, graph): return self @classmethod + @bubble_exception_str def from_network( cls, config_dir: Path = Path(), network: str = "warnet", tanks=True ): @@ -95,6 +97,7 @@ def from_network( return self @classmethod + @bubble_exception_str def from_docker_env(cls, network_name): config_dir = gen_config_dir(network_name) self = cls(config_dir) @@ -113,9 +116,11 @@ def from_docker_env(cls, network_name): return self @property + @bubble_exception_str def zone_file_path(self): return self.config_dir / ZONE_FILE_NAME + @bubble_exception_str def tanks_from_graph(self): for node_id in self.graph.nodes(): if int(node_id) != len(self.tanks): @@ -125,6 +130,7 @@ def tanks_from_graph(self): self.tanks.append(Tank.from_graph_node(node_id, self)) logger.info(f"Imported {len(self.tanks)} tanks from graph") + @bubble_exception_str def write_bitcoin_confs(self): with open(TEMPLATES / "bitcoin.conf", "r") as file: text = file.read() @@ -132,10 +138,12 @@ def write_bitcoin_confs(self): for tank in self.tanks: tank.write_bitcoin_conf(base_bitcoin_conf) + @bubble_exception_str def apply_network_conditions(self): for tank in self.tanks: tank.apply_network_conditions() + @bubble_exception_str def generate_zone_file_from_tanks(self): records_list = [ f"seed.dns-seed. 300 IN A {tank.ipv4}" for tank in self.tanks @@ -152,11 +160,12 @@ def generate_zone_file_from_tanks(self): with open(self.config_dir / ZONE_FILE_NAME, "w") as f: f.write(content_str) + @bubble_exception_str def apply_zone_file(self): """ Sync the dns seed list served by dns-seed with currently active Tanks. 
""" - seeder = self.docker.containers.get("dns-seed") + seeder = self.docker.containers.get(f"{self.docker_network}_{DNS_SEED_NAME}") # Read the content from the generated zone file with open(self.config_dir / ZONE_FILE_NAME, "r") as f: @@ -171,6 +180,7 @@ def apply_zone_file(self): # Reload that single zone only seeder.exec_run("rndc reload dns-seed") + @bubble_exception_str def connect_edges(self): for edge in self.graph.edges(): (src, dst) = edge @@ -180,6 +190,7 @@ def connect_edges(self): cmd = f"bitcoin-cli addpeeraddress {dst_ip} 18444" src_tank.exec(cmd=cmd, user="bitcoin") + @bubble_exception_str def docker_compose_build_up(self): command = ["docker-compose", "-p", self.docker_network, "up", "-d", "--build"] try: @@ -196,6 +207,7 @@ def docker_compose_build_up(self): f"An error occurred while executing `{' '.join(command)}` in {self.config_dir}: {e}" ) + @bubble_exception_str def docker_compose_up(self): command = ["docker-compose", "-p", self.docker_network, "up", "-d"] try: @@ -212,6 +224,7 @@ def docker_compose_up(self): f"An error occurred while executing `{' '.join(command)}` in {self.config_dir}: {e}" ) + @bubble_exception_str def docker_compose_down(self): command = ["docker-compose", "down"] try: @@ -228,6 +241,7 @@ def docker_compose_down(self): f"An error occurred while executing `{' '.join(command)}` in {self.config_dir}: {e}" ) + @bubble_exception_str def write_docker_compose(self, dns=True): compose = { "version": "3.8", @@ -270,6 +284,7 @@ def write_docker_compose(self, dns=True): f"An error occurred while writing to {docker_compose_path}: {e}" ) + @bubble_exception_str def write_prometheus_config(self): config = { "global": {"scrape_interval": "15s"}, From 290f2198d07786c1896c6778a090c4031c2f8f14 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 4 Sep 2023 13:21:32 +0100 Subject: [PATCH 14/28] WIP: use fluentd to collect logs --- src/services/fluentd.py | 24 ++++++++++++++++++++++++ src/templates/fluent.conf | 9 +++++++++ src/warnet/tank.py | 33 +++++++++++++++++++++------------ src/warnet/warnet.py | 3 +++ 4 files changed, 57 insertions(+), 12 deletions(-) create mode 100644 src/services/fluentd.py create mode 100644 src/templates/fluent.conf diff --git a/src/services/fluentd.py b/src/services/fluentd.py new file mode 100644 index 000000000..457f191f3 --- /dev/null +++ b/src/services/fluentd.py @@ -0,0 +1,24 @@ +from .base_service import BaseService + +FLUENT_IP = "100.102.108.117" +FLUENT_CONF = "fluent.conf" + +class Fluentd(BaseService): + PORT = 24224 + + def __init__(self, docker_network, config_dir): + super().__init__(docker_network, config_dir) + self.service = { + "image": "fluent/fluentd:v1.16-debian-1", # Debian version is recommended officially since it has jemalloc support. 
+ "container_name": f"{self.docker_network}_fluentd", + "ports": [f"{self.PORT}:{self.PORT}"], + "volumes": [ + f"{self.config_dir / FLUENT_CONF}:/fluentd/etc/{FLUENT_CONF}" + ], + "command": ["/bin/sh", "-c", f"sleep 10 && fluentd -c /fluentd/etc/{FLUENT_CONF}"], + "networks": { + self.docker_network: { + "ipv4_address": f"{FLUENT_IP}", + } + }, + } diff --git a/src/templates/fluent.conf b/src/templates/fluent.conf new file mode 100644 index 000000000..c9f9444f6 --- /dev/null +++ b/src/templates/fluent.conf @@ -0,0 +1,9 @@ + + @type forward + port 24224 + bind 0.0.0.0 + + + + @type stdout + diff --git a/src/warnet/tank.py b/src/warnet/tank.py index fdaba8086..bd59b322e 100644 --- a/src/warnet/tank.py +++ b/src/warnet/tank.py @@ -9,6 +9,7 @@ from pathlib import Path from docker.api import service from docker.models.containers import Container +from services.fluentd import FLUENT_IP from templates import TEMPLATES from warnet.utils import ( exponential_backoff, @@ -241,22 +242,30 @@ def add_services(self, services): }, "labels": {"warnet": "tank"}, "privileged": True, + # "depends_on": ["fluentd"], + # "logging": { + # "driver": "fluentd", + # "options": { + # "fluentd-address": f"{FLUENT_IP}:24224", + # "tag": "{{.Name}}" + # } + # } } ) # Add the prometheus data exporter in a neighboring container - # services[self.exporter_name] = { - # "image": "jvstein/bitcoin-prometheus-exporter", - # "container_name": self.exporter_name, - # "environment": { - # "BITCOIN_RPC_HOST": self.bitcoind_name, - # "BITCOIN_RPC_PORT": self.rpc_port, - # "BITCOIN_RPC_USER": self.rpc_user, - # "BITCOIN_RPC_PASSWORD": self.rpc_password, - # }, - # "ports": [f"{8335 + self.index}:9332"], - # "networks": [self.docker_network], - # } + services[self.exporter_name] = { + "image": "jvstein/bitcoin-prometheus-exporter", + "container_name": self.exporter_name, + "environment": { + "BITCOIN_RPC_HOST": self.container_name, + "BITCOIN_RPC_PORT": self.rpc_port, + "BITCOIN_RPC_USER": self.rpc_user, + "BITCOIN_RPC_PASSWORD": self.rpc_password, + }, + "ports": [f"{8335 + self.index}:9332"], + "networks": [self.docker_network], + } def add_scrapers(self, scrapers): scrapers.append( diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py index bf62e8992..94b3ee9f6 100644 --- a/src/warnet/warnet.py +++ b/src/warnet/warnet.py @@ -17,6 +17,7 @@ from services.grafana import Grafana from services.tor import Tor from services.fork_observer import ForkObserver +from services.fluentd import FLUENT_CONF, Fluentd, FLUENT_IP from services.dns_seed import DnsSeed, ZONE_FILE_NAME, DNS_SEED_NAME from warnet.tank import Tank from warnet.utils import parse_bitcoin_conf, gen_config_dir, bubble_exception_str @@ -43,6 +44,7 @@ def __init__(self, config_dir): f"copying config {TEMPLATES / FO_CONF_NAME} to {self.fork_observer_config}" ) shutil.copy(TEMPLATES / FO_CONF_NAME, self.fork_observer_config) + shutil.copy(TEMPLATES / FLUENT_CONF, self.config_dir) def __str__(self) -> str: tanks_str = ",\n".join([str(tank) for tank in self.tanks]) @@ -266,6 +268,7 @@ def write_docker_compose(self, dns=True): Grafana(self.docker_network), Tor(self.docker_network, TEMPLATES), ForkObserver(self.docker_network, self.fork_observer_config), + # Fluentd(self.docker_network, self.config_dir), ] if dns: services.append(DnsSeed(self.docker_network, TEMPLATES, self.config_dir)) From 909134085f7258d341d4250b2217cecd6121fe9e Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 4 Sep 2023 14:13:57 +0100 Subject: [PATCH 15/28] fix scenarios --- src/services/dns_seed.py | 
2 +- src/services/fork_observer.py | 2 +- src/services/grafana.py | 2 +- src/services/node_exporter.py | 2 +- src/services/prometheus.py | 3 ++- src/services/tor.py | 2 +- src/warnet/cli.py | 9 ++++++--- src/warnet/tank.py | 2 +- src/warnet/warnetd.py | 6 +++--- 9 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/services/dns_seed.py b/src/services/dns_seed.py index d318194b3..b36853d5c 100644 --- a/src/services/dns_seed.py +++ b/src/services/dns_seed.py @@ -14,7 +14,7 @@ def __init__(self, docker_network, templates, config_dir): self.docker_network = docker_network self.templates = templates self.service = { - "container_name": DNS_SEED_NAME, + "container_name": f"{self.docker_network}_{DNS_SEED_NAME}", "ports": [f"{PORT}:53/udp", f"{PORT}:53/tcp"], "build": { "context": ".", diff --git a/src/services/fork_observer.py b/src/services/fork_observer.py index f6e90be83..2f952be31 100644 --- a/src/services/fork_observer.py +++ b/src/services/fork_observer.py @@ -9,7 +9,7 @@ def __init__(self, docker_network, fork_observer_config): self.fork_observer_config = fork_observer_config self.service = { "image": "b10c/fork-observer:latest", - "container_name": "fork-observer", + "container_name": f"{self.docker_network}_fork-observer", "ports": [f"{PORT}:2323"], "volumes": [f"{self.fork_observer_config}:/app/config.toml"], "networks": [self.docker_network], diff --git a/src/services/grafana.py b/src/services/grafana.py index e82a0c1d7..1196e0dd6 100644 --- a/src/services/grafana.py +++ b/src/services/grafana.py @@ -8,7 +8,7 @@ def __init__(self, docker_network): super().__init__(docker_network) self.service = { "image": "grafana/grafana:latest", - "container_name": "grafana", + "container_name": f"{self.docker_network}_grafana", "ports": [f"3000:{PORT}"], "volumes": ["grafana-storage:/var/lib/grafana"], "networks": [self.docker_network], diff --git a/src/services/node_exporter.py b/src/services/node_exporter.py index 0a493bf49..c9add95e7 100644 --- a/src/services/node_exporter.py +++ b/src/services/node_exporter.py @@ -6,7 +6,7 @@ def __init__(self, docker_network): super().__init__(docker_network) self.service = { "image": "prom/node-exporter:latest", - "container_name": "node-exporter", + "container_name": f"{self.docker_network}_node-exporter", "volumes": ["/proc:/host/proc:ro", "/sys:/host/sys:ro", "/:/rootfs:ro"], "command": ["--path.procfs=/host/proc", "--path.sysfs=/host/sys"], "networks": [self.docker_network], diff --git a/src/services/prometheus.py b/src/services/prometheus.py index f49bef9d3..6039d6f82 100644 --- a/src/services/prometheus.py +++ b/src/services/prometheus.py @@ -1,3 +1,4 @@ +import docker from .base_service import BaseService PORT = 9090 @@ -8,7 +9,7 @@ def __init__(self, docker_network, config_dir): super().__init__(docker_network, config_dir) self.service = { "image": "prom/prometheus:latest", - "container_name": "prometheus", + "container_name": f"{self.docker_network}_prometheus", "ports": [f"{PORT}:9090"], "volumes": [ f"{self.config_dir / 'prometheus.yml'}:/etc/prometheus/prometheus.yml" diff --git a/src/services/tor.py b/src/services/tor.py index 0f7077e3a..d266f1451 100644 --- a/src/services/tor.py +++ b/src/services/tor.py @@ -13,7 +13,7 @@ def __init__(self, docker_network, templates): "context": str(self.templates), "dockerfile": DOCKERFILE, }, - "container_name": "tor", + "container_name": f"{self.docker_network}_tor", "networks": { self.docker_network: { "ipv4_address": DIRECTORY_AUTHORITY_IP, diff --git a/src/warnet/cli.py b/src/warnet/cli.py 
index fcacee0cf..0ed91eb03 100644 --- a/src/warnet/cli.py +++ b/src/warnet/cli.py @@ -176,14 +176,17 @@ def list(): print(f"Error listing scenarios: {e}") -@scenarios.command() +@scenarios.command(context_settings={"ignore_unknown_options": True}) @click.argument("scenario", type=str) -def run(scenario): +@click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) +@click.option("--network", default="warnet", show_default=True) +def run(scenario, network, additional_args): """ Run from the Warnet Test Framework """ try: - res = rpc_call("run", {"scenario": scenario}) + params = {"scenario": scenario, "additional_args": additional_args, "network": network} + res = rpc_call("run", params) print(res) except Exception as e: print(f"Error running scenario: {e}") diff --git a/src/warnet/tank.py b/src/warnet/tank.py index bd59b322e..65cf50886 100644 --- a/src/warnet/tank.py +++ b/src/warnet/tank.py @@ -129,7 +129,7 @@ def container_name(self): @property def exporter_name(self): if self._exporter_name is None: - self._exporter_name = f"{CONTAINER_PREFIX_PROMETHEUS}_{self.suffix}" + self._exporter_name = f"{self.docker_network}_{CONTAINER_PREFIX_PROMETHEUS}_{self.suffix}" return self._exporter_name @property diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index 073607e8a..6c3551f21 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -9,7 +9,7 @@ import threading from datetime import datetime from logging.handlers import RotatingFileHandler - +from typing import List from flask import Flask from flask_jsonrpc.app import JSONRPC @@ -129,7 +129,7 @@ def list() -> list[str]: @jsonrpc.method("run") -def run(scenario: str, network: str = "warnet") -> str: +def run(scenario: str, additional_args: List[str], network: str = "warnet") -> str: """ Run from the Warnet Test Framework """ @@ -140,7 +140,7 @@ def run(scenario: str, network: str = "warnet") -> str: return f"Scenario {scenario} not found at {scenario_path}." try: - run_cmd = [sys.executable, scenario_path] + [f"--network={network}"] + run_cmd = [sys.executable, scenario_path] + additional_args + [f"--network={network}"] logger.debug(f"Running {run_cmd}") subprocess.Popen(run_cmd, shell=False) return f"Running scenario {scenario} in the background..." 
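For illustration only (not part of the patch series): a minimal sketch of driving the reworked "run" RPC directly over JSON-RPC, the same call that `warcli scenarios run` now forwards extra arguments through. It assumes warnetd is listening locally on the default WARNETD_PORT (9276); the scenario name and the extra flag below are hypothetical placeholders.

# Illustrative sketch only. Assumes warnetd is running locally on the default
# WARNETD_PORT (9276); "example_scenario" and "--some-flag=1" are hypothetical
# placeholders, not scenarios or options shipped by this patch series.
import requests
from jsonrpcclient import Ok, parse, request

WARNETD_PORT = 9276

payload = request(
    "run",
    {
        "scenario": "example_scenario",        # hypothetical scenario name
        "additional_args": ["--some-flag=1"],  # forwarded verbatim to the scenario process
        "network": "warnet",
    },
)
response = requests.post(f"http://localhost:{WARNETD_PORT}/api", json=payload)
parsed = parse(response.json())
print(parsed.result if isinstance(parsed, Ok) else parsed)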
From 4a0821e75ce78f1dea25f70738f056c2b2e2a916 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 4 Sep 2023 20:48:46 +0100 Subject: [PATCH 16/28] Split cli, persist running scenarios * better code organisation * persist running scenarios to disk for later restore --- pyproject.toml | 3 +- src/warnet/cli.py | 286 ------------------------------------ src/warnet/cli/__init__.py | 0 src/warnet/cli/debug.py | 46 ++++++ src/warnet/cli/main.py | 148 +++++++++++++++++++ src/warnet/cli/network.py | 62 ++++++++ src/warnet/cli/rpc.py | 23 +++ src/warnet/cli/scenarios.py | 70 +++++++++ src/warnet/client.py | 6 +- src/warnet/tank.py | 4 +- src/warnet/utils.py | 43 ++++++ src/warnet/warnet.py | 6 +- src/warnet/warnetd.py | 86 +++++++++-- 13 files changed, 477 insertions(+), 306 deletions(-) delete mode 100644 src/warnet/cli.py create mode 100644 src/warnet/cli/__init__.py create mode 100644 src/warnet/cli/debug.py create mode 100644 src/warnet/cli/main.py create mode 100644 src/warnet/cli/network.py create mode 100644 src/warnet/cli/rpc.py create mode 100644 src/warnet/cli/scenarios.py diff --git a/pyproject.toml b/pyproject.toml index 51313a203..ded0736bd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,13 +23,14 @@ dependencies = [ "jsonrpcserver==5.0.3", "jsonrpcclient==4.0.0", "networkx==3.1", + "rich==13.5.2", "PyYAML==6.0.1", ] dynamic = ["version"] [project.scripts] warnet = "warnet.warnetd:run_gunicorn" -warcli = "warnet.cli:cli" +warcli = "warnet.cli.main:cli" [tool.black] line-length = 88 diff --git a/src/warnet/cli.py b/src/warnet/cli.py deleted file mode 100644 index 0ed91eb03..000000000 --- a/src/warnet/cli.py +++ /dev/null @@ -1,286 +0,0 @@ -import requests -from typing import Optional, Any, Tuple, Dict, Union -from pathlib import Path - -from jsonrpcclient import Ok, parse, request -import click -from rich import print - -from templates import TEMPLATES -from warnet import warnet -from warnet.warnetd import WARNETD_PORT - -EXAMPLE_GRAPH_FILE = TEMPLATES / "example.graphml" - - -def rpc_call(rpc_method, params: Optional[Union[Dict[str, Any], Tuple[Any, ...]]]): - payload = request(rpc_method, params) - response = requests.post(f"http://localhost:{WARNETD_PORT}/api", json=payload) - parsed = parse(response.json()) - - if isinstance(parsed, Ok): - return parsed.result - else: - print(parsed) - raise Exception(parsed.message) - - -@click.group() -def cli(): - pass - - -@click.group(name="debug") -def debug(): - """Debug commands""" - - -cli.add_command(debug) - - -@click.group(name="scenarios") -def scenarios(): - """Scenario commands""" - - -cli.add_command(scenarios) - - -@click.group(name="network") -def network(): - """Network commands""" - - -cli.add_command(network) - - -@cli.command(name="help") -@click.argument("command", required=False, default=None) -@click.pass_context -def help_command(ctx, command): - """ - Display help information for the given command. - If no command is given, display help for the main CLI. 
- """ - if command is None: - # Display help for the main CLI - print(ctx.parent.get_help()) - return - - # Fetch the command object - cmd_obj = cli.get_command(ctx, command) - - if cmd_obj is None: - print(f"Unknown command: {command}") - return - - # Extract only the relevant help information (excluding the initial usage line) - help_info = cmd_obj.get_help(ctx).split("\n", 1)[-1].strip() - - # Extract the arguments of the command - arguments = [ - param.human_readable_name.upper() - for param in cmd_obj.params - if isinstance(param, click.Argument) - ] - - # Determine the correct usage string based on whether the command has subcommands - if isinstance(cmd_obj, click.Group) and cmd_obj.list_commands(ctx): - usage_str = ( - f"Usage: warnet {command} [OPTIONS] COMMAND [ARGS...]\n\n{help_info}" - ) - else: - args_str = " ".join(arguments) - usage_str = f"Usage: warnet {command} [OPTIONS] {args_str}\n\n{help_info}" - - print(usage_str) - - -cli.add_command(help_command) - - -@cli.command(context_settings={"ignore_unknown_options": True}) -@click.argument("node", type=int) -@click.argument( - "method", type=str, nargs=-1 -) # this will capture all remaining arguments -@click.option("--params", type=str, multiple=True, default=()) -@click.option("--network", default="warnet", show_default=True) -def rpc(node, method, params, network): - """ - Call bitcoin-cli on in <--network> - """ - if len(method) > 2: - raise click.BadArgumentUsage( - "You can provide at most two arguments for 'method'." - ) - - # Convert tuple to space-separated string - method_str = " ".join(method) - - try: - result = rpc_call( - "bcli", - {"network": network, "node": node, "method": method_str, "params": params}, - ) - print(result) - except Exception as e: - print(f"bitcoin-cli {method_str} {params} failed on node {node}:\n{e}") - - -@cli.command() -@click.argument("node", type=int, required=True) -@click.option("--network", default="warnet", show_default=True) -def debug_log(node, network): - """ - Fetch the Bitcoin Core debug log from in [network] - """ - try: - result = rpc_call("debug_log", {"node": node, "network": network}) - print(result) - except Exception as e: - print(f"In our pursuit of knowledge from node {node}, we were thwarted: {e}") - - -@cli.command() -@click.argument("node_a", type=int, required=True) -@click.argument("node_b", type=int, required=True) -@click.option("--network", default="warnet", show_default=True) -def messages(node_a, node_b, network): - """ - Fetch messages sent between and in - """ - import logging - - logging.warning(f"got args: {node_a}, {node_b}, {network}") - try: - result = rpc_call( - "messages", {"network": network, "node_a": node_a, "node_b": node_b} - ) - print(result) - except Exception as e: - print( - f"Amidst the fog of war, we failed to relay messages between strongholds {node_a} and {node_b}: {e}" - ) - - -@scenarios.command() -def list(): - """ - List available scenarios in the Warnet Test Framework - """ - try: - result = rpc_call("list", None) - print(result) - except Exception as e: - print(f"Error listing scenarios: {e}") - - -@scenarios.command(context_settings={"ignore_unknown_options": True}) -@click.argument("scenario", type=str) -@click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) -@click.option("--network", default="warnet", show_default=True) -def run(scenario, network, additional_args): - """ - Run from the Warnet Test Framework - """ - try: - params = {"scenario": scenario, "additional_args": additional_args, "network": network} - 
res = rpc_call("run", params) - print(res) - except Exception as e: - print(f"Error running scenario: {e}") - - -@debug.command() -@click.argument("graph_file", type=str) -@click.option("--network", default="warnet", show_default=True) -def generate_compose(graph_file: str, network: str = "warnet"): - """ - Generate the docker-compose file for a given and <--network> name and return it. - """ - try: - result = rpc_call( - "generate_compose", {"graph_file": graph_file, "network": network} - ) - print(result) - except Exception as e: - print(f"Error generating compose: {e}") - - -@debug.command() -@click.argument("graph_file", type=str) -@click.option("--network", default="warnet", show_default=True) -def update_dns_seed(graph_file: Path = EXAMPLE_GRAPH_FILE, network: str = "warnet"): - """ - Update the dns seed database using a graph file - """ - try: - result = rpc_call( - "update_dns_seeder", {"graph_file": str(graph_file), "network": network} - ) - print(result) - except Exception as e: - print(f"Error updating dns seed addresses: {e}") - - -@network.command() -@click.argument("graph_file", default=EXAMPLE_GRAPH_FILE, type=click.Path()) -@click.option("--force", default=False, is_flag=True, type=bool) -@click.option("--network", default="warnet", show_default=True) -def start( - graph_file: Path = EXAMPLE_GRAPH_FILE, force: bool = False, network: str = "warnet" -): - """ - Start a warnet with topology loaded from a into <--network> (default: "warnet") - """ - try: - result = rpc_call( - "from_file", - {"graph_file": str(graph_file), "force": force, "network": network}, - ) - print(result) - except Exception as e: - print(f"Error creating network: {e}") - - -@network.command() -@click.option("--network", default="warnet", show_default=True) -def up(network: str = "warnet"): - """ - Run 'docker-compose up' on a warnet named <--network> (default: "warnet"). - """ - try: - result = rpc_call("up", {"network": network}) - print(result) - except Exception as e: - print(f"Error creating network: {e}") - - -@network.command() -@click.option("--network", default="warnet", show_default=True) -def down(network: str = "warnet"): - """ - Run 'docker-compose down on a warnet named <--network> (default: "warnet"). - """ - try: - result = rpc_call("down", {"network": network}) - print(result) - except Exception as e: - print(f"As we endeavored to cease operations, adversity struck: {e}") - - -@cli.command() -def stop(): - """ - Stop the warnetd daemon. - """ - try: - result = rpc_call("stop", None) - print(result) - except Exception as e: - print(f"As we endeavored to cease operations, adversity struck: {e}") - - -if __name__ == "__main__": - cli() diff --git a/src/warnet/cli/__init__.py b/src/warnet/cli/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/warnet/cli/debug.py b/src/warnet/cli/debug.py new file mode 100644 index 000000000..a344309f0 --- /dev/null +++ b/src/warnet/cli/debug.py @@ -0,0 +1,46 @@ +from pathlib import Path + +import click +from rich import print + +from templates import TEMPLATES +from warnet.cli.rpc import rpc_call + +EXAMPLE_GRAPH_FILE = TEMPLATES / "example.graphml" + + +@click.group(name="debug") +def debug(): + """Debug commands""" + + +@debug.command() +@click.argument("graph_file", type=str) +@click.option("--network", default="warnet", show_default=True) +def generate_compose(graph_file: str, network: str = "warnet"): + """ + Generate the docker-compose file for a given and <--network> (default: "warnet") name and return it. 
+ """ + try: + result = rpc_call( + "generate_compose", {"graph_file": graph_file, "network": network} + ) + print(result) + except Exception as e: + print(f"Error generating compose: {e}") + + +@debug.command() +@click.argument("graph_file", type=str) +@click.option("--network", default="warnet", show_default=True) +def update_dns_seed(graph_file: Path = EXAMPLE_GRAPH_FILE, network: str = "warnet"): + """ + Update the dns seed database using a on <--network> (default: "warnet") + """ + try: + result = rpc_call( + "update_dns_seeder", {"graph_file": str(graph_file), "network": network} + ) + print(result) + except Exception as e: + print(f"Error updating dns seed addresses: {e}") diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py new file mode 100644 index 000000000..f375f5f70 --- /dev/null +++ b/src/warnet/cli/main.py @@ -0,0 +1,148 @@ +import click +from rich import print + +from templates import TEMPLATES +from warnet.cli.debug import debug +from warnet.cli.network import network +from warnet.cli.rpc import rpc_call +from warnet.cli.scenarios import scenarios + +EXAMPLE_GRAPH_FILE = TEMPLATES / "example.graphml" + + +@click.group() +def cli(): + pass + + +cli.add_command(debug) +cli.add_command(scenarios) +cli.add_command(network) + + +@cli.command(name="help") +@click.argument("command", required=False, default=None) +@click.pass_context +def help_command(ctx, command): + """ + Display help information for the given command. + If no command is given, display help for the main CLI. + """ + if command is None: + # Display help for the main CLI + print(ctx.parent.get_help()) + return + + # Fetch the command object + cmd_obj = cli.get_command(ctx, command) + + if cmd_obj is None: + print(f"Unknown command: {command}") + return + + # Extract only the relevant help information (excluding the initial usage line) + # help_info = cmd_obj.get_help(ctx).split("\n", 1)[-1].strip() + help_info = cmd_obj.get_help(ctx).strip() + + + # Extract the arguments of the command + arguments = [ + param.human_readable_name.upper() + for param in cmd_obj.params + if isinstance(param, click.Argument) + ] + + # Determine the correct usage string based on whether the command has subcommands + if isinstance(cmd_obj, click.Group) and cmd_obj.list_commands(ctx): + usage_str = ( + f"Usage: warnet {command} [OPTIONS] COMMAND [ARGS...]\n\n{help_info}" + ) + else: + args_str = " ".join(arguments) + usage_str = f"Usage: warnet {command} [OPTIONS] {args_str}\n\n{help_info}" + + print(usage_str) + + +cli.add_command(help_command) + + +@cli.command(context_settings={"ignore_unknown_options": True}) +@click.argument("node", type=int) +@click.argument( + "method", type=str, nargs=-1 +) # this will capture all remaining arguments +@click.option("--params", type=str, multiple=True, default=()) +@click.option("--network", default="warnet", show_default=True) +def rpc(node, method, params, network): + """ + Call bitcoin-cli on in <--network> + """ + if len(method) > 2: + raise click.BadArgumentUsage( + "You can provide at most two arguments for 'method'." 
+ ) + + # Convert tuple to space-separated string + method_str = " ".join(method) + + try: + result = rpc_call( + "bcli", + {"network": network, "node": node, "method": method_str, "params": params}, + ) + print(result) + except Exception as e: + print(f"bitcoin-cli {method_str} {params} failed on node {node}:\n{e}") + + +@cli.command() +@click.argument("node", type=int, required=True) +@click.option("--network", default="warnet", show_default=True) +def debug_log(node, network): + """ + Fetch the Bitcoin Core debug log from in [network] + """ + try: + result = rpc_call("debug_log", {"node": node, "network": network}) + print(result) + except Exception as e: + print(f"In our pursuit of knowledge from node {node}, we were thwarted: {e}") + + +@cli.command() +@click.argument("node_a", type=int, required=True) +@click.argument("node_b", type=int, required=True) +@click.option("--network", default="warnet", show_default=True) +def messages(node_a, node_b, network): + """ + Fetch messages sent between and in + """ + import logging + + logging.warning(f"got args: {node_a}, {node_b}, {network}") + try: + result = rpc_call( + "messages", {"network": network, "node_a": node_a, "node_b": node_b} + ) + print(result) + except Exception as e: + print( + f"Error fetching messages between {node_a} and {node_b}: {e}" + ) + + +@cli.command() +def stop(): + """ + Stop warnetd. + """ + try: + result = rpc_call("stop", None) + print(result) + except Exception as e: + print(f"Error stopping warnetd: {e}") + + +if __name__ == "__main__": + cli() diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py new file mode 100644 index 000000000..92581ac96 --- /dev/null +++ b/src/warnet/cli/network.py @@ -0,0 +1,62 @@ +from pathlib import Path + +import click +from rich import print + +from templates import TEMPLATES +from warnet.cli.rpc import rpc_call + +EXAMPLE_GRAPH_FILE = TEMPLATES / "example.graphml" + + +@click.group(name="network") +def network(): + """Network commands""" + + + +@network.command() +@click.argument("graph_file", default=EXAMPLE_GRAPH_FILE, type=click.Path()) +@click.option("--force", default=False, is_flag=True, type=bool) +@click.option("--network", default="warnet", show_default=True) +def start( + graph_file: Path = EXAMPLE_GRAPH_FILE, force: bool = False, network: str = "warnet" +): + """ + Start a warnet with topology loaded from a into <--network> (default: "warnet") + """ + try: + result = rpc_call( + "from_file", + {"graph_file": str(graph_file), "force": force, "network": network}, + ) + print(result) + except Exception as e: + print(f"Error creating network: {e}") + + +@network.command() +@click.option("--network", default="warnet", show_default=True) +def up(network: str = "warnet"): + """ + Run 'docker-compose up' on a warnet named <--network> (default: "warnet"). + """ + try: + result = rpc_call("up", {"network": network}) + print(result) + except Exception as e: + print(f"Error creating network: {e}") + + +@network.command() +@click.option("--network", default="warnet", show_default=True) +def down(network: str = "warnet"): + """ + Run 'docker-compose down on a warnet named <--network> (default: "warnet"). 
+ """ + try: + result = rpc_call("down", {"network": network}) + print(result) + except Exception as e: + print(f"Error running docker-compose down on network {network}: {e}") + diff --git a/src/warnet/cli/rpc.py b/src/warnet/cli/rpc.py new file mode 100644 index 000000000..a2ad8e79a --- /dev/null +++ b/src/warnet/cli/rpc.py @@ -0,0 +1,23 @@ +import logging +import requests +from jsonrpcclient.responses import Ok, parse +from jsonrpcclient.requests import request +from typing import Any, Dict, Tuple, Union, Optional +from warnet.warnetd import WARNETD_PORT + +logger = logging.getLogger(__name__) + + +def rpc_call(rpc_method, params: Optional[Union[Dict[str, Any], Tuple[Any, ...]]]): + payload = request(rpc_method, params) + logger.debug(f"Constructed rpc call: {payload}") + response = requests.post(f"http://localhost:{WARNETD_PORT}/api", json=payload) + logger.debug(f"RPC respose: {response.status_code}, {response.text}") + parsed = parse(response.json()) + + if isinstance(parsed, Ok): + return parsed.result + else: + error_message = getattr(parsed, 'message', 'Unknown RPC error') + raise Exception(error_message) + diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py new file mode 100644 index 000000000..bdb62872c --- /dev/null +++ b/src/warnet/cli/scenarios.py @@ -0,0 +1,70 @@ +import click +from rich import print + +from templates import TEMPLATES +from warnet.cli.rpc import rpc_call + +EXAMPLE_GRAPH_FILE = TEMPLATES / "example.graphml" + + +@click.group(name="scenarios") +def scenarios(): + """Scenario commands""" + + +@scenarios.command() +def list(): + """ + List available scenarios in the Warnet Test Framework + """ + try: + result = rpc_call("list", None) + print(result) + except Exception as e: + print(f"Error listing scenarios: {e}") + + +@scenarios.command(context_settings={"ignore_unknown_options": True}) +@click.argument("scenario", type=str) +@click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) +@click.option("--network", default="warnet", show_default=True) +def run(scenario, network, additional_args): + """ + Run from the Warnet Test Framework on <--network> with optional arguments + """ + try: + params = {"scenario": scenario, "additional_args": additional_args, "network": network} + res = rpc_call("run", params) + print(res) + except Exception as e: + print(f"Error running scenario: {e}") + + +@scenarios.command() +@click.option("--network", default="warnet", show_default=True) +def active(network: str = "warnet"): + """ + List running scenarios on <--network> (default=warnet) as "name": "pid" pairs + """ + try: + result = rpc_call("list_running_scenarios", {"network": network}) + print(result) + except Exception as e: + print(f"Error listing scenarios: {e}") + + +@scenarios.command() +@click.argument("scenario", type=str) +@click.argument("name", type=str) +@click.option("--network", default="warnet", show_default=True) +def stop(scenario, network): + """ + Stop from running on <--network> + """ + try: + params = {"scenario": scenario, "network": network} + res = rpc_call("stop_scenario", params) + print(res) + except Exception as e: + print(f"Error stopping scenario: {e}") + diff --git a/src/warnet/client.py b/src/warnet/client.py index e5f0f426c..04f3e0b8c 100644 --- a/src/warnet/client.py +++ b/src/warnet/client.py @@ -1,7 +1,7 @@ import concurrent.futures import logging import threading -from typing import List, Optional +from typing import List, Optional, Any, Dict import docker @@ -35,7 +35,9 @@ def get_bitcoin_cli(network: str, 
index: int, method: str, params=None) -> str: return tank.exec(cmd=cmd, user="bitcoin") -def get_messages(network: str, src_index: int, dst_index: int) -> List[Optional[str]]: +def get_messages( + network: str, src_index: int, dst_index: int +) -> List[Optional[Dict[str, Any]]]: src_node = Tank.from_docker_env(network, src_index) dst_node = Tank.from_docker_env(network, dst_index) # start with the IP of the peer diff --git a/src/warnet/tank.py b/src/warnet/tank.py index 65cf50886..3f692f334 100644 --- a/src/warnet/tank.py +++ b/src/warnet/tank.py @@ -129,7 +129,9 @@ def container_name(self): @property def exporter_name(self): if self._exporter_name is None: - self._exporter_name = f"{self.docker_network}_{CONTAINER_PREFIX_PROMETHEUS}_{self.suffix}" + self._exporter_name = ( + f"{self.docker_network}_{CONTAINER_PREFIX_PROMETHEUS}_{self.suffix}" + ) return self._exporter_name @property diff --git a/src/warnet/utils.py b/src/warnet/utils.py index 7a1a298c0..56a51750b 100644 --- a/src/warnet/utils.py +++ b/src/warnet/utils.py @@ -9,6 +9,7 @@ import time from io import BytesIO from pathlib import Path +from typing import Dict from test_framework.p2p import MESSAGEMAP from test_framework.messages import ser_uint256 @@ -27,6 +28,7 @@ "0.16.3", "0.15.1", ] +RUNNING_PROC_FILE = "running_scenarios.dat" def exponential_backoff(max_retries=5, base_delay=1, max_delay=32): @@ -346,3 +348,44 @@ def wrapper(*args, **kwargs): ) return wrapper + + +def save_running_scenario(scenario: str, process, config_dir: Path): + with open(config_dir / RUNNING_PROC_FILE, "a") as file: + file.write(f"{scenario}\t{process.pid}\n") + + +def load_running_scenarios(config_dir: Path) -> Dict[str, int]: + scenarios = {} + if os.path.exists(config_dir / RUNNING_PROC_FILE): + with open(os.path.join(config_dir, RUNNING_PROC_FILE), "r") as file: + for line in file.readlines(): + scenario, pid = line.strip().split("\t") + scenarios[scenario] = int(pid) + return scenarios + + +def remove_stopped_scenario(scenario: str, config_dir: Path): + lines = [] + with open(config_dir / RUNNING_PROC_FILE, "r") as file: + lines = file.readlines() + + with open(config_dir / RUNNING_PROC_FILE, "w") as file: + for line in lines: + if not line.startswith(scenario): + file.write(line) + + +def update_running_scenarios_file(config_dir: Path, running_scenarios: Dict[str, int]): + with open(config_dir / RUNNING_PROC_FILE, "w") as file: + for scenario, pid in running_scenarios.items(): + file.write(f"{scenario}\t{pid}\n") + + # Check if each PID is still running + still_running = {} + for scenario, pid in running_scenarios.items(): + try: + os.kill(pid, 0) # Will raise an error if the process doesn't exist + still_running[scenario] = pid + except OSError: + pass diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py index 94b3ee9f6..7fcba10c7 100644 --- a/src/warnet/warnet.py +++ b/src/warnet/warnet.py @@ -17,7 +17,7 @@ from services.grafana import Grafana from services.tor import Tor from services.fork_observer import ForkObserver -from services.fluentd import FLUENT_CONF, Fluentd, FLUENT_IP +# from services.fluentd import FLUENT_CONF, Fluentd, FLUENT_IP from services.dns_seed import DnsSeed, ZONE_FILE_NAME, DNS_SEED_NAME from warnet.tank import Tank from warnet.utils import parse_bitcoin_conf, gen_config_dir, bubble_exception_str @@ -44,7 +44,7 @@ def __init__(self, config_dir): f"copying config {TEMPLATES / FO_CONF_NAME} to {self.fork_observer_config}" ) shutil.copy(TEMPLATES / FO_CONF_NAME, self.fork_observer_config) - shutil.copy(TEMPLATES / 
FLUENT_CONF, self.config_dir) + # shutil.copy(TEMPLATES / FLUENT_CONF, self.config_dir) def __str__(self) -> str: tanks_str = ",\n".join([str(tank) for tank in self.tanks]) @@ -177,7 +177,7 @@ def apply_zone_file(self): result = seeder.exec_run( f"sh -c 'echo \"{content_str}\" > /etc/bind/dns-seed.zone'" ) - logging.debug(f"result of updating {ZONE_FILE_NAME}: {result}") + logger.debug(f"result of updating {ZONE_FILE_NAME}: {result}") # Reload that single zone only seeder.exec_run("rndc reload dns-seed") diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index 6c3551f21..3bf139fd4 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -7,9 +7,10 @@ import subprocess import sys import threading +from collections import defaultdict from datetime import datetime from logging.handlers import RotatingFileHandler -from typing import List +from typing import List, Dict from flask import Flask from flask_jsonrpc.app import JSONRPC @@ -21,7 +22,13 @@ get_messages, compose_down, ) -from warnet.utils import gen_config_dir +from warnet.utils import ( + gen_config_dir, + save_running_scenario, + load_running_scenarios, + remove_stopped_scenario, + update_running_scenarios_file, +) WARNETD_PORT = 9276 @@ -84,26 +91,32 @@ def messages(network: str, node_a: int, node_b: int) -> str: Fetch messages sent between and . """ try: - messages = get_messages(network, node_a, node_b) + messages = [ + msg for msg in get_messages(network, node_a, node_b) if msg is not None + ] if not messages: return f"No messages found between {node_a} and {node_b}" - # Convert each message dictionary to a string representation messages_str_list = [] + for message in messages: + # Check if 'time' key exists and its value is a number + if not (message.get("time") and isinstance(message["time"], (int, float))): + continue + timestamp = datetime.utcfromtimestamp(message["time"] / 1e6).strftime( "%Y-%m-%d %H:%M:%S" ) direction = ">>>" if message.get("outbound", False) else "<<<" msgtype = message.get("msgtype", "") - - # Handle the body dictionary in a special way body_dict = message.get("body", {}) - body_str = ", ".join(f"{key}: {value}" for key, value in body_dict.items()) + if not isinstance(body_dict, dict): # messages will be in dict form + continue + + body_str = ", ".join(f"{key}: {value}" for key, value in body_dict.items()) messages_str_list.append(f"{timestamp} {direction} {msgtype} {body_str}") - # Join all message strings with newlines result_str = "\n".join(messages_str_list) return result_str @@ -128,11 +141,12 @@ def list() -> list[str]: return [f"Exception {e}"] +running_scenarios = defaultdict(dict) + + @jsonrpc.method("run") def run(scenario: str, additional_args: List[str], network: str = "warnet") -> str: - """ - Run from the Warnet Test Framework - """ + config_dir = gen_config_dir(network) base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) scenario_path = os.path.join(base_dir, "scenarios", f"{scenario}.py") @@ -140,15 +154,61 @@ def run(scenario: str, additional_args: List[str], network: str = "warnet") -> s return f"Scenario {scenario} not found at {scenario_path}." 
try: - run_cmd = [sys.executable, scenario_path] + additional_args + [f"--network={network}"] + run_cmd = ( + [sys.executable, scenario_path] + additional_args + [f"--network={network}"] + ) logger.debug(f"Running {run_cmd}") - subprocess.Popen(run_cmd, shell=False) + process = subprocess.Popen(run_cmd, shell=False) + + save_running_scenario(scenario, process, config_dir) + return f"Running scenario {scenario} in the background..." except Exception as e: logger.error(f"Exception occurred while running the scenario: {e}") return f"Exception {e}" +@jsonrpc.method("stop_scenario") +def stop_scenario(scenario: str, network: str = "warnet") -> str: + config_dir = gen_config_dir(network) + running_scenarios = load_running_scenarios(config_dir) + + if scenario not in running_scenarios: + return f"Scenario {scenario} is not running." + + pid = running_scenarios[scenario] + try: + os.kill(pid, 0) + except ProcessLookupError: + return f"Scenario {scenario} with PID {pid} is not running." + + os.kill(pid, signal.SIGTERM) + + remove_stopped_scenario(scenario, config_dir) + + return f"Stopped scenario {scenario}." + + +@jsonrpc.method("list_running_scenarios") +def list_running_scenarios(network: str = "warnet") -> Dict[str, int]: + config_dir = gen_config_dir(network) + running_scenarios = load_running_scenarios(config_dir) + + # Check if each PID is still running + still_running = {} + for scenario, pid in running_scenarios.items(): + try: + os.kill(pid, 0) # Will raise an error if the process doesn't exist + still_running[scenario] = pid + except OSError: + pass + + # Update the file with only the still running scenarios + update_running_scenarios_file(config_dir, still_running) + + return still_running + + @jsonrpc.method("up") def up(network: str = "warnet") -> str: wn = Warnet.from_network(network=network, tanks=False) From 55998473942f03db416f7dc2e4405c3399334c0a Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 4 Sep 2023 21:56:38 +0100 Subject: [PATCH 17/28] WIP: properly stop scenarios --- src/warnet/cli/scenarios.py | 11 +++++---- src/warnet/utils.py | 4 ++-- src/warnet/warnetd.py | 45 ++++++++++++++++++++++++++++--------- 3 files changed, 41 insertions(+), 19 deletions(-) diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py index bdb62872c..faaa2d5eb 100644 --- a/src/warnet/cli/scenarios.py +++ b/src/warnet/cli/scenarios.py @@ -26,7 +26,7 @@ def list(): @scenarios.command(context_settings={"ignore_unknown_options": True}) @click.argument("scenario", type=str) -@click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) +@click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) @click.option("--network", default="warnet", show_default=True) def run(scenario, network, additional_args): """ @@ -54,15 +54,14 @@ def active(network: str = "warnet"): @scenarios.command() -@click.argument("scenario", type=str) -@click.argument("name", type=str) +@click.argument("pid", type=int) @click.option("--network", default="warnet", show_default=True) -def stop(scenario, network): +def stop(pid: int, network: str = "warnet"): """ - Stop <scenario> from running on <--network> + Stop scenario with <pid> from running on <--network> """ try: - params = {"scenario": scenario, "network": network} + params = {"pid": pid, "network": network} res = rpc_call("stop_scenario", params) print(res) except Exception as e: diff --git a/src/warnet/utils.py b/src/warnet/utils.py index 56a51750b..d7d45db38 100644 --- a/src/warnet/utils.py +++ b/src/warnet/utils.py @@ -350,9 +350,9 @@ def wrapper(*args,
**kwargs): return wrapper -def save_running_scenario(scenario: str, process, config_dir: Path): +def save_running_scenario(scenario: str, pid: int, config_dir: Path): with open(config_dir / RUNNING_PROC_FILE, "a") as file: - file.write(f"{scenario}\t{process.pid}\n") + file.write(f"{scenario}\t{pid}\n") def load_running_scenarios(config_dir: Path) -> Dict[str, int]: diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index 3bf139fd4..c44607718 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -6,6 +6,7 @@ import signal import subprocess import sys +import time import threading from collections import defaultdict from datetime import datetime @@ -23,6 +24,7 @@ compose_down, ) from warnet.utils import ( + exponential_backoff, gen_config_dir, save_running_scenario, load_running_scenarios, @@ -158,7 +160,7 @@ def run(scenario: str, additional_args: List[str], network: str = "warnet") -> s [sys.executable, scenario_path] + additional_args + [f"--network={network}"] ) logger.debug(f"Running {run_cmd}") - process = subprocess.Popen(run_cmd, shell=False) + process = subprocess.Popen(run_cmd, shell=False, preexec_fn=os.setsid) save_running_scenario(scenario, process, config_dir) @@ -169,24 +171,45 @@ def run(scenario: str, additional_args: List[str], network: str = "warnet") -> s @jsonrpc.method("stop_scenario") -def stop_scenario(scenario: str, network: str = "warnet") -> str: +def stop_scenario(pid: int, network: str = "warnet") -> str: + + def is_running(pid): + try: + os.kill(pid, 0) + except ProcessLookupError: + return False + return True + + @exponential_backoff() + def kill_process(pid): + os.kill(pid, signal.SIGKILL) + config_dir = gen_config_dir(network) running_scenarios = load_running_scenarios(config_dir) - if scenario not in running_scenarios: - return f"Scenario {scenario} is not running." + scenario = None + for scenario_name, scenario_pid in running_scenarios.items(): + if scenario_pid == pid: + scenario = scenario_name + break + if not scenario: + return f"No active scenario found for PID {pid}." - pid = running_scenarios[scenario] - try: - os.kill(pid, 0) - except ProcessLookupError: - return f"Scenario {scenario} with PID {pid} is not running." + if not is_running(pid): + return f"Scenario {scenario} with PID {pid} was found in file but is not running." + # First try with SIGTERM os.kill(pid, signal.SIGTERM) + time.sleep(5) + # Then try SIGKILL with exponential backoff + if is_running(pid): + kill_process(pid) - remove_stopped_scenario(scenario, config_dir) + if is_running(pid): + return f"Could not kill scenario {scenario} with pid {pid} using SIGKILL" - return f"Stopped scenario {scenario}." + remove_stopped_scenario(scenario, config_dir) + return f"Stopped scenario {scenario} with PID {pid}." 
@jsonrpc.method("list_running_scenarios") From fc26812b44d0a1c4154d3045689ef373685e989b Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 4 Sep 2023 22:07:11 +0100 Subject: [PATCH 18/28] fix python 3.8 list[] types --- src/warnet/warnetd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index c44607718..d11c7adfd 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -63,7 +63,7 @@ @jsonrpc.method("bcli") def bcli( - node: int, method: str, params: list[str] = [], network: str = "warnet" + node: int, method: str, params: List[str] = [], network: str = "warnet" ) -> str: """ Call bitcoin-cli on in [network] @@ -128,7 +128,7 @@ def messages(network: str, node_a: int, node_b: int) -> str: @jsonrpc.method("list") -def list() -> list[str]: +def list() -> List[str]: """ List available scenarios in the Warnet Test Framework """ From 7276fcd42a8497e43cb9d704c70aa8fb6ba9d670 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 4 Sep 2023 22:30:22 +0100 Subject: [PATCH 19/28] disable rich print for debug_log takes ages to format large logs --- src/warnet/cli/main.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py index f375f5f70..0644cac48 100644 --- a/src/warnet/cli/main.py +++ b/src/warnet/cli/main.py @@ -1,5 +1,5 @@ import click -from rich import print +from rich import print as richprint from templates import TEMPLATES from warnet.cli.debug import debug @@ -30,14 +30,14 @@ def help_command(ctx, command): """ if command is None: # Display help for the main CLI - print(ctx.parent.get_help()) + richprint(ctx.parent.get_help()) return # Fetch the command object cmd_obj = cli.get_command(ctx, command) if cmd_obj is None: - print(f"Unknown command: {command}") + richprint(f"Unknown command: {command}") return # Extract only the relevant help information (excluding the initial usage line) @@ -61,7 +61,7 @@ def help_command(ctx, command): args_str = " ".join(arguments) usage_str = f"Usage: warnet {command} [OPTIONS] {args_str}\n\n{help_info}" - print(usage_str) + richprint(usage_str) cli.add_command(help_command) @@ -91,9 +91,9 @@ def rpc(node, method, params, network): "bcli", {"network": network, "node": node, "method": method_str, "params": params}, ) - print(result) + richprint(result) except Exception as e: - print(f"bitcoin-cli {method_str} {params} failed on node {node}:\n{e}") + richprint(f"bitcoin-cli {method_str} {params} failed on node {node}:\n{e}") @cli.command() @@ -107,7 +107,7 @@ def debug_log(node, network): result = rpc_call("debug_log", {"node": node, "network": network}) print(result) except Exception as e: - print(f"In our pursuit of knowledge from node {node}, we were thwarted: {e}") + richprint(f"In our pursuit of knowledge from node {node}, we were thwarted: {e}") @cli.command() @@ -125,9 +125,9 @@ def messages(node_a, node_b, network): result = rpc_call( "messages", {"network": network, "node_a": node_a, "node_b": node_b} ) - print(result) + richprint(result) except Exception as e: - print( + richprint( f"Error fetching messages between {node_a} and {node_b}: {e}" ) @@ -139,9 +139,9 @@ def stop(): """ try: result = rpc_call("stop", None) - print(result) + richprint(result) except Exception as e: - print(f"Error stopping warnetd: {e}") + richprint(f"Error stopping warnetd: {e}") if __name__ == "__main__": From 0a69a040e11a52ccda35adae4881dbb3f7398a61 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Mon, 4 Sep 
2023 22:48:49 +0100 Subject: [PATCH 20/28] switch to ~HOME/.warnet for directories, if XDG not set --- README.md | 2 +- src/warnet/utils.py | 4 ++-- src/warnet/warnetd.py | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index ddd7c9f0a..194606db9 100644 --- a/README.md +++ b/README.md @@ -112,7 +112,7 @@ pip install -e . ## Running Warnet runs a daemon called `warnetd` which can be used to manage multiple warnets. -`warnetd` will by default log to a file `$XDG_STATE_HOME/warnet/warnet.log` if the `$XDG_STATE_HOME` environment variable is set, otherwise it will use `$HOME/.local/state/warnet/warnet.log`. +`warnetd` will by default log to a file `$XDG_STATE_HOME/warnet/warnet.log` if the `$XDG_STATE_HOME` environment variable is set, otherwise it will use `$HOME/.warnet/warnet.log`. To start `warnetd` with your venv activated simply run: diff --git a/src/warnet/utils.py b/src/warnet/utils.py index d7d45db38..9cdbc1785 100644 --- a/src/warnet/utils.py +++ b/src/warnet/utils.py @@ -326,8 +326,8 @@ def gen_config_dir(network: str) -> Path: """ Determine a config dir based on network name """ - xdg_config = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config")) - config_dir = Path(xdg_config) / "warnet" / network + config_dir = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.warnet")) + config_dir = Path(config_dir) / "warnet" / network return config_dir diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index d11c7adfd..e81b31e75 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -34,11 +34,11 @@ WARNETD_PORT = 9276 -# Determine the log file path based on XDG_STATE_HOME -_xdg_state_home = os.environ.get( - "XDG_STATE_HOME", os.path.join(os.environ["HOME"], ".local", "state") +# Determine the log file path based on XDG_STATE_HOME, or ~/.warnet +log_file_dir = os.environ.get( + "XDG_STATE_HOME", os.path.join(os.environ["HOME"], ".warnet") ) -LOG_FILE_PATH = os.path.join(_xdg_state_home, "warnet", "warnet.log") +LOG_FILE_PATH = os.path.join(log_file_dir, "warnet", "warnet.log") # Ensure the directory exists os.makedirs(os.path.dirname(LOG_FILE_PATH), exist_ok=True) From 7e1416a2e0c1dfeae407ea77fa832b4af3eb5879 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 6 Sep 2023 14:57:51 +0100 Subject: [PATCH 21/28] Revert from ruimarinho to our own docker images We can keep these better maintained, and they work just as well --- src/templates/Dockerfile | 109 ++++++++++++++++++++++++++ src/templates/Dockerfile_custom_build | 75 ------------------ src/templates/docker-entrypoint.sh | 47 +++++++++++ src/templates/example.graphml | 75 +++++++++--------- src/warnet/tank.py | 14 ++-- 5 files changed, 200 insertions(+), 120 deletions(-) create mode 100644 src/templates/Dockerfile delete mode 100644 src/templates/Dockerfile_custom_build create mode 100755 src/templates/docker-entrypoint.sh diff --git a/src/templates/Dockerfile b/src/templates/Dockerfile new file mode 100644 index 000000000..fc85d053f --- /dev/null +++ b/src/templates/Dockerfile @@ -0,0 +1,109 @@ +FROM debian:bookworm-slim as builder +ENV DEBIAN_FRONTEND=noninteractive + +ARG ARCH +ARG BITCOIN_VERSION +ARG REPO +ARG BRANCH +ARG BITCOIN_URL + +# Base requirements +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates \ + wget \ + && apt-get clean + +# Download binary +RUN if [ -n "${BITCOIN_URL}" ]; then \ + wget "${BITCOIN_URL}/bitcoin-core-${BITCOIN_VERSION}/bitcoin-${BITCOIN_VERSION}-${ARCH}-linux-gnu.tar.gz"; \ + tar -xzf 
"bitcoin-${BITCOIN_VERSION}-${ARCH}-linux-gnu.tar.gz" -C /usr/local --strip-components=1; \ + fi; + +# Extra requirements to build from source +RUN if [ -n "${REPO}" ]; then \ + apt-get update && apt-get install -y \ + automake \ + autotools-dev \ + build-essential \ + git \ + libtool \ + libboost-dev \ + libevent-dev \ + libdb5.3++-dev \ + libminiupnpc-dev \ + libnatpmp-dev \ + libzmq3-dev \ + libqrencode-dev \ + libsqlite3-dev \ + pkg-config \ + && apt-get clean; \ + fi; + +# Build from source +RUN if [ -n "${REPO}" ]; then \ + mkdir build; \ + cd /build; \ + git clone --depth 1 --branch "${BRANCH}" "https://github.com/${REPO}"; \ + cd /build/bitcoin; \ + ./autogen.sh; \ + ./configure \ + --disable-tests \ + --without-gui \ + --disable-bench \ + --disable-fuzz-binary \ + --enable-suppress-external-warnings; \ + make -j$(nproc); \ + make install; \ + fi; + +RUN rm -f /usr/local/bin/bitcoin-qt +RUN rm -f /usr/local/bin/test_bitcoin* + +# shrink image size a bit with a second stage +FROM debian:bookworm-slim + +ARG UID=3338 +ARG GID=3338 +ARG REPO +# env var overrides +ENV UID=$UID +ENV GID=$GID + +ENV BITCOIN_DATA=/home/bitcoin/.bitcoin + +RUN groupadd --gid ${GID} bitcoin \ + && useradd --create-home --no-log-init -u ${UID} -g ${GID} bitcoin \ + && apt-get update && apt-get install -y --no-install-recommends \ + gosu \ + tor \ + iproute2; \ + apt-get clean; + +# libs needed to run source-build without depends +RUN if [ -n "${REPO}" ]; then \ + apt-get update && apt-get install -y \ + libboost-dev \ + libevent-dev \ + libdb5.3++-dev \ + libminiupnpc-dev \ + libnatpmp-dev \ + libzmq3-dev \ + libqrencode-dev \ + libsqlite3-dev \ + && apt-get clean; \ + fi; + +COPY --from=builder /usr/local/bin/bitcoin* /usr/local/bin/ + +COPY docker-entrypoint.sh /entrypoint.sh +COPY tor-keys/* /home/debian-tor/.tor/keys/ + +VOLUME ["/home/bitcoin/.bitcoin"] + +EXPOSE 8332 8333 18332 18333 18443 18444 38333 38332 + +ENTRYPOINT ["/entrypoint.sh"] + +RUN bitcoind -version | grep -E "Bitcoin Core( Daemon)? 
version v${BITCOIN_VERSION}" + +CMD ["bitcoind"] diff --git a/src/templates/Dockerfile_custom_build b/src/templates/Dockerfile_custom_build deleted file mode 100644 index 66dae8cff..000000000 --- a/src/templates/Dockerfile_custom_build +++ /dev/null @@ -1,75 +0,0 @@ -FROM debian:bookworm-slim - -ENV DEBIAN_FRONTEND=noninteractive - -ARG REPO -ARG BRANCH - -# Base requirements -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - # - # general packages - # - ccache \ - ca-certificates \ - python3 \ - vim \ - build-essential \ - wget \ - tor \ - iproute2 \ - gosu \ - # - # For building bitcoin core - # - autotools-dev \ - libtool \ - automake \ - pkg-config \ - libboost-dev \ - libevent-dev \ - libdb5.3++-dev \ - libminiupnpc-dev \ - libnatpmp-dev \ - libzmq3-dev \ - libqrencode-dev \ - libsqlite3-dev \ - git \ - && apt-get clean - -# Build from source -RUN if [ -n "${REPO}" ]; then \ - mkdir build; \ - cd /build; \ - git clone --depth 1 --branch "${BRANCH}" "https://github.com/${REPO}"; \ - cd /build/bitcoin; \ - ./autogen.sh; \ - ./configure \ - --disable-tests \ - --without-gui \ - --disable-bench \ - --disable-fuzz-binary \ - --enable-suppress-external-warnings; \ - make -j$(nproc); \ - make install; \ - fi - -# Create bitcoin and debian-tor users -RUN groupadd -r bitcoin && useradd -r -m -g bitcoin bitcoin - - -# Tor setup -RUN mkdir -p /home/debian-tor/.tor/keys \ - && chown -R debian-tor:debian-tor /home/debian-tor \ - && chown -R debian-tor:debian-tor /etc/tor - -COPY tor-keys/* /home/debian-tor/.tor/keys/ - -# Bitcoind setup -RUN usermod -a -G debian-tor bitcoin -RUN mkdir -p /home/bitcoin/.bitcoin \ - && chown -R bitcoin:bitcoin /home/bitcoin - -CMD gosu debian-tor tor & \ - gosu bitcoin /usr/local/bin/bitcoind --datadir=/home/bitcoin/.bitcoin --conf=/home/bitcoin/.bitcoin/bitcoin.conf diff --git a/src/templates/docker-entrypoint.sh b/src/templates/docker-entrypoint.sh new file mode 100755 index 000000000..83f343568 --- /dev/null +++ b/src/templates/docker-entrypoint.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -e + +# Add bitcoin user to tor group to read the auth cookie +usermod -a -G debian-tor bitcoin +cp /etc/tor/torrc_original /etc/tor/torrc +mkdir -p /home/debian-tor/.tor/keys +chown -R debian-tor:debian-tor /home/debian-tor +# Start tor in the background +gosu debian-tor tor & + +if [ -n "${UID+x}" ] && [ "${UID}" != "0" ]; then + usermod -u "$UID" bitcoin +fi + +if [ -n "${GID+x}" ] && [ "${GID}" != "0" ]; then + groupmod -g "$GID" bitcoin +fi + +echo "$0: assuming uid:gid for bitcoin:bitcoin of $(id -u bitcoin):$(id -g bitcoin)" + +if [ $(echo "$1" | cut -c1) = "-" ]; then + echo "$0: assuming arguments for bitcoind" + + set -- bitcoind "$@" +fi + +if [ $(echo "$1" | cut -c1) = "-" ] || [ "$1" = "bitcoind" ]; then + mkdir -p "$BITCOIN_DATA" + chmod 700 "$BITCOIN_DATA" + # Fix permissions for home dir. + chown -R bitcoin:bitcoin "$(getent passwd bitcoin | cut -d: -f6)" + # Fix permissions for bitcoin data dir. 
+ chown -R bitcoin:bitcoin "$BITCOIN_DATA" + + echo "$0: setting data directory to $BITCOIN_DATA" + + set -- "$@" -datadir="$BITCOIN_DATA" +fi + +if [ "$1" = "bitcoind" ] || [ "$1" = "bitcoin-cli" ] || [ "$1" = "bitcoin-tx" ]; then + echo + exec gosu bitcoin "$@" +fi + +echo +exec "$@" diff --git a/src/templates/example.graphml b/src/templates/example.graphml index 71169bcdd..2bd485061 100644 --- a/src/templates/example.graphml +++ b/src/templates/example.graphml @@ -2,79 +2,78 @@ - - 22.0 + 25.0 uacomment=w0 - 22.0 + 24.1 uacomment=w1 - 22.0 + 23.2 uacomment=w2 - 22.0 + 22.1 uacomment=w3 - - 23.0 + 0.21.2 uacomment=w4 - 23.0 + 0.20.2 uacomment=w5 - 23.0 + 0.19.1 uacomment=w6 - 23.0 + 0.18.1 uacomment=w7 - - 22.0 + 0.18.0 uacomment=w8 - 22.0 + 0.17.2 uacomment=w9 - 22.0 + 0.16.3 uacomment=w10 - - - - + + vasild/bitcoin#relay_tx_to_priv_nets + sensitiverelayowntx=1,debugexclude=addrman,debug=sensitiverelay,debug=tor,debug=net,uacomment=sensitive_relay + - - - - - - - - - - + + + + + + + + + + + - - - - - - - - - - + + + + + + + + + + + diff --git a/src/warnet/tank.py b/src/warnet/tank.py index 3f692f334..46a17c92a 100644 --- a/src/warnet/tank.py +++ b/src/warnet/tank.py @@ -211,7 +211,7 @@ def add_services(self, services): repo, branch = self.version.split("#") build = { "context": str(TEMPLATES), - "dockerfile": str(TEMPLATES / "Dockerfile_custom_build"), + "dockerfile": str(TEMPLATES / "Dockerfile"), "args": { "REPO": repo, "BRANCH": branch, @@ -221,13 +221,13 @@ def add_services(self, services): # assume it's a release version, get the binary build = { "context": str(TEMPLATES), - "dockerfile": str(TEMPLATES / f"Dockerfile_{self.version}"), + "dockerfile": str(TEMPLATES / f"Dockerfile"), + "args": { + "ARCH": "x86_64", + "BITCOIN_URL": "https://bitcoincore.org/bin", + "BITCOIN_VERSION": f"{self.version}", + }, } - # Use entrypoint for derived build, but not for compiled build - services[self.container_name].update( - {"entrypoint": "/warnet_entrypoint.sh"} - ) - # Add the bitcoind service services[self.container_name].update( { From de9ed43ab7894cf8fbe6317ef896e4ef800c47f4 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 5 Sep 2023 19:34:04 -0400 Subject: [PATCH 22/28] update readme with new syntax --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 194606db9..590257608 100644 --- a/README.md +++ b/README.md @@ -111,25 +111,25 @@ pip install -e . ## Running -Warnet runs a daemon called `warnetd` which can be used to manage multiple warnets. -`warnetd` will by default log to a file `$XDG_STATE_HOME/warnet/warnet.log` if the `$XDG_STATE_HOME` environment variable is set, otherwise it will use `$HOME/.warnet/warnet.log`. +Warnet runs a daemon which can be used to manage multiple warnets. +`warnet` will by default log to a file `$XDG_STATE_HOME/warnet/warnet.log` if the `$XDG_STATE_HOME` environment variable is set, otherwise it will use `$HOME/.warnet/warnet.log`. -To start `warnetd` with your venv activated simply run: +To start `warnet` in the foreground with your venv activated simply run: ```bash -warnetd +warnet ``` > [!NOTE] -> `warnetd` also accepts a `--no-debug` option which prevents daemonization +> `warnetd` also accepts a `--daemon` option which runs the process in the background. -Once `warnetd` is running it can be interacted with using the cli tool `warcli`. -Run `warnet --help` to see a list of possible commands. +Run `warnet --help` to see a list of options. 
-All `warnet` commands accept a `--network` option, which allows you to specify the warnet you want to control. +Once `warnet` is running it can be interacted with using the cli tool `warcli`. +All `warcli` commands accept a `--network` option, which allows you to specify the warnet you want to control. This is set by default to `--network="warnet"` to simplify default operation. -To start an example warnet, with your venv active, run the following command to use the default graph and network: +To start an example warnet, with your venv active and the server running, run the following command to use the default graph and network: ```bash warcli start @@ -147,7 +147,7 @@ warcli help networks Each container is a node as described in the graph, along with various data exporters and a demo grafana dashboard. -The commands listed in `warnet --help` can then be used to control and query the nodes. +The commands listed in `warcli help` can then be used to control and query the nodes. ### Run scenarios on a network @@ -163,7 +163,7 @@ To see available scenarios (loaded from the default directory): warcli scenarios list ``` -Once a scenarios is selected it can be run with `warnet scenarios run <scenario> [--network=warnet]`, e.g.: +Once a scenario is selected it can be run with `warcli scenarios run <scenario> [--network=warnet]`, e.g.: ```bash # Command one node to generate a wallet and fill 100 blocks with 100 txs each @@ -177,7 +177,7 @@ This will run the run the scenario in the background until it exits or is killed Currently the warnet can be stopped, but **not** stopped, persisted and continued. Persisting the warnet during a stoppage is WIP. -To stop the warnet, and warnetd daemon: +To stop the warnet server: ```bash # stop but retain containers From 09f975eb91f94481a6c2f5cd6e3038fa4e9421ca Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 5 Sep 2023 19:34:31 -0400 Subject: [PATCH 23/28] warnetd: fix log file paths --- src/warnet/warnetd.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index e81b31e75..51ee97d79 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -34,11 +34,15 @@ WARNETD_PORT = 9276 -# Determine the log file path based on XDG_STATE_HOME, or ~/.warnet -log_file_dir = os.environ.get( - "XDG_STATE_HOME", os.path.join(os.environ["HOME"], ".warnet") -) -LOG_FILE_PATH = os.path.join(log_file_dir, "warnet", "warnet.log") +# Determine the log file path: +basedir = os.environ.get("XDG_STATE_HOME") +if basedir is None: + # ~/.warnet/warnet.log + basedir = os.path.join(os.environ["HOME"], ".warnet") +else: + # XDG_STATE_HOME / warnet / warnet.log + basedir = os.path.join(basedir, "warnet") +LOG_FILE_PATH = os.path.join(basedir, "warnet.log") # Ensure the directory exists os.makedirs(os.path.dirname(LOG_FILE_PATH), exist_ok=True) From 9a1dacbff34735a34453fd2165052bbbbd059a8f Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Tue, 5 Sep 2023 20:05:02 -0400 Subject: [PATCH 24/28] warnetd: restore log to stdout if not daemon --- src/warnet/warnetd.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index 51ee97d79..5127196d3 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -11,6 +11,7 @@ from collections import defaultdict from datetime import datetime from logging.handlers import RotatingFileHandler +from logging import StreamHandler from typing import List, Dict from flask import Flask from flask_jsonrpc.app import JSONRPC @@
-53,7 +54,8 @@ handlers=[ RotatingFileHandler( LOG_FILE_PATH, maxBytes=16_000_000, backupCount=3, delay=True - ) + ), + StreamHandler(sys.stdout) ], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) From 9beb5eb23309430ac5c1a8da230cf72211d2f7ee Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 6 Sep 2023 14:30:41 -0400 Subject: [PATCH 25/28] cli: remove logging from rpc output was being doubled by print() statements in calling functions --- src/warnet/cli/rpc.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/warnet/cli/rpc.py b/src/warnet/cli/rpc.py index a2ad8e79a..250f20375 100644 --- a/src/warnet/cli/rpc.py +++ b/src/warnet/cli/rpc.py @@ -1,18 +1,13 @@ -import logging import requests from jsonrpcclient.responses import Ok, parse from jsonrpcclient.requests import request from typing import Any, Dict, Tuple, Union, Optional from warnet.warnetd import WARNETD_PORT -logger = logging.getLogger(__name__) - def rpc_call(rpc_method, params: Optional[Union[Dict[str, Any], Tuple[Any, ...]]]): payload = request(rpc_method, params) - logger.debug(f"Constructed rpc call: {payload}") response = requests.post(f"http://localhost:{WARNETD_PORT}/api", json=payload) - logger.debug(f"RPC respose: {response.status_code}, {response.text}") parsed = parse(response.json()) if isinstance(parsed, Ok): From 152ffb3895820ec6b79a29dc01dfb2990974e018 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 6 Sep 2023 14:50:43 -0400 Subject: [PATCH 26/28] warnet: stringify with table of tank details --- src/warnet/warnet.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py index 7fcba10c7..5b458b56a 100644 --- a/src/warnet/warnet.py +++ b/src/warnet/warnet.py @@ -47,17 +47,18 @@ def __init__(self, config_dir): # shutil.copy(TEMPLATES / FLUENT_CONF, self.config_dir) def __str__(self) -> str: - tanks_str = ",\n".join([str(tank) for tank in self.tanks]) + template = "\t%-8.8s%-25.24s%-25.24s%-25.24s%-18.18s\n" + tanks_str = template % ("Index", "Version", "Conf", "Netem", "IPv4") + for tank in self.tanks: + tanks_str += template % (tank.index, tank.version, tank.conf, tank.netem, tank.ipv4) return ( - f"Warnet(\n" + f"Warnet:\n" f"\tTemp Directory: {self.config_dir}\n" f"\tBitcoin Network: {self.bitcoin_network}\n" f"\tDocker Network: {self.docker_network}\n" f"\tSubnet: {self.subnet}\n" f"\tGraph: {self.graph}\n" - f"\tTanks: [\n{tanks_str}\n" - f"\t]\n" - f")" + f"Tanks:\n{tanks_str}" ) @classmethod From 9cdec7ca8d1e59a484afad36006fd1e102f3ae07 Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Wed, 6 Sep 2023 15:33:40 -0400 Subject: [PATCH 27/28] tank: restore get_architecture() for docker-compose.yml creation --- src/warnet/tank.py | 5 ++--- src/warnet/utils.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/src/warnet/tank.py b/src/warnet/tank.py index 46a17c92a..c67b9bb86 100644 --- a/src/warnet/tank.py +++ b/src/warnet/tank.py @@ -7,9 +7,7 @@ import shutil from copy import deepcopy from pathlib import Path -from docker.api import service from docker.models.containers import Container -from services.fluentd import FLUENT_IP from templates import TEMPLATES from warnet.utils import ( exponential_backoff, @@ -17,6 +15,7 @@ sanitize_tc_netem_command, dump_bitcoin_conf, SUPPORTED_TAGS, + get_architecture, ) CONTAINER_PREFIX_BITCOIND = "tank" @@ -223,7 +222,7 @@ def add_services(self, services): "context": str(TEMPLATES), "dockerfile": str(TEMPLATES / f"Dockerfile"), "args": { - 
"ARCH": "x86_64", + "ARCH": get_architecture(), "BITCOIN_URL": "https://bitcoincore.org/bin", "BITCOIN_VERSION": f"{self.version}", }, diff --git a/src/warnet/utils.py b/src/warnet/utils.py index 9cdbc1785..32ff5f5c4 100644 --- a/src/warnet/utils.py +++ b/src/warnet/utils.py @@ -5,6 +5,7 @@ import os import random import re +import subprocess import sys import time from io import BytesIO @@ -63,6 +64,20 @@ def wrapper(*args, **kwargs): return decorator +def get_architecture(): + """ + Get the architecture of the machine. + :return: The architecture of the machine or None if an error occurred + """ + result = subprocess.run(["uname", "-m"], stdout=subprocess.PIPE) + arch = result.stdout.decode("utf-8").strip() + if arch == "arm64": + arch = "aarch64" + if arch is None: + raise Exception("Failed to detect architecture.") + return arch + + def generate_ipv4_addr(subnet): """ Generate a valid random IPv4 address within the given subnet. From d471bbb6f19af8f6e32aa82cab3d88b9bdadddd3 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Wed, 6 Sep 2023 21:58:15 +0100 Subject: [PATCH 28/28] reduce dockerfile layers --- src/templates/Dockerfile | 63 +++++++++++++++------------------------- 1 file changed, 23 insertions(+), 40 deletions(-) diff --git a/src/templates/Dockerfile b/src/templates/Dockerfile index fc85d053f..78bac18f3 100644 --- a/src/templates/Dockerfile +++ b/src/templates/Dockerfile @@ -7,21 +7,17 @@ ARG REPO ARG BRANCH ARG BITCOIN_URL -# Base requirements -RUN apt-get update && apt-get install -y --no-install-recommends \ +# install or build +RUN set -ex \ + && apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ wget \ - && apt-get clean - -# Download binary -RUN if [ -n "${BITCOIN_URL}" ]; then \ + && if [ -n "${BITCOIN_URL}" ]; then \ wget "${BITCOIN_URL}/bitcoin-core-${BITCOIN_VERSION}/bitcoin-${BITCOIN_VERSION}-${ARCH}-linux-gnu.tar.gz"; \ tar -xzf "bitcoin-${BITCOIN_VERSION}-${ARCH}-linux-gnu.tar.gz" -C /usr/local --strip-components=1; \ - fi; - -# Extra requirements to build from source -RUN if [ -n "${REPO}" ]; then \ - apt-get update && apt-get install -y \ + fi \ + && if [ -n "${REPO}" ]; then \ + apt-get install -y \ automake \ autotools-dev \ build-essential \ @@ -35,12 +31,7 @@ RUN if [ -n "${REPO}" ]; then \ libzmq3-dev \ libqrencode-dev \ libsqlite3-dev \ - pkg-config \ - && apt-get clean; \ - fi; - -# Build from source -RUN if [ -n "${REPO}" ]; then \ + pkg-config; \ mkdir build; \ cd /build; \ git clone --depth 1 --branch "${BRANCH}" "https://github.com/${REPO}"; \ @@ -54,12 +45,9 @@ RUN if [ -n "${REPO}" ]; then \ --enable-suppress-external-warnings; \ make -j$(nproc); \ make install; \ - fi; + fi -RUN rm -f /usr/local/bin/bitcoin-qt -RUN rm -f /usr/local/bin/test_bitcoin* - -# shrink image size a bit with a second stage +# shrink image size with a second stage FROM debian:bookworm-slim ARG UID=3338 @@ -68,20 +56,18 @@ ARG REPO # env var overrides ENV UID=$UID ENV GID=$GID - ENV BITCOIN_DATA=/home/bitcoin/.bitcoin -RUN groupadd --gid ${GID} bitcoin \ - && useradd --create-home --no-log-init -u ${UID} -g ${GID} bitcoin \ - && apt-get update && apt-get install -y --no-install-recommends \ +RUN set -ex \ + && groupadd --gid ${GID} bitcoin \ + && useradd --create-home --no-log-init -u ${UID} -g ${GID} bitcoin \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ gosu \ tor \ - iproute2; \ - apt-get clean; - -# libs needed to run source-build without depends -RUN if [ -n "${REPO}" ]; then \ - apt-get update && apt-get install -y 
\ + iproute2 \ + && if [ -n "${REPO}" ]; then \ + apt-get install -y --no-install-recommends \ libboost-dev \ libevent-dev \ libdb5.3++-dev \ @@ -89,21 +75,18 @@ RUN if [ -n "${REPO}" ]; then \ libnatpmp-dev \ libzmq3-dev \ libqrencode-dev \ - libsqlite3-dev \ - && apt-get clean; \ - fi; - -COPY --from=builder /usr/local/bin/bitcoin* /usr/local/bin/ + libsqlite3-dev; \ + fi \ + && apt-get clean \ + && rm -rf /var/cache/apt/* /var/lib/apt/lists/* +COPY --from=builder /usr/local/bin/bitcoind /usr/local/bin/bitcoin-cli /usr/local/bin/ COPY docker-entrypoint.sh /entrypoint.sh COPY tor-keys/* /home/debian-tor/.tor/keys/ VOLUME ["/home/bitcoin/.bitcoin"] - EXPOSE 8332 8333 18332 18333 18443 18444 38333 38332 ENTRYPOINT ["/entrypoint.sh"] - RUN bitcoind -version | grep -E "Bitcoin Core( Daemon)? version v${BITCOIN_VERSION}" - CMD ["bitcoind"]