diff --git a/.gitignore b/.gitignore index 6133dc87a..891f24271 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,5 @@ __pycache__ .venv warnet.egg-info .python-version -.env \ No newline at end of file +.env +src/templates/Dockerfile_[0-9]* diff --git a/README.md b/README.md index aea898e8b..590257608 100644 --- a/README.md +++ b/README.md @@ -111,33 +111,43 @@ pip install -e . ## Running -Warnet runs a daemon called `warnetd` which can be used to manage multiple warnets. -`warnetd` will by default log to a file `$XDG_STATE_HOME/warnet/warnet.log` if the `$XDG_STATE_HOME` environment variable is set, otherwise it will use `$HOME/.local/state/warnet/warnet.log`. +Warnet runs a daemon which can be used to manage multiple warnets. +`warnet` will by default log to a file `$XDG_STATE_HOME/warnet/warnet.log` if the `$XDG_STATE_HOME` environment variable is set, otherwise it will use `$HOME/.warnet/warnet.log`. -To start `warnetd` with your venv activated simply run: +To start `warnet` in the foreground with your venv activated simply run: ```bash -warnetd +warnet ``` > [!NOTE] -> `warnetd` also accepts a `--no-debug` option which prevents daemonization +> `warnetd` also accepts a `--daemon` option which runs the process in the background. -Once `warnetd` is running it can be interacted with using the cli tool `warnet`. -Run `warnet --help` to see a list of possible commands. +Run `warnet --help` to see a list of options. -All `warnet` commands accept a `--network` option, which allows you to specify the warnet you want to control. +Once `warnet` is running it can be interacted with using the cli tool `warcli`. +All `warcli` commands accept a `--network` option, which allows you to specify the warnet you want to control. This is set by default to `--network="warnet"` to simplify default operation. 
-To start an example warnet, with your venv active, run the following command to use the default graph and network: +To start an example warnet, with your venv active and the server running, run the following command to use the default graph and network: ```bash -warnet start +warcli start +``` + +To see available commands use: + +```bash +# All commands help +warcli help + +# Sub-command help +warcli help networks ``` Each container is a node as described in the graph, along with various data exporters and a demo grafana dashboard. -The commands listed in `warnet --help` can then be used to control and query the nodes. +The commands listed in `warcli help` can then be used to control and query the nodes. ### Run scenarios on a network @@ -150,31 +160,31 @@ See `/src/scenarios` for examples of how these can be written. To see available scenarios (loaded from the default directory): ```bash -warnet list +warcli scenarios list ``` -Once a scenarios is selected it can be run with `warnet run [--network=warnet]`, e.g.: +Once a scenarios is selected it can be run with `warcli scenarios run [--network=warnet]`, e.g.: ```bash # Command one node to generate a wallet and fill 100 blocks with 100 txs each -warnet run tx-flood.py +warcli scenarios run tx-flood.py ``` -This will run the run the scenario in the background until it exits, or is killed by the user. +This will run the run the scenario in the background until it exits or is killed by the user. ### Stopping -Currently the warnet can be stopped, or stopped and removed, but **not** stopped, persisted and restarted. +Currently the warnet can be stopped, but **not** stopped, persisted and continued. Persisting the warnet during a stoppage is WIP. 
-To stop the warnet, or remove it (which first stops, then deletes the containers): +To stop the warnet server: ```bash # stop but retain containers -warnet stop +warcli network down -# stop and erase containers -warnet wipe +# stop warnetd +warcli stop ``` ## Remote / Cloud Deployment diff --git a/pyproject.toml b/pyproject.toml index 68088f729..ded0736bd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,22 +13,24 @@ classifiers = [ "Programming Language :: Python :: 3", ] dependencies = [ + "black==23.7.0", + "click==8.1.7", "docker==6.1.3", - "networkx==3.1", - "PyYAML==6.0.1", - "typer[all]==0.9.0", + "flask==2.3.3", + "Flask-JSONRPC==2.2.2", + "gunicorn==21.2.0", "jsonschema", "jsonrpcserver==5.0.3", "jsonrpcclient==4.0.0", - "gunicorn==21.2.0", - "flask==2.3.3", - "Flask-JSONRPC==2.2.2", + "networkx==3.1", + "rich==13.5.2", + "PyYAML==6.0.1", ] dynamic = ["version"] [project.scripts] -warnetd = "warnet.warnetd:run_gunicorn" -warnet = "warnet.cli:cli" +warnet = "warnet.warnetd:run_gunicorn" +warcli = "warnet.cli.main:cli" [tool.black] line-length = 88 diff --git a/src/scenarios/tx_flood.py b/src/scenarios/tx_flood.py index 91a5da8f7..224e099e2 100755 --- a/src/scenarios/tx_flood.py +++ b/src/scenarios/tx_flood.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - from warnet.test_framework_bridge import WarnetTestFramework from scenarios.utils import ensure_miner diff --git a/src/services/__init__.py b/src/services/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/services/base_service.py b/src/services/base_service.py new file mode 100644 index 000000000..35040b148 --- /dev/null +++ b/src/services/base_service.py @@ -0,0 +1,11 @@ +from pathlib import Path + + +class BaseService: + def __init__(self, docker_network, config_dir=Path()): + self.docker_network = docker_network + self.config_dir = config_dir + self.service = {} + + def get_service(self): + return self.service diff --git a/src/services/dns_seed.py b/src/services/dns_seed.py 
new file mode 100644 index 000000000..b36853d5c --- /dev/null +++ b/src/services/dns_seed.py @@ -0,0 +1,27 @@ +from .base_service import BaseService +import shutil + + +PORT = 15353 +DNS_SEED_NAME = "dns-seed" +ZONE_FILE_NAME = "dns-seed.zone" +NAMED_CONF_NAME = "named.conf.local" + + +class DnsSeed(BaseService): + def __init__(self, docker_network, templates, config_dir): + super().__init__(docker_network) + self.docker_network = docker_network + self.templates = templates + self.service = { + "container_name": f"{self.docker_network}_{DNS_SEED_NAME}", + "ports": [f"{PORT}:53/udp", f"{PORT}:53/tcp"], + "build": { + "context": ".", + "dockerfile": str(self.templates / "Dockerfile_bind9"), + }, + "networks": [self.docker_network], + } + # Copy files for dockerfile + shutil.copy(str(self.templates / ZONE_FILE_NAME), config_dir) + shutil.copy(str(self.templates / NAMED_CONF_NAME), config_dir) diff --git a/src/services/fluentd.py b/src/services/fluentd.py new file mode 100644 index 000000000..457f191f3 --- /dev/null +++ b/src/services/fluentd.py @@ -0,0 +1,24 @@ +from .base_service import BaseService + +FLUENT_IP = "100.102.108.117" +FLUENT_CONF = "fluent.conf" + +class Fluentd(BaseService): + PORT = 24224 + + def __init__(self, docker_network, config_dir): + super().__init__(docker_network, config_dir) + self.service = { + "image": "fluent/fluentd:v1.16-debian-1", # Debian version is recommended officially since it has jemalloc support. 
+ "container_name": f"{self.docker_network}_fluentd", + "ports": [f"{self.PORT}:{self.PORT}"], + "volumes": [ + f"{self.config_dir / FLUENT_CONF}:/fluentd/etc/{FLUENT_CONF}" + ], + "command": ["/bin/sh", "-c", f"sleep 10 && fluentd -c /fluentd/etc/{FLUENT_CONF}"], + "networks": { + self.docker_network: { + "ipv4_address": f"{FLUENT_IP}", + } + }, + } diff --git a/src/services/fork_observer.py b/src/services/fork_observer.py new file mode 100644 index 000000000..2f952be31 --- /dev/null +++ b/src/services/fork_observer.py @@ -0,0 +1,16 @@ +from .base_service import BaseService + +PORT = 12323 + + +class ForkObserver(BaseService): + def __init__(self, docker_network, fork_observer_config): + super().__init__(docker_network) + self.fork_observer_config = fork_observer_config + self.service = { + "image": "b10c/fork-observer:latest", + "container_name": f"{self.docker_network}_fork-observer", + "ports": [f"{PORT}:2323"], + "volumes": [f"{self.fork_observer_config}:/app/config.toml"], + "networks": [self.docker_network], + } diff --git a/src/services/grafana.py b/src/services/grafana.py new file mode 100644 index 000000000..1196e0dd6 --- /dev/null +++ b/src/services/grafana.py @@ -0,0 +1,15 @@ +from .base_service import BaseService + +PORT = 3000 + + +class Grafana(BaseService): + def __init__(self, docker_network): + super().__init__(docker_network) + self.service = { + "image": "grafana/grafana:latest", + "container_name": f"{self.docker_network}_grafana", + "ports": [f"3000:{PORT}"], + "volumes": ["grafana-storage:/var/lib/grafana"], + "networks": [self.docker_network], + } diff --git a/src/services/node_exporter.py b/src/services/node_exporter.py new file mode 100644 index 000000000..c9add95e7 --- /dev/null +++ b/src/services/node_exporter.py @@ -0,0 +1,13 @@ +from .base_service import BaseService + + +class NodeExporter(BaseService): + def __init__(self, docker_network): + super().__init__(docker_network) + self.service = { + "image": "prom/node-exporter:latest", + 
"container_name": f"{self.docker_network}_node-exporter", + "volumes": ["/proc:/host/proc:ro", "/sys:/host/sys:ro", "/:/rootfs:ro"], + "command": ["--path.procfs=/host/proc", "--path.sysfs=/host/sys"], + "networks": [self.docker_network], + } diff --git a/src/services/prometheus.py b/src/services/prometheus.py new file mode 100644 index 000000000..6039d6f82 --- /dev/null +++ b/src/services/prometheus.py @@ -0,0 +1,19 @@ +import docker +from .base_service import BaseService + +PORT = 9090 + + +class Prometheus(BaseService): + def __init__(self, docker_network, config_dir): + super().__init__(docker_network, config_dir) + self.service = { + "image": "prom/prometheus:latest", + "container_name": f"{self.docker_network}_prometheus", + "ports": [f"{PORT}:9090"], + "volumes": [ + f"{self.config_dir / 'prometheus.yml'}:/etc/prometheus/prometheus.yml" + ], + "command": ["--config.file=/etc/prometheus/prometheus.yml"], + "networks": [self.docker_network], + } diff --git a/src/services/tor.py b/src/services/tor.py new file mode 100644 index 000000000..d266f1451 --- /dev/null +++ b/src/services/tor.py @@ -0,0 +1,22 @@ +from .base_service import BaseService + +DOCKERFILE = "Dockerfile_tor_da" +DIRECTORY_AUTHORITY_IP = "100.20.15.18" + + +class Tor(BaseService): + def __init__(self, docker_network, templates): + super().__init__(docker_network) + self.templates = templates + self.service = { + "build": { + "context": str(self.templates), + "dockerfile": DOCKERFILE, + }, + "container_name": f"{self.docker_network}_tor", + "networks": { + self.docker_network: { + "ipv4_address": DIRECTORY_AUTHORITY_IP, + } + }, + } diff --git a/src/templates/Dockerfile b/src/templates/Dockerfile index 6cd2ac46e..78bac18f3 100644 --- a/src/templates/Dockerfile +++ b/src/templates/Dockerfile @@ -1,36 +1,28 @@ -FROM ubuntu:20.04 +FROM debian:bookworm-slim as builder ENV DEBIAN_FRONTEND=noninteractive ARG ARCH -ARG BITCOIN_URL ARG BITCOIN_VERSION ARG REPO ARG BRANCH +ARG BITCOIN_URL -# Base 
requirements -RUN apt-get update && apt-get install -y \ - ccache \ - python3 \ - vim \ - build-essential \ +# install or build +RUN set -ex \ + && apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates \ wget \ - tor \ - iproute2; \ - apt-get clean; - -# Download binary -RUN if [ -n "${BITCOIN_URL}" ]; then \ - wget "${BITCOIN_URL}"; \ + && if [ -n "${BITCOIN_URL}" ]; then \ + wget "${BITCOIN_URL}/bitcoin-core-${BITCOIN_VERSION}/bitcoin-${BITCOIN_VERSION}-${ARCH}-linux-gnu.tar.gz"; \ tar -xzf "bitcoin-${BITCOIN_VERSION}-${ARCH}-linux-gnu.tar.gz" -C /usr/local --strip-components=1; \ - fi; - -# Extra requirements to build from source -RUN if [ -n "${REPO}" ]; then \ + fi \ + && if [ -n "${REPO}" ]; then \ apt-get install -y \ + automake \ autotools-dev \ + build-essential \ + git \ libtool \ - automake \ - pkg-config \ libboost-dev \ libevent-dev \ libdb5.3++-dev \ @@ -39,12 +31,7 @@ RUN if [ -n "${REPO}" ]; then \ libzmq3-dev \ libqrencode-dev \ libsqlite3-dev \ - git; \ - apt-get clean; \ - fi; - -# Build from source -RUN if [ -n "${REPO}" ]; then \ + pkg-config; \ mkdir build; \ cd /build; \ git clone --depth 1 --branch "${BRANCH}" "https://github.com/${REPO}"; \ @@ -58,9 +45,48 @@ RUN if [ -n "${REPO}" ]; then \ --enable-suppress-external-warnings; \ make -j$(nproc); \ make install; \ - fi; + fi + +# shrink image size with a second stage +FROM debian:bookworm-slim + +ARG UID=3338 +ARG GID=3338 +ARG REPO +# env var overrides +ENV UID=$UID +ENV GID=$GID +ENV BITCOIN_DATA=/home/bitcoin/.bitcoin + +RUN set -ex \ + && groupadd --gid ${GID} bitcoin \ + && useradd --create-home --no-log-init -u ${UID} -g ${GID} bitcoin \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + gosu \ + tor \ + iproute2 \ + && if [ -n "${REPO}" ]; then \ + apt-get install -y --no-install-recommends \ + libboost-dev \ + libevent-dev \ + libdb5.3++-dev \ + libminiupnpc-dev \ + libnatpmp-dev \ + libzmq3-dev \ + libqrencode-dev \ + libsqlite3-dev; 
\ + fi \ + && apt-get clean \ + && rm -rf /var/cache/apt/* /var/lib/apt/lists/* + +COPY --from=builder /usr/local/bin/bitcoind /usr/local/bin/bitcoin-cli /usr/local/bin/ +COPY docker-entrypoint.sh /entrypoint.sh +COPY tor-keys/* /home/debian-tor/.tor/keys/ +VOLUME ["/home/bitcoin/.bitcoin"] +EXPOSE 8332 8333 18332 18333 18443 18444 38333 38332 -# Start tor with user debian-tor and the Bitcoin Core daemon. -CMD gosu debian-tor tor & \ - /usr/local/bin/bitcoind --datadir=/root/.bitcoin --conf=/root/.bitcoin/bitcoin.conf +ENTRYPOINT ["/entrypoint.sh"] +RUN bitcoind -version | grep -E "Bitcoin Core( Daemon)? version v${BITCOIN_VERSION}" +CMD ["bitcoind"] diff --git a/src/templates/Dockerfile_bind9 b/src/templates/Dockerfile_bind9 new file mode 100644 index 000000000..082261a2d --- /dev/null +++ b/src/templates/Dockerfile_bind9 @@ -0,0 +1,5 @@ +FROM ubuntu/bind9:9.16-20.04_beta +COPY dns-seed.zone /etc/bind/dns-seed.zone +COPY named.conf.local /etc/bind/named.conf.local + +CMD ["/usr/sbin/named", "-g", "-c", "/etc/bind/named.conf"] diff --git a/src/templates/Dockerfile_tor_da b/src/templates/Dockerfile_tor_da new file mode 100644 index 000000000..e07497642 --- /dev/null +++ b/src/templates/Dockerfile_tor_da @@ -0,0 +1,19 @@ +FROM alpine:latest + +RUN apk add --no-cache tor + +RUN addgroup -S debian-tor && adduser -S debian-tor -G debian-tor + +RUN mkdir -p /home/debian-tor/.tor/keys +RUN chown -R debian-tor:debian-tor /home/debian-tor +RUN mkdir -p /var/log/tor +RUN chown -R debian-tor:debian-tor /var/log/tor + +COPY tor-keys /home/debian-tor/.tor/keys +RUN chown -R debian-tor:debian-tor /home/debian-tor/.tor/keys +COPY torrc.da /etc/tor/torrc + +EXPOSE 9050 + +USER debian-tor +CMD ["tor", "-f", "/etc/tor/torrc"] diff --git a/src/templates/bitcoin.conf b/src/templates/bitcoin.conf index fbe6d397c..e18a76e19 100644 --- a/src/templates/bitcoin.conf +++ b/src/templates/bitcoin.conf @@ -14,6 +14,7 @@ debugexclude=rand # RPC rpcallowip=0.0.0.0/0 rpcbind=0.0.0.0 +rest=1 # for 
forkmonitor # Wallet fallbackfee=0.00001000 # P2P diff --git a/src/templates/dns-seed.zone b/src/templates/dns-seed.zone new file mode 100644 index 000000000..710b7b2dc --- /dev/null +++ b/src/templates/dns-seed.zone @@ -0,0 +1,14 @@ +; +; BIND data file for warnet dns seeder service +; +dns-seed. 300 IN SOA dns-seed. admin.warnet.com. ( + 2023082401 ; Serial + 3600 ; Refresh + 1800 ; Retry + 604800 ; Expire + 86400 ; Minimum TTL +) +dns-seed. 300 IN NS dns-seed. +; following line likely needs to be changed to the container ip address +dns-seed. 300 IN A 127.0.0.1 + diff --git a/src/templates/docker-entrypoint.sh b/src/templates/docker-entrypoint.sh new file mode 100755 index 000000000..83f343568 --- /dev/null +++ b/src/templates/docker-entrypoint.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -e + +# Add bitcoin user to tor group to read the auth cookie +usermod -a -G debian-tor bitcoin +cp /etc/tor/torrc_original /etc/tor/torrc +mkdir -p /home/debian-tor/.tor/keys +chown -R debian-tor:debian-tor /home/debian-tor +# Start tor in the background +gosu debian-tor tor & + +if [ -n "${UID+x}" ] && [ "${UID}" != "0" ]; then + usermod -u "$UID" bitcoin +fi + +if [ -n "${GID+x}" ] && [ "${GID}" != "0" ]; then + groupmod -g "$GID" bitcoin +fi + +echo "$0: assuming uid:gid for bitcoin:bitcoin of $(id -u bitcoin):$(id -g bitcoin)" + +if [ $(echo "$1" | cut -c1) = "-" ]; then + echo "$0: assuming arguments for bitcoind" + + set -- bitcoind "$@" +fi + +if [ $(echo "$1" | cut -c1) = "-" ] || [ "$1" = "bitcoind" ]; then + mkdir -p "$BITCOIN_DATA" + chmod 700 "$BITCOIN_DATA" + # Fix permissions for home dir. + chown -R bitcoin:bitcoin "$(getent passwd bitcoin | cut -d: -f6)" + # Fix permissions for bitcoin data dir. 
+ chown -R bitcoin:bitcoin "$BITCOIN_DATA" + + echo "$0: setting data directory to $BITCOIN_DATA" + + set -- "$@" -datadir="$BITCOIN_DATA" +fi + +if [ "$1" = "bitcoind" ] || [ "$1" = "bitcoin-cli" ] || [ "$1" = "bitcoin-tx" ]; then + echo + exec gosu bitcoin "$@" +fi + +echo +exec "$@" diff --git a/src/templates/example.graphml b/src/templates/example.graphml index 417240e14..2bd485061 100644 --- a/src/templates/example.graphml +++ b/src/templates/example.graphml @@ -2,75 +2,78 @@ - - 24.0 + 25.0 uacomment=w0 - 24.0 + 24.1 uacomment=w1 - 24.0 + 23.2 uacomment=w2 - 24.0 + 22.1 uacomment=w3 - - 25.0 + 0.21.2 uacomment=w4 - 25.0 + 0.20.2 uacomment=w5 - 25.0 + 0.19.1 uacomment=w6 - 25.0 + 0.18.1 uacomment=w7 - - 24.1 + 0.18.0 uacomment=w8 - 24.1 + 0.17.2 uacomment=w9 - + 0.16.3 + uacomment=w10 + + + vasild/bitcoin#relay_tx_to_priv_nets sensitiverelayowntx=1,debugexclude=addrman,debug=sensitiverelay,debug=tor,debug=net,uacomment=sensitive_relay - - - - - - - - - - + + + + + + + + + + + - - - - - - - - - - + + + + + + + + + + + diff --git a/src/templates/fluent.conf b/src/templates/fluent.conf new file mode 100644 index 000000000..c9f9444f6 --- /dev/null +++ b/src/templates/fluent.conf @@ -0,0 +1,9 @@ + + @type forward + port 24224 + bind 0.0.0.0 + + + + @type stdout + diff --git a/src/templates/fork_observer_config.toml b/src/templates/fork_observer_config.toml new file mode 100644 index 000000000..7b5421de7 --- /dev/null +++ b/src/templates/fork_observer_config.toml @@ -0,0 +1,31 @@ +# fork-observer base configuration file + +# Database path of the key value store. Will be created if non-existing. +database_path = "db" + +# path to the location of the static www files +www_path = "./www" + +# Interval for checking for new blocks +query_interval = 10 + +# Webserver listen address +address = "0.0.0.0:2323" + +# Custom footer for the site. +footer_html = """ +
+
+ Warnet fork-observer +
+
+ """ + +[[networks]] +id = 0xDEADBE +name = "Warnet" +description = "A custom regtest network" +min_fork_height = 0 +max_interesting_heights = 5 + + diff --git a/src/templates/named.conf.local b/src/templates/named.conf.local new file mode 100644 index 000000000..c76093d72 --- /dev/null +++ b/src/templates/named.conf.local @@ -0,0 +1,4 @@ +zone "dns-seed" { + type master; + file "/etc/bind/dns-seed.zone"; +}; diff --git a/src/templates/tor-entrypoint.sh b/src/templates/tor-entrypoint.sh new file mode 100644 index 000000000..e69de29bb diff --git a/src/templates/tor-keys/authority_certificate b/src/templates/tor-keys/authority_certificate index d94c6b8f6..5106d9ab2 100644 --- a/src/templates/tor-keys/authority_certificate +++ b/src/templates/tor-keys/authority_certificate @@ -1,46 +1,46 @@ dir-key-certificate-version 3 -dir-address 172.18.0.3:9030 -fingerprint 0303840D6B3AD1BDE9FC731EAA8387BD1939C29C -dir-key-published 2023-08-25 19:02:32 -dir-key-expires 2024-08-25 19:02:32 +dir-address 100.20.15.18:9030 +fingerprint 6892AEA34F17542F4E85E77AF2DF6C9F6C108798 +dir-key-published 2023-09-01 20:09:16 +dir-key-expires 2024-09-01 20:09:16 dir-identity-key -----BEGIN RSA PUBLIC KEY----- -MIIBigKCAYEA3jGqtZFeyIBSUKJzmh8dxP9wvR5HHxUVFmGUhWiJ7Ur36kAniNRZ -Kokrm3PMr8ptv4u7YpAWlMsNdikGsROeTLQaJaYOsiqSvP7lCQkVMQmqznmAnoHF -MIe68G26M5UhMP6r8N3BU3Y138AZcuQfr5NJ3BaZg0fvqlWBbLhUzwUH9++5L8RG -QILw8by8lmaRjd1vsnRCRo4j3vULk1C5Po4B2fCHN3L6I9ikK+jmEM/Iz82aeUWR -r6SDX+M1FkyxYeab2pqXL+E7VsvmfEhmZVebMS3C84/OOYG16rUqXTFpVFRoPvNm -jWajvU1ELSbLqYqt+ktstrLKF0ghJQheBH5xW/agAC5KEpjCrhDzym9309pahxhz -W+v4tlzFDWqiguxB9HgCH0cBoQTB8VFun781vzSUPwK0F4Q3vTu8wVsmhV6oi38S -OJNx02EG19cdx4SWEhDaeeOLOjKXa7NexqahiucoQrjFrcrBqjV0ghuR1H639jZT -wTVRc62AkzHxAgMBAAE= +MIIBigKCAYEAo9VyTKk227UQvVmdw4zSlov1UAPL7psRwmZ+kxa14ngClyX0y1K4 +Txj8/5hiTsQ54UOOe4ORLMX5OBOfG3Eg4JhsneFhJdu+jJznZZWA2A343SOgNxF9 +VbC3dshWjZUbYfQvAq7XDhWsFeLnIe/K3aGihUDtqk5IEF/dVQHG9lEQoJfYSmcm 
+0kylHVkF8lwE4x6/Y4mq14ECq7MHkKrTiGbZHfAAbm2N0T18zJxcJ9lS2MZU3A8+ +Nyfzady8VCqiNzlIJRQUojviGD0FdATAlOox9PepYHAW22V+D9YvSM5EtFk/22vM +hwnH9Q0a5FiZYWp2ubiZo+cNfrT6ccb9OSZAfju8Za/GCfuQhKYTFlMfk2laX34V +AUDdq6OSyn0c9eyGjHPRHi/bikEXSV8GK6IceM/aOKxndB5iVxeArQsUzvTR9CeR +633bPczmDeOJhStYE4CdWtpdczZct3gXKLP1sLLcOvvriZlz7Pyle5JL9Mlb6z1i +MPp/hqANGfMhAgMBAAE= -----END RSA PUBLIC KEY----- dir-signing-key -----BEGIN RSA PUBLIC KEY----- -MIIBCgKCAQEA7lYqoruSLa5+1N/47ojihMsWKCORqttlgHPc0QxSRbcxKfoeCZ2C -8FG+CSMy3c9k5GPBZ+pLrtBXFiEXJslyAkYysWYCTkI5bEpDmR1CzWVpGlciF2oC -ZH2x357KN/RlH22+T0tE2nnrGmwPulMoH72HTfB91a9dRm91+tk9uANKrGzidL4U -qsJK7r8IXzBR9RY8k3Ro6FJdnM1VbUAdEAKBwIfOxo0RJVcsZ9e+RdyOPERMP2dt -a+kYnv+ovY/xQZrP61GfqkqgNjEIs9xL7KsZzKerXn+2/cpxfzfcnawrgMd3EyCU -vYDMEHfSAFYPV2Xg/p0rZ5wDDFPwWHMOAQIDAQAB +MIIBCgKCAQEAmCH9KN37GbmztOVJNNaFSjt46uWUEGTap0GaewCJVhjuDTHvU6TA +bzSZadWhczYwulNnYGVBJD1J4qdA+ImrzxAgyIyuDmuJADWqlosCA46LK+5H2Uri +wTsJAxXDqmE0boWca1vXcF5Gwz07hjtXHMrbGZxMjqx6YT89jRfXob8AFXPknSpB ++BG3OaziNLf0MbMlpGlQEHHBpxj9O4myWMhMH+zrMY2MR6y8i1tEEiTqysqQIM0b +kUxIeHkoMUNxR6J1ucS4wmko9b1MVsjW8sEPCDAWDtm1OVOcdqvnjq7MEUHinynT +3ENZCNU5dB8c8pQ+ie2SouM5jF7vUyzSIQIDAQAB -----END RSA PUBLIC KEY----- dir-key-crosscert -----BEGIN ID SIGNATURE----- -s9Cy2FEH59iPYCLT5Ls59KP7BGW18tEI7seyM3Ps9pp4W8tv6XemIWr9Hffh2a2n -uP1o0jkbmOAWb48kWx11NqJDK8J/Qankfd1NXQoNJHZPFaVFj6yACdLmYvFv2Jak -JicW3PlSMx8kRrCt3gaZTpbcdV6pxVVak0yE99aJ1x1Ty83hBun7CsRI3sAbJVSb -ly05DcwNbnvMNDXOZJrGJeHgcfRR384usT67PjPpKJQ6R8vtTOi4WBh9DURsNVWJ -GEtz9mkSersA5IFlg05b47Ggvq7Yw1vlud1BLC6q3pGCkBJ6CcrouRvmQbfV/vDD -Mm21NTV29/qKwXvFBL50Aw== +focD/7ogTIuymfG1I7CPrNB5bXmeKqReXOuW5dAoPTU85feyp9J25SfoBFHs3y8N +o47Igf2mPmSrM+fV23xbFwp6GbBwQXkpIH4RSks3+mEBUSaqjvzf+C/Yx3Bf7TDO +zBpbmS7xOjA9eX2F1h2aQJGHgp2i5fBFzCiWRvTd6LLKqdwdbXTjUEPel6XyPfiQ +Io0VcSQr8qTTU+xsvLN4GxnANqnUyNcV75QOb0RPLpUy7qa53en6LnkqsruvXGd6 +RMF5AMK9e001gquMM6xBNwmU922MbQkYgbB4ljC6gCziP0ueYXTlThA68N9Gkncz +9xRSLZLHhBAO/veK3Xo83Q== -----END ID SIGNATURE----- 
dir-key-certification -----BEGIN SIGNATURE----- -xdnab0RdhKJWnRJHVF1OHSFnGG4KDgc5nae34TKlxpUuKOmST0XlJRFWcPib8zOV -uaqTLeVuyCuil8kXw5iHwJUzdr3x9kwcFGFmA/JeUV7ANDhvsU6wxMCx7iHHkIBe -f89ArUQdej1dPQFD4igElHVA3my7w0o67ZOcEYCW4sthOaS+0m8Cv5i1wL1p11K8 -eB20JtbWO1u60FI3Q01vO79P30D2mTgLSiNuDKaxvWFFRziCLvDMZLHCAtHlOkVR -HWQpAL6TKs8k2XIqUAuPWBhzR8ZtZB+FOQpHZ+Ol2ccB1jPOpg2LdHe8sWuNFeeZ -y6CEDh8xfw5yuWFOYv3+TH+pPdzZm6InWe+C7fmmvfOc+XV0kpy6Tn6mnOdgqZc0 -V8Rh/+2IlLxWZJMEliPAuhvPQByERL7mPudcnmoBOi+XK2orxWOVRBb0pgviu2kw -gvWGoXBlZ04fYZyu3Sq9wvgL04lhIZAQ0DXhju0JCUydILh2s9r4igM/eFk4PFOb +Pn6h6+5bB3yTqByA203cZLbVtLo+G9BB0/ADnRnl4B027QcR6e5PYwP/9dxSP4WC +jo76EEBHd1CYQUZ7pvNI4tBBMGMIO57FNebDNfpZygDR6m3F0AjwcuZhM12FRvHK +XP0XMlRidk0AfAVxVgG9UmYvzO/pvG7O5RbUi+wRzTH04ogMmImH5AbW4zDQ9u89 +uU3HoC/2DN8qADyyn64gXkHKS9Tc44qiTdre5uyK6/FSniuZs7TnDvocleJunKhI +SRc8Ricp3o2HhYaADIPVqvyaCrWqII5o9JwI+75woE3GWsbPrHVSi3OYXJY2hyxG +glbEhsaAATVWUQeJQ8vd9h+Rir4xeE3bJnP3LYsDWm406v8WpH3y4AztOz7FuWTA ++/l6is9S2UAqnmxhurrVFrBq6hf7mA+lS/cXRPSu2xzCIkV5D0qIYL8vHzLfKk6A +2MgG8nH2KSHFe8FpP7JkjwDwv3+U9YVPg/Ky7s0tBLw4vD7qbYG8L4pNH3BqGtpi -----END SIGNATURE----- diff --git a/src/templates/tor-keys/authority_identity_key b/src/templates/tor-keys/authority_identity_key index 72b8c5d38..c67d03a40 100644 --- a/src/templates/tor-keys/authority_identity_key +++ b/src/templates/tor-keys/authority_identity_key @@ -1,41 +1,41 @@ -----BEGIN ENCRYPTED PRIVATE KEY----- -MIIHKjAcBgoqhkiG9w0BDAEDMA4ECAdHyrhMiH3IAgIIAASCBwg1gnpfu0TI3Yj8 -aKynzSp2cTwzl/PuUbLqntC8EcAZpvZ0gy1UbovqwRZoRvy4EzQMMPhzLizXMzK/ -hUVBojXgtqhLsmm5l1891I5iJYjgp9K4HoJ7J1FexJxlmM9KnoYN6I+7gUO5BEI4 -sq02mDeBkZiBREREe7Aau05vG2akgUCtt94HYibN4Nsl2I5SlN9f2/+MUrlsY0YV -IKnTJb7BXiZGeRGLtDqe1fnCwJTuB7iXu+Xfq53GiDAHp9u0/LikjJvZ7zqy+tej -bItVJuCQFfT4B9TRj7mEIxzPo1HQdJdmaHFkPnZ+gNrq3JopMwhY0oX6mut/I+wW -axFmY9GomsV5mPz6dp8mINzux2rxUzhEiA+2cN4xqE5sBDhUetYzZbcyzF/WiBKD -mxZS3ZLyiEaZkg+PcH6oW6FQ7/cghX7/RXUbfAsuuq39SH206vUb/jfOZ55vvJJZ 
-6YFXvdXJuTGEd+Zh5AzPpk5uEOCGZ4MnuMVwJlU3NKBEktvqM081b3pSLTYh/+RY -3nlc2qM/6+Y+kcdHdUKrSxIlGYUcMk1BT6BYblcCrothtBlYzhtg4iO55ry6se4A -u5N5veepSfXmc9CUOcXzAeTAdPZZo0tfNq+R4TrMw3eKjUrt9/SzLgDIGUp91CYG -h079bsTe8bw4U5rrrFkXxuXqaaNlcfaK6cj9Y390CdSLSjCIOgdXqE4G8MYg0+Vh -TyZVRtVOAOB+FJInrnFJUtR/2r6Bick4iPuF9+mvYsInGWOfBJQo/jRSUKBw879c -VbAZM4oTL2iNnOSvdHYF2GDcyBl4zlJ+AjwF4KVABVYLtzf1vZ4uQLxae/Je/dVi -NTJ5fVSLyCzTISZwWXGWtweTkZDaXcPQdjtD/O1s+Dd8uFM17XtrWK9K5+hauSAW -N32u1MZ17c3dHJC6e8xLI4sJUnz+EnYD4UgRzJI1kzDpHzc6ihpgtAwlanfpIQLl -5q0pCj6+BBNrXtAPaOj76CwCGMYVj8aWUV8rkvN2OJ4vKFaymCNoKoP593MO3xC7 -gP4iZ6QzkBPmFl5HthXK77G2VtrqZkDv4Rjj5XC790Tk3FFgteUSQoFuyi2R0aNY -4jUVDfAVf56EXj0916anYktjVvqH5/nMI+qhJjoFbzJ37+MdqU3/w3bUbROW4110 -qVc/xGfinJ6Cam8h9sMLYcHOvHhCcVU49CTYr/06rzCr9oGf8vVRMv3C3DwxRpPF -pIszgohFOJhJWv++YdFPKgG7Er5XXyqDI5Z6NMBkDuUb0i/VLd89udwKdC6Y5+Rl -mCYsPkd7tb1Cm7U9h+J+Bb28xXvJMEv89r9W7iLAz91wMGrJtqYAxp5p+zQ37nMu -vT31i57AfS54vTfu70NUVJ6mc42S0O9IcSMBRT8DpdDLHyLKB5HQ+GLqT7hJzkdq -KqBYTbU8Q/zCs1qaio5DFgXTJf6lMC482GAKBRmFQ9i00dY4n5QT1SUwBojsonqO -CyQjNnChaRe+cZYXiPGM82sFZWelFngZrJ0tTEwkVJBCijN+X0U7R9nCPlFVvbbz -paAKs2j9p2RZQmJgMsnokwtdLlE4WZo1WAvMAP821Br6hDKp1UYYIJR26KhCa4sX -VWjSqB/JZGYk54s83X4DD7JvCAP+RkIcHJi8T0vstseTcoynat7yfgkMIWM6MGjP -t4+ClLp2I8teDdK7oIF8cTn9HsABovlIVyQwUv3TGnZJMM6TLfaWPb5NMxkuGEUg -G1TyAMXHyZrqsylmOiLUpKp5+6Z+zxNQtbMCVBxjjQ/N03clXI/CyDin/n1cCHbR -8jq9+NmFqhB9e8QAPdlaB8i3ZPbJyLBSHs5iar/XSMrKZOY2ST11nhFgKO+eGHtG -QcKEvporkwQWj7lPAA1vJqjvfHNBQl1V4zrcvwiHSp5qY4+ewSJI/tIirCU3+N5B -I3+WXeu+BfX6Wtrb1QuQO6O3pwHfdqXiIdWbYlalXFJVvcNUf1Rb7bfqI5HuEbof -qvQ7Hu1Y3hDi/cXte9y+hkS+/rVFYHvaJAvkknQl4WHw49XZQiI8a5YoLH4kYkwe -A7aYI24oibPtKQ2c9jXrFAetwffp0AWfEWkt/ehxIFwr0M8yIL5C9Xf0YbGcrYD9 -Fs2GazVdvf+9btBn6QsV1uKKjDDCIwJozeIYlntHxw1bHlV5gChGIQtHeJaAbDr1 -9VTRJKf2+FJnccbX17yDY62i0qJ4QH0rur3sVlJwGO4nFeMRKyEGhDVd5pAs/AUs -bD2or3QeZZ8Xfg4gQzi+3Ltcq0b07cS4QruYVhGVrznMvYbbmoTaFYveji236Xgb -Cuj+FFwZEYxoJ6I6PTARGBcwEBgr9DvuLJdC3cMIWDvpg/d0dLWrT5+kHLHQNfrK 
-C+jRpkqKVlhe2pvFAOA= +MIIHKjAcBgoqhkiG9w0BDAEDMA4ECB5bzKuqoWFjAgIIAASCBwikNfm7AMpe0787 +in2HNwCLUqZBrQGk9LLeRuF5lVBrdYrcSdSbvLY76+/veMcvYw/TMyAyNvRBSJ4H +NnR9xkqaCofXbWYx9V0UqwFqcSNcrE3u+DVFXrE1lv9KGMATGhx2ozDRCbACdXgk +1tBX3tdJ03SJVR8oN/0O6Tkk2813EQEipUF7IxanOzLZ7SwxloqvY4EHrYW+724+ +cdOvR7R7vJlXlZKmqA7RmgDYhHJDdBfMNUjmUUZh2dYEXsCR/3DeLvK7mcOeTCI6 +u5+C1XdjraPyaNXgHQ5ydkwCJ5kW8wiwa82JOS/Z5xEWYCUK7bx0sn8h92djkRli +fOfzwvvqlTe93Dd4iAIzn7qKhr7wrKgPyvZZLN3BfOnumNsxeCwr3nrvwexxx2nv +L+pY3kwLwxU24sU6f+a+gl7iUfEZP0MEM9YmE4ke7LVa4Br3dPXtTe24Rl5yH6hI +SeX5VVOS8HH+wCuj/QMkp5zk4YZgWy7ypjTSId/q+5+iXq+A4/n2RVPb8fye5hpk +k49DOQD2fRurh/xuvysS51s0w0shDj92z2OELKbIR6z67ebhMnbU+G86+yjQC2HR +PavUcGA+aVJMe1ZjWuiovWGCy3bLqGsM9HHkKYeLpXRx3ciHQkD3aCztpiyYLR/Z +IOeOpsjGXAI1KhbrSNVrW89CyFaVhp6c33hobKQkd0eNfBswBAvjd4h7xwCgUVas +gBlzdzkq2+RVSGtRNnViFT9hMq6oFpbXbjKS9gPzLrfX1VP6mxuk3nLEHQyJNfZ3 +Dj9oQN+mY+kXqxPw5ZjSrWNBZtmkGqHIpCsuAgH3oeuLE9jA+/OqxVlWJDSPAXhv +1k+YaeuPrriI61BHJ268YoRvoKHWV2ABqWAuE7VdILbyZjcHDdvgUtTui6+GJjkL +igCXhMQSdfOAv4JJ555na7n0rJ7pvv4zZBZDdaVxGTocy0jfqzMZGX+0bx5tVZGw +ilHm2DZP4H9QhrqcU4jV0SNC6LowZDv+WRT6I/kz9o1bjnxC2r4kYn759GvMjL7v +B6tMkeo/ajwgpWVaHYIqWpSPaBhENNI9eO24rv8ZeXTIfKt6Kr+NzWdWUsaicClo +kkkvznAgHxFgW9MJ31hAXpRSm8WlhZiPS0CBXChBPaW9MCFuHw/E1ez7xIrWzMnI +cSj7pO8DJG8TRovwB8IlGr7JLvyGfr/RSV+ORXVm3rfj98MaCqEReRdbp8jd+CwB +yEKf600gIi0V7pH+CaHNrHQ5lnT4YooKPEa3tvqW743aLrzlWrhT/Ng5S884cmYR +0To7G8Ap9yPbddEDQh+xFmos7c8FsOsPL+BtQAs6eXDY+2D/9m1oFXb0VrIfN4v4 +vGIi4+39FARUk+Qe6ek2JQIEZgZTh8Sw95PKmiRumqPW1DRXTONArwi7NJjIu6j1 +fl5QDldCi4fNeIkz9C8LrrW3zT90GudSCbuX0IPRdnB9Uf41kxzGogK1atzCQzH9 +JfVrWg0/OeiR0Tc+pLvhUrI8zd9xOPyRNKgy2S84OjQ2c+wFsEEfEkEU1kw7UmJX +DZD9D0Mt0sxebNLzAQQCLAe4aMVhkb7mHEKbxNLzIFXnPNIB/6p2tqfQZ8YYEZCP +pYLK9OSkV6fblVmACtmNnKMd/sVPlAbyjDb2XlbWU+1QO6MwcaJQSCvA/lQ5ZcWV +5a5sePxYJngUg2BvNuMc544VnaLtmsn1Qkx9R2IRc/wQudgCXtCCeF5ceVqgFUVK +Py1Gu9MukZnjDNxeyx1fJ98OxKPU2V5Fdo4ILo1J6RTAlgo/6qamfCOW2HI8pH81 
+d3HmmKitLWzVfAfySMES5iFv7y8HfRV9IHW+w/CD/WWgW2Mc/9Z4sNhS09/mruZz +OPcfFba6CRfWPvcEMJgUsqU39EY6oJenqySv9vJbxbmM8z/Xh8bJ+zLO1wEeNFAO +K2HDZWt77dKoX/qh1oZHH9o/CDfl0sgikU+waKhgI+C09doMj7TMnoqqGR4NMI7N +naHrgA2cnlAWJ6rBRXLjRe/lytiq7jouLv7n4pKqyYG90R+o4dXUshlOG+4qNh2U +TWbecBknykO89xxLqjuo//qsWMyUw4ACzy1Ph1D53butm9TofdfWzCRzaQL0jbVh +LAqsAcTAKd3HSlSQ4Kuha9Bi6Tu0ByHsDFRUKh39nqOqG5UomL7RtH4REjZgnOxO +yE3VYvU8DfIC1oX1X+evGuCVF4bn/KbLuL6xVKciCOuClJsrjsCeee0emkEkJ6fY +SuZs6jpY2Xh1Pj22+Yi1xRW53HyVHOMrNMw5WnnxchdrdRwI09yfrrLAGpA9niC0 +k9Yu1HhVf4HXEy4R9p26Cs/Q54Cj7LSFbgaZ5MS5ydhK22dTseAPr1LuzOalqyGu +VAChjq6AkD6H/dVmifs= -----END ENCRYPTED PRIVATE KEY----- diff --git a/src/templates/tor-keys/authority_signing_key b/src/templates/tor-keys/authority_signing_key index f1347b5d2..48410dbc2 100644 --- a/src/templates/tor-keys/authority_signing_key +++ b/src/templates/tor-keys/authority_signing_key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA7lYqoruSLa5+1N/47ojihMsWKCORqttlgHPc0QxSRbcxKfoe -CZ2C8FG+CSMy3c9k5GPBZ+pLrtBXFiEXJslyAkYysWYCTkI5bEpDmR1CzWVpGlci -F2oCZH2x357KN/RlH22+T0tE2nnrGmwPulMoH72HTfB91a9dRm91+tk9uANKrGzi -dL4UqsJK7r8IXzBR9RY8k3Ro6FJdnM1VbUAdEAKBwIfOxo0RJVcsZ9e+RdyOPERM -P2dta+kYnv+ovY/xQZrP61GfqkqgNjEIs9xL7KsZzKerXn+2/cpxfzfcnawrgMd3 -EyCUvYDMEHfSAFYPV2Xg/p0rZ5wDDFPwWHMOAQIDAQABAoIBAGzVrjyHmPYBU+uT -p1IN8hqfvqtvqqox68Tfo7tWfA9X2zyG+cZ8RfnF6hi0GRtPBIyCvSPdyte2Tb5O -OAs/PC+rrKRzp6Pi++Pmeb3mrcWrhHZBydCVWShYeaxbD47DrJXQgHInNfbqa6Cy -LfBw3sa3vpypsFyu6tIqPd7h6xwmivPwL6JY3lAx+opednsRJM02CfTdbvHPy+iu -WPbtMngc0Q5sAgHp3m00/FHK7uv1kpdwxMvKKO8MjOgbscg8OvosJG+WQlmO2Z3d -pF3es3ZZHMe8V1ujIjPtzxA6c6lFNj5ngvP9Ebzsa5UlVXRrxgFjymTx0EHrCwXp -rgGZSQECgYEA/sxkQDCQo1MGY62sbMFQ6byjQsbMIoyPzZL1stccuzb0kffi6DjX -+Wss17+Rmi6AIpX5urElIsjhfSMx/0mhVDWhvH+VkIZJXy9ApQeNapFO1ZxuqyY3 -9CTUMZ7C+/X88qBY84GGzJZq8VhDrOVjzWUTjfofNUErA3uSteW95RECgYEA73Xm -tjXEnVPdckgjbTbmq7N+TM9J5gcv9sub09j0Ckjy5UKXbsU98pWeVmmuXf7FrLH5 -IY0sRomg2MS460cS8x17die28b3Ze/nmeR0rxW+LFQWn+0jG7a+nYV2vVDlUFyal 
-b+PSr7aqV2z2TUcTQYAXWpTo8k/H8XOUQoVT2fECgYBWjipUH+tSgCbAf8P+IeGR -tVIkohHAfs1vAUhT231YH8APQg0j5AOCKCVQmTQmEzvWA+qUwA6kdWccMnOOjH2w -dJOfzBuLwAJ2wj+MkDc7B2enav+xRkdluYkX3h3Qp+yoo030UoDvSP3FRL2go0A3 -CuoMpfYqChxJj5X0ErpicQKBgQDMMpAw5RzqxG9cWuPK6DxAQun6+fEcdRiTrgEN -4D4K3zYyBl04Gn6+9JN3QNtuUCmlIIc/8zDEXeyJrQe7qnogUw3LLga1kp15ORsu -QI/o5zuFC7O5dtAcM+PaBZfTjKeGrm/5QB+Ts4MC1moab/edrWs81SroJtxrq5HM -QAHHIQKBgFXc3hS3HC9CCBYg3WTygbPeT6J44lJBA8Gnx6VFUu8eSeOhI9mPXOI5 -bshun75tPO8TnT6q9+qWkZlvZu1omCA6JbGAcZ6N/MVn62UUxmLuoA1pImlKpNlZ -rNU94D6piYA9U/EOjfJNez4A5JMoLQ3PFH4tF41jzVsJNrowoYob +MIIEowIBAAKCAQEAmCH9KN37GbmztOVJNNaFSjt46uWUEGTap0GaewCJVhjuDTHv +U6TAbzSZadWhczYwulNnYGVBJD1J4qdA+ImrzxAgyIyuDmuJADWqlosCA46LK+5H +2UriwTsJAxXDqmE0boWca1vXcF5Gwz07hjtXHMrbGZxMjqx6YT89jRfXob8AFXPk +nSpB+BG3OaziNLf0MbMlpGlQEHHBpxj9O4myWMhMH+zrMY2MR6y8i1tEEiTqysqQ +IM0bkUxIeHkoMUNxR6J1ucS4wmko9b1MVsjW8sEPCDAWDtm1OVOcdqvnjq7MEUHi +nynT3ENZCNU5dB8c8pQ+ie2SouM5jF7vUyzSIQIDAQABAoIBACPHBob4YWek5Q7b +R1KeSp1xDBhN4nrShRuFkaNwtluhpEZFIpAMtFbSy2t42q3CJkaDe9HPLjksEraz +d9ObaOTa2aLgdsScZI1Akbwyt8gHW1J7CLmg41/nFtD7atckYhTF5knXQPzDpyee +PGPui6eRS2Kj/9sj0+oP2dVXMJZwfCVRIQLHX1KQg0n23v2EnTKUhQeQ/XpOeQtr +7ywy5PGyIOLahqa1P5xyWTgRUMQGU1OpPgcclVG6kcW1EGA1gmkYggEoonocdCoL +dMXpSC6n72tUopxJzkCZL03PbfNSaMD1zDCoMHpbkNqDVnOqE8J8w4PbjT4Nfpls +MY+kVZMCgYEAto9QCjNtFA6jvSJiczdx2VrIqhf/rCVbX7zXbZcvK0JMwQC7naO9 +8J8JCZRLVKUhy5BzRyUzf8CjohZyQUgsHS4dXEAbEt5/9UKUb2PVNZNpNgiwyBS0 +43jiR8Sk6O6VdPgPukdyhBYpRtc6MUjUNjKzZnC6DsVJLafhqoiRUjcCgYEA1VUv +pijruhVy1FZKGANzdF82j/orpsUxWeLMcXOJOVod66ApXQONR4jhytwcs+YMBwCp +dZuTjjTVvMsal3X6aKC1jqWgc8HtodkvnBbvpHaA8pTHHlMZeV9APe6ABK8O+5Jf +YCLS6B53O4Jzed0iXb3ifQyePYVf1aVgCzuKMmcCgYBqZ4FpUKwFArtg44eqS+ip +nQZUTYSRm3x0gqa1k2bEufHulDTGHsf4VVl5Idv+S/k0os7t6rrnfSpisN6LOjus +DlLCkAK3rsO5+cJ2YiPFBFRQUCRpmdgMtUp+NhlAQr8N2/+evUH7xsbsKUobq600 +obx6ur1XGpoaaD1ZnhvFZQKBgBCcbHRktU4tA17zRRuvkRrbmxUE5sc6jYgxN7gR +xfqYQMIrG21prGOIjgRMtVNwszkMXSWSMqAdDxN/QRfQEOp6sQhlbyP0UruKfMGL 
+q6dDrvC0kviSgeZIe2kXHBLBSspd8F08+O0oCiKxGqq/hECGW49fHyctLnQlR2A4 +h587AoGBAKXXGEwOSE96Cx0rJhC5sol2/mwM66831kcnAEoXIjb/EoB63jJWDZCZ +S3niNj85FNQLZAafmulxZPOlXq4+pXlE02YefBJBvEWf3zuh8ZToz519CYNuJZ9K +HBktvpst3yMZ1rz3esJPJRT8ZgrBXMZtoAJ1oTMIJRLkeIA4x4/j -----END RSA PRIVATE KEY----- diff --git a/src/templates/tor-keys/ed25519_master_id_public_key b/src/templates/tor-keys/ed25519_master_id_public_key deleted file mode 100644 index 48d0380d0..000000000 Binary files a/src/templates/tor-keys/ed25519_master_id_public_key and /dev/null differ diff --git a/src/templates/tor-keys/ed25519_master_id_secret_key b/src/templates/tor-keys/ed25519_master_id_secret_key deleted file mode 100644 index a4649aa10..000000000 Binary files a/src/templates/tor-keys/ed25519_master_id_secret_key and /dev/null differ diff --git a/src/templates/tor-keys/ed25519_signing_cert b/src/templates/tor-keys/ed25519_signing_cert deleted file mode 100644 index aeb02572e..000000000 Binary files a/src/templates/tor-keys/ed25519_signing_cert and /dev/null differ diff --git a/src/templates/tor-keys/ed25519_signing_secret_key b/src/templates/tor-keys/ed25519_signing_secret_key deleted file mode 100644 index a6b78b1c9..000000000 Binary files a/src/templates/tor-keys/ed25519_signing_secret_key and /dev/null differ diff --git a/src/templates/tor-keys/secret_id_key b/src/templates/tor-keys/secret_id_key deleted file mode 100644 index 12255d84f..000000000 --- a/src/templates/tor-keys/secret_id_key +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXAIBAAKBgQDfv1F98ex0d95PYzSN8MbMHl2cYNnmQQEORNBQvygxojI9D7Pt -Yrj9f3fWJbphzhRYXZL3doxbZ5BVZJkGPKPwxFijxfzj7ExUyqvwUU9II9KIyygF -xnEk1DLkZKIlfmoEBb53MjzD32igzVZ0BtkZoZw/CSTD1rYT4gX1hryzAwIDAQAB -AoGALSeFIK+0EoUmXksdDhArboIqTibVkrxHpEOK7uDFEc8z6FLE8wFbZ/1Th+ox -RQ+712F7CWnpRYEPBcy5fSK7yriXYgYSnqzTuIGEnQ1MPUdDRNTk5OrS5vlU0G9o -g8xvttJq/5qckvaL7sdmtbrNHpROnWj3LsU84r/6VJlwytECQQDz2J8Acb2iuwUc -ZxsYcVwbYRw8Xfx+Lq1yaRBcc++SNZFJ0xuhqnia/Rga8aHUlXHSvaFjxd5aUcqz 
-4gfKhxJdAkEA6uY+gFCQe+x9c969QugrX/WfWCI5mY67brjgiBC6TEvr+fBZ3nSw -FuC6JP16VJXGXNc6NDSDJnVligVV1c9E3wJAMOfJO/WT6wdHRyMGtChIKNWZLCJN -eaEy+DdKKMC308MamIvWht4BwlPwebbslg5C/dk6fSA6MDMnTFyQ43WjoQJBANjq -1jzYKRTHRvKBdnUawTNUN1nEATz0udF9mJsOF3vtgHTGp7buOs5lqIAQM694wD1r -46vh8lrJfoL+ek1/ojECQFZOvdd8nBh6JU98A/zbmlPQqoQ/3qhb5H9WNFAL8PAs -u9vkLoJ0UDM9gxQUJelHEutG63n9oDwuCpeoriiRF4s= ------END RSA PRIVATE KEY----- diff --git a/src/templates/tor-keys/secret_onion_key b/src/templates/tor-keys/secret_onion_key deleted file mode 100644 index 57aa15ede..000000000 --- a/src/templates/tor-keys/secret_onion_key +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQCbSRxFx7y5rAtONk6YUGf5jCSMufLZKuuako+72KA8KxpeW5AY -pCPhlAOvGm4QZGoRRq3Sj3hdCF9eu9SVsOhuHYymhgIeLhcFahlACbl7EVLT39NW -WTHlgJBB4E5d2yUK1J8MMxRkzTxjEn/ReRaexQS69Ns8CIFkAm9HUmJ9SwIDAQAB -AoGABifhzQpb+uhNEm6osvUNCiF6GXINpHpFGL2SoRm3UGtNAkyS68cl/P1HIBvm -HRQ9+C1bGqvBU77POQwtIzn+HL+TQwOC4sKqh1h+g3ObYzEnV2EsO2d1IZBj6Gc1 -y3G59nSJAM7jdTvat04O3ojMR1LxaiGSg0SVhWEmjYx86BECQQDL9XoagMhF4Xe+ -pA8CyZrZtH0Iy/fzKvUgpHk23pjLy9H2LItvbQFj8L+hDWn2Fx06OfxGtv2LBK5y -YHzqc+/VAkEAwuhPbE2s3HRrRykgQ42naAvcNXdAFGKs00MCfd0U7CFunwroCsWH -WL8Cjc74l2nqEYStb//mSiuEnz07OiponwJAUQ7oTiwsbAlli7Cr/pGdJzSiFKLr -kgrajPSQ8AubcjM46LtfVxNdYBjIL/uixxj8lq2A46HKNjvQEMjkHFGjWQJANSkN -uJl7A2oRdIhID4TMWzmY51BOPwEm6DxHqrKpKTp5JJHh9kYoA1GdzbXc4dd2iy6n -63tdgW9bLd+SjuvQOwJAGCvD2rMN2KpnjRq/wBDkLxU9ss4xVQ2f4G6J8XF1gcuQ -6tBebDcc4Rw5KmGZMAoLyqLity44+qDgrxvWgQZFdg== ------END RSA PRIVATE KEY----- diff --git a/src/templates/tor-keys/secret_onion_key_ntor b/src/templates/tor-keys/secret_onion_key_ntor deleted file mode 100644 index 90eb769a8..000000000 Binary files a/src/templates/tor-keys/secret_onion_key_ntor and /dev/null differ diff --git a/src/templates/torrc b/src/templates/torrc index 5fa8ea1ad..00ccc9c25 100644 --- a/src/templates/torrc +++ b/src/templates/torrc @@ -1,16 +1,19 @@ # Common Log debug file /var/log/tor/debug.log +DataDirectory 
/home/debian-tor/.tor/ RunAsDaemon 1 ControlPort 9051 CookieAuthentication 1 +CookieAuthFileGroupReadable 1 +DataDirectoryGroupReadable 1 ORPort 9001 ExitPolicy accept *:* TestingTorNetwork 1 # Relay -DirAuthority DAeesohphoox orport=9030 no-v2 v3ident=0303840D6B3AD1BDE9FC731EAA8387BD1939C29C 100.20.15.18:9030 6A68434F4CDC4664A8B129929CE4A4D39D4BC8B4 +DirAuthority orport=9030 no-v2 v3ident=6892AEA34F17542F4E85E77AF2DF6C9F6C108798 100.20.15.18:9030 6892AEA34F17542F4E85E77AF2DF6C9F6C108798 AssumeReachable 1 PathsNeededToBuildCircuits 0.25 TestingDirAuthVoteExit * TestingDirAuthVoteHSDir * -V3AuthNIntervalsValid 2 \ No newline at end of file +V3AuthNIntervalsValid 2 diff --git a/src/templates/torrc.da b/src/templates/torrc.da index 479c106c4..8c4bb7668 100644 --- a/src/templates/torrc.da +++ b/src/templates/torrc.da @@ -1,14 +1,19 @@ # Common Log debug file /var/log/tor/debug.log -RunAsDaemon 1 +DataDirectory /home/debian-tor/.tor +RunAsDaemon 0 ControlPort 9051 CookieAuthentication 1 +CookieAuthFileGroupReadable 1 +DataDirectoryGroupReadable 1 ORPort 9001 ExitPolicy accept *:* TestingTorNetwork 1 +# hardcode hack +Address 100.20.15.18 # Relay -DirAuthority DAeesohphoox orport=9030 no-v2 v3ident=0303840D6B3AD1BDE9FC731EAA8387BD1939C29C 100.20.15.18:9030 6A68434F4CDC4664A8B129929CE4A4D39D4BC8B4 +DirAuthority orport=9030 no-v2 v3ident=6892AEA34F17542F4E85E77AF2DF6C9F6C108798 100.20.15.18:9030 6892AEA34F17542F4E85E77AF2DF6C9F6C108798 AssumeReachable 1 PathsNeededToBuildCircuits 0.25 TestingDirAuthVoteExit * diff --git a/src/templates/warnet_entrypoint.sh b/src/templates/warnet_entrypoint.sh new file mode 100755 index 000000000..f41a26d72 --- /dev/null +++ b/src/templates/warnet_entrypoint.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# Custom warnet entrypoint instructions, will be run before base image entrypoint.sh + +# bitcoin +usermod -a -G debian-tor bitcoin + +# tor +cp /etc/tor/torrc_original /etc/tor/torrc +mkdir -p /home/debian-tor/.tor/keys +chown -R debian-tor:debian-tor 
/home/debian-tor +gosu debian-tor tor + +exec /entrypoint.sh bitcoind diff --git a/src/utils/gen_dockerfiles.py b/src/utils/gen_dockerfiles.py new file mode 100644 index 000000000..76efe8bd3 --- /dev/null +++ b/src/utils/gen_dockerfiles.py @@ -0,0 +1,26 @@ +from templates import TEMPLATES +from warnet.utils import SUPPORTED_TAGS + +base_url = "ruimarinho/bitcoin-core" + +dockerfile_template = """FROM {base_url}:{tag} + +RUN apt-get update && apt-get install -y --no-install-recommends \\ + tor \\ + iproute2 \\ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +COPY tor-keys/* /home/debian-tor/.tor/keys/ +COPY warnet_entrypoint.sh /warnet_entrypoint.sh +""" + +for tag in SUPPORTED_TAGS: + dockerfile_content = dockerfile_template.format(base_url=base_url, tag=tag) + + with open(TEMPLATES / f"Dockerfile_{tag}", "w") as file: + file.write(dockerfile_content) + + print(f"generated Dockerfile for tag {tag}") + +print("done") diff --git a/src/warnet/cli.py b/src/warnet/cli.py deleted file mode 100644 index ac1680fcc..000000000 --- a/src/warnet/cli.py +++ /dev/null @@ -1,165 +0,0 @@ -import requests -from typing_extensions import Annotated -from typing import Optional, Any, Tuple, Dict, Union -from pathlib import Path - -from jsonrpcclient import Ok, parse, request -import typer -from rich import print - -from templates import TEMPLATES -from warnet.warnetd import WARNETD_PORT - -EXAMPLE_GRAPH_FILE = TEMPLATES / "example.graphml" - -cli = typer.Typer() -debug = typer.Typer() -cli.add_typer(debug, name="debug", help="Various warnet debug commands") - - -def rpc(rpc_method, params: Optional[Union[Dict[str, Any], Tuple[Any, ...]]]): - payload = request(rpc_method, params) - response = requests.post(f"http://localhost:{WARNETD_PORT}/api", json=payload) - parsed = parse(response.json()) - - if isinstance(parsed, Ok): - return parsed.result - else: - print(parsed) - raise Exception(parsed.message) - - -@cli.command() -def bcli( - node: int, - method: str, - params: 
Annotated[Optional[list[str]], typer.Argument()] = None, - network: str = "warnet", -): - """ - Call bitcoin-cli on in - """ - try: - result = rpc( - "bcli", - {"network": network, "node": node, "method": method, "params": params}, - ) - print(result) - except Exception as e: - print(f"bitcoin-cli {method} {params} failed on node {node}:\n{e}") - - -@cli.command() -def debug_log(node: int, network: str = "warnet"): - """ - Fetch the Bitcoin Core debug log from in - """ - try: - result = rpc("debug_log", {"node": node, "network": network}) - print(result) - except Exception as e: - print(f"In our pursuit of knowledge from node {node}, we were thwarted: {e}") - - -@cli.command() -def messages(node_a: int, node_b: int, network: str = "warnet"): - """ - Fetch messages sent between and in - """ - try: - result = rpc( - "messages", {"network": network, "node_a": node_a, "node_b": node_b} - ) - print(result) - except Exception as e: - print( - f"Amidst the fog of war, we failed to relay messages between strongholds {node_a} and {node_b}: {e}" - ) - - -@cli.command() -def list(): - """ - List available scenarios in the Warnet Test Framework - """ - try: - result = rpc("list", None) - print(result) - except Exception as e: - print(f"Error listing scenarios: {e}") - - -@cli.command() -def run(scenario: str): - """ - Run from the Warnet Test Framework - """ - try: - res = rpc("run", {"scenario": scenario}) - print(res) - except Exception as e: - print(f"Error running scenario: {e}") - - -@debug.command() -def generate_compose(graph_file: str, network: str = "warnet"): - """ - Generate the docker-compose file for a given graph_file and return it. - Does not start the network. 
- """ - try: - result = rpc("generate_compose", {"graph_file": graph_file, "network": network}) - print(result) - except Exception as e: - print(f"Error generating compose: {e}") - -@cli.command() -def start(graph_file: Path = EXAMPLE_GRAPH_FILE, network: str = "warnet"): - """ - Start a warnet with topology loaded from a into [network] (default: "warnet") - """ - try: - result = rpc("from_file", {"graph_file": str(graph_file), "network": network}) - print(result) - except Exception as e: - print(f"Error creating network: {e}") - - -@cli.command() -def stop(network: str = "warnet"): - """ - Stop all docker containers in [network] (default: "warnet"). - """ - try: - result = rpc("stop", {"network": network}) - print(result) - except Exception as e: - print(f"As we endeavored to cease operations, adversity struck: {e}") - - -@cli.command() -def remove(network: str = "warnet"): - """ - Stop and then erase all docker containers in [network] (default: "warnet"). - """ - try: - result = rpc("remove", {"network": network}) - print(result) - except Exception as e: - print(f"Error removing network: {e}") - - -@cli.command() -def stop_daemon(): - """ - Stop the warnetd daemon. 
- """ - try: - result = rpc("stop_daemon", None) - print(result) - except Exception as e: - print(f"As we endeavored to cease operations, adversity struck: {e}") - - -if __name__ == "__main__": - cli() diff --git a/src/warnet/cli/__init__.py b/src/warnet/cli/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/warnet/cli/debug.py b/src/warnet/cli/debug.py new file mode 100644 index 000000000..a344309f0 --- /dev/null +++ b/src/warnet/cli/debug.py @@ -0,0 +1,46 @@ +from pathlib import Path + +import click +from rich import print + +from templates import TEMPLATES +from warnet.cli.rpc import rpc_call + +EXAMPLE_GRAPH_FILE = TEMPLATES / "example.graphml" + + +@click.group(name="debug") +def debug(): + """Debug commands""" + + +@debug.command() +@click.argument("graph_file", type=str) +@click.option("--network", default="warnet", show_default=True) +def generate_compose(graph_file: str, network: str = "warnet"): + """ + Generate the docker-compose file for a given and <--network> (default: "warnet") name and return it. 
+ """ + try: + result = rpc_call( + "generate_compose", {"graph_file": graph_file, "network": network} + ) + print(result) + except Exception as e: + print(f"Error generating compose: {e}") + + +@debug.command() +@click.argument("graph_file", type=str) +@click.option("--network", default="warnet", show_default=True) +def update_dns_seed(graph_file: Path = EXAMPLE_GRAPH_FILE, network: str = "warnet"): + """ + Update the dns seed database using a on <--network> (default: "warnet") + """ + try: + result = rpc_call( + "update_dns_seeder", {"graph_file": str(graph_file), "network": network} + ) + print(result) + except Exception as e: + print(f"Error updating dns seed addresses: {e}") diff --git a/src/warnet/cli/main.py b/src/warnet/cli/main.py new file mode 100644 index 000000000..0644cac48 --- /dev/null +++ b/src/warnet/cli/main.py @@ -0,0 +1,148 @@ +import click +from rich import print as richprint + +from templates import TEMPLATES +from warnet.cli.debug import debug +from warnet.cli.network import network +from warnet.cli.rpc import rpc_call +from warnet.cli.scenarios import scenarios + +EXAMPLE_GRAPH_FILE = TEMPLATES / "example.graphml" + + +@click.group() +def cli(): + pass + + +cli.add_command(debug) +cli.add_command(scenarios) +cli.add_command(network) + + +@cli.command(name="help") +@click.argument("command", required=False, default=None) +@click.pass_context +def help_command(ctx, command): + """ + Display help information for the given command. + If no command is given, display help for the main CLI. 
+ """ + if command is None: + # Display help for the main CLI + richprint(ctx.parent.get_help()) + return + + # Fetch the command object + cmd_obj = cli.get_command(ctx, command) + + if cmd_obj is None: + richprint(f"Unknown command: {command}") + return + + # Extract only the relevant help information (excluding the initial usage line) + # help_info = cmd_obj.get_help(ctx).split("\n", 1)[-1].strip() + help_info = cmd_obj.get_help(ctx).strip() + + + # Extract the arguments of the command + arguments = [ + param.human_readable_name.upper() + for param in cmd_obj.params + if isinstance(param, click.Argument) + ] + + # Determine the correct usage string based on whether the command has subcommands + if isinstance(cmd_obj, click.Group) and cmd_obj.list_commands(ctx): + usage_str = ( + f"Usage: warnet {command} [OPTIONS] COMMAND [ARGS...]\n\n{help_info}" + ) + else: + args_str = " ".join(arguments) + usage_str = f"Usage: warnet {command} [OPTIONS] {args_str}\n\n{help_info}" + + richprint(usage_str) + + +cli.add_command(help_command) + + +@cli.command(context_settings={"ignore_unknown_options": True}) +@click.argument("node", type=int) +@click.argument( + "method", type=str, nargs=-1 +) # this will capture all remaining arguments +@click.option("--params", type=str, multiple=True, default=()) +@click.option("--network", default="warnet", show_default=True) +def rpc(node, method, params, network): + """ + Call bitcoin-cli on in <--network> + """ + if len(method) > 2: + raise click.BadArgumentUsage( + "You can provide at most two arguments for 'method'." 
+ ) + + # Convert tuple to space-separated string + method_str = " ".join(method) + + try: + result = rpc_call( + "bcli", + {"network": network, "node": node, "method": method_str, "params": params}, + ) + richprint(result) + except Exception as e: + richprint(f"bitcoin-cli {method_str} {params} failed on node {node}:\n{e}") + + +@cli.command() +@click.argument("node", type=int, required=True) +@click.option("--network", default="warnet", show_default=True) +def debug_log(node, network): + """ + Fetch the Bitcoin Core debug log from in [network] + """ + try: + result = rpc_call("debug_log", {"node": node, "network": network}) + print(result) + except Exception as e: + richprint(f"In our pursuit of knowledge from node {node}, we were thwarted: {e}") + + +@cli.command() +@click.argument("node_a", type=int, required=True) +@click.argument("node_b", type=int, required=True) +@click.option("--network", default="warnet", show_default=True) +def messages(node_a, node_b, network): + """ + Fetch messages sent between and in + """ + import logging + + logging.warning(f"got args: {node_a}, {node_b}, {network}") + try: + result = rpc_call( + "messages", {"network": network, "node_a": node_a, "node_b": node_b} + ) + richprint(result) + except Exception as e: + richprint( + f"Error fetching messages between {node_a} and {node_b}: {e}" + ) + + +@cli.command() +def stop(): + """ + Stop warnetd. 
+ """ + try: + result = rpc_call("stop", None) + richprint(result) + except Exception as e: + richprint(f"Error stopping warnetd: {e}") + + +if __name__ == "__main__": + cli() diff --git a/src/warnet/cli/network.py b/src/warnet/cli/network.py new file mode 100644 index 000000000..92581ac96 --- /dev/null +++ b/src/warnet/cli/network.py @@ -0,0 +1,62 @@ +from pathlib import Path + +import click +from rich import print + +from templates import TEMPLATES +from warnet.cli.rpc import rpc_call + +EXAMPLE_GRAPH_FILE = TEMPLATES / "example.graphml" + + +@click.group(name="network") +def network(): + """Network commands""" + + + +@network.command() +@click.argument("graph_file", default=EXAMPLE_GRAPH_FILE, type=click.Path()) +@click.option("--force", default=False, is_flag=True, type=bool) +@click.option("--network", default="warnet", show_default=True) +def start( + graph_file: Path = EXAMPLE_GRAPH_FILE, force: bool = False, network: str = "warnet" +): + """ + Start a warnet with topology loaded from a into <--network> (default: "warnet") + """ + try: + result = rpc_call( + "from_file", + {"graph_file": str(graph_file), "force": force, "network": network}, + ) + print(result) + except Exception as e: + print(f"Error creating network: {e}") + + +@network.command() +@click.option("--network", default="warnet", show_default=True) +def up(network: str = "warnet"): + """ + Run 'docker-compose up' on a warnet named <--network> (default: "warnet"). + """ + try: + result = rpc_call("up", {"network": network}) + print(result) + except Exception as e: + print(f"Error creating network: {e}") + + +@network.command() +@click.option("--network", default="warnet", show_default=True) +def down(network: str = "warnet"): + """ + Run 'docker-compose down on a warnet named <--network> (default: "warnet"). 
+ """ + try: + result = rpc_call("down", {"network": network}) + print(result) + except Exception as e: + print(f"Error running docker-compose down on network {network}: {e}") + diff --git a/src/warnet/cli/rpc.py b/src/warnet/cli/rpc.py new file mode 100644 index 000000000..250f20375 --- /dev/null +++ b/src/warnet/cli/rpc.py @@ -0,0 +1,18 @@ +import requests +from jsonrpcclient.responses import Ok, parse +from jsonrpcclient.requests import request +from typing import Any, Dict, Tuple, Union, Optional +from warnet.warnetd import WARNETD_PORT + + +def rpc_call(rpc_method, params: Optional[Union[Dict[str, Any], Tuple[Any, ...]]]): + payload = request(rpc_method, params) + response = requests.post(f"http://localhost:{WARNETD_PORT}/api", json=payload) + parsed = parse(response.json()) + + if isinstance(parsed, Ok): + return parsed.result + else: + error_message = getattr(parsed, 'message', 'Unknown RPC error') + raise Exception(error_message) + diff --git a/src/warnet/cli/scenarios.py b/src/warnet/cli/scenarios.py new file mode 100644 index 000000000..faaa2d5eb --- /dev/null +++ b/src/warnet/cli/scenarios.py @@ -0,0 +1,69 @@ +import click +from rich import print + +from templates import TEMPLATES +from warnet.cli.rpc import rpc_call + +EXAMPLE_GRAPH_FILE = TEMPLATES / "example.graphml" + + +@click.group(name="scenarios") +def scenarios(): + """Scenario commands""" + + +@scenarios.command() +def list(): + """ + List available scenarios in the Warnet Test Framework + """ + try: + result = rpc_call("list", None) + print(result) + except Exception as e: + print(f"Error listing scenarios: {e}") + + +@scenarios.command(context_settings={"ignore_unknown_options": True}) +@click.argument("scenario", type=str) +@click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) +@click.option("--network", default="warnet", show_default=True) +def run(scenario, network, additional_args): + """ + Run from the Warnet Test Framework on <--network> with optional arguments + """ + 
try: + params = {"scenario": scenario, "additional_args": additional_args, "network": network} + res = rpc_call("run", params) + print(res) + except Exception as e: + print(f"Error running scenario: {e}") + + +@scenarios.command() +@click.option("--network", default="warnet", show_default=True) +def active(network: str = "warnet"): + """ + List running scenarios on <--network> (default=warnet) as "name": "pid" pairs + """ + try: + result = rpc_call("list_running_scenarios", {"network": network}) + print(result) + except Exception as e: + print(f"Error listing scenarios: {e}") + + +@scenarios.command() +@click.argument("pid", type=int) +@click.option("--network", default="warnet", show_default=True) +def stop(pid: int, network: str = "warnet"): + """ + Stop scenario with from running on <--network> + """ + try: + params = {"pid": pid, "network": network} + res = rpc_call("stop_scenario", params) + print(res) + except Exception as e: + print(f"Error stopping scenario: {e}") + diff --git a/src/warnet/client.py b/src/warnet/client.py index a4173c6b8..04f3e0b8c 100644 --- a/src/warnet/client.py +++ b/src/warnet/client.py @@ -1,12 +1,13 @@ import concurrent.futures import logging import threading -from typing import List, Optional +from typing import List, Optional, Any, Dict import docker from warnet.utils import parse_raw_messages from warnet.tank import Tank +from warnet.warnet import Warnet logger = logging.getLogger("warnet.client") @@ -14,7 +15,7 @@ def get_bitcoin_debug_log(network: str, index: int) -> str: tank = Tank.from_docker_env(network, index) subdir = "/" if tank.bitcoin_network == "main" else f"{tank.bitcoin_network}/" - data, stat = tank.container.get_archive(f"/root/.bitcoin/{subdir}debug.log") + data, stat = tank.container.get_archive(f"/home/bitcoin/.bitcoin/{subdir}debug.log") out = "" for chunk in data: out += chunk.decode() @@ -27,12 +28,16 @@ def get_bitcoin_debug_log(network: str, index: int) -> str: def get_bitcoin_cli(network: str, index: int, 
method: str, params=None) -> str: tank = Tank.from_docker_env(network, index) - return tank.exec( - f"bitcoin-cli {method} {' '.join(map(str, params))}" - ).output.decode() + if params: + cmd = f"bitcoin-cli {method} {' '.join(map(str, params))}" + else: + cmd = f"bitcoin-cli {method}" + return tank.exec(cmd=cmd, user="bitcoin") -def get_messages(network: str, src_index: int, dst_index: int) -> List[Optional[str]]: +def get_messages( + network: str, src_index: int, dst_index: int +) -> List[Optional[Dict[str, Any]]]: src_node = Tank.from_docker_env(network, src_index) dst_node = Tank.from_docker_env(network, dst_index) # start with the IP of the peer @@ -42,14 +47,14 @@ def get_messages(network: str, src_index: int, dst_index: int) -> List[Optional[ subdir = ( "/" if src_node.bitcoin_network == "main" else f"{src_node.bitcoin_network}/" ) - exit_code, dirs = src_node.exec(f"ls /root/.bitcoin/{subdir}message_capture") - dirs = dirs.decode().splitlines() + dirs = src_node.exec(f"ls /home/bitcoin/.bitcoin/{subdir}message_capture") + dirs = dirs.splitlines() messages = [] for dir_name in dirs: if dst_ip in dir_name: for file, outbound in [["msgs_recv.dat", False], ["msgs_sent.dat", True]]: data, stat = src_node.container.get_archive( - f"/root/.bitcoin/{subdir}message_capture/{dir_name}/{file}" + f"/home/bitcoin/.bitcoin/{subdir}message_capture/{dir_name}/{file}" ) blob = b"" for chunk in data: @@ -69,10 +74,12 @@ def stop_container(c): logger.info(f"stopping container: {c.name}") c.stop() + def stop_network(network="warnet") -> bool: """ Stop all containers in the network in parallel using a background thread """ + def thread_stop(): d = docker.from_env() network_obj = d.networks.get(network) @@ -85,23 +92,10 @@ def thread_stop(): return True -def remove_container(c): - logger.warning(f"removing container: {c.name}") - c.remove() - -def remove_network(network_name="warnet") -> bool: - def thread_remove_network(): - d = docker.from_env() - network = 
d.networks.get(network_name) - containers = network.containers - - with concurrent.futures.ThreadPoolExecutor() as executor: - executor.map(stop_container, containers) - - # Use a second executor to ensure all stops complete before removes - with concurrent.futures.ThreadPoolExecutor() as executor: - executor.map(remove_container, containers) - - threading.Thread(target=thread_remove_network).start() +def compose_down(network="warnet") -> bool: + """ + Run docker-compose down on a warnet + """ + wn = Warnet.from_network(network=network) + wn.docker_compose_down() return True - diff --git a/src/warnet/tank.py b/src/warnet/tank.py index 9f075a6f7..c67b9bb86 100644 --- a/src/warnet/tank.py +++ b/src/warnet/tank.py @@ -4,13 +4,18 @@ import docker import logging +import shutil from copy import deepcopy +from pathlib import Path +from docker.models.containers import Container from templates import TEMPLATES from warnet.utils import ( - get_architecture, + exponential_backoff, generate_ipv4_addr, sanitize_tc_netem_command, dump_bitcoin_conf, + SUPPORTED_TAGS, + get_architecture, ) CONTAINER_PREFIX_BITCOIND = "tank" @@ -27,6 +32,7 @@ def __init__(self): self.version = "25.0" self.conf = "" self.conf_file = None + self.torrc_file = None self.netem = None self.rpc_port = 18443 self.rpc_user = "warnet_user" @@ -34,18 +40,21 @@ def __init__(self): self._container = None self._suffix = None self._ipv4 = None - self._bitcoind_name = None + self._container_name = None self._exporter_name = None + self.config_dir = Path() def __str__(self) -> str: - return (f"Tank(\n" - f"\tIndex: {self.index}\n" - f"\tVersion: {self.version}\n" - f"\tConf: {self.conf}\n" - f"\tConf File: {self.conf_file}\n" - f"\tNetem: {self.netem}\n" - f"\tIPv4: {self._ipv4}\n" - f"\t)") + return ( + f"Tank(\n" + f"\tIndex: {self.index}\n" + f"\tVersion: {self.version}\n" + f"\tConf: {self.conf}\n" + f"\tConf File: {self.conf_file}\n" + f"\tNetem: {self.netem}\n" + f"\tIPv4: {self._ipv4}\n" + f"\t)" + ) 
@classmethod def from_graph_node(cls, index, warnet): @@ -58,11 +67,32 @@ def from_graph_node(cls, index, warnet): self.index = int(index) node = warnet.graph.nodes[index] if "version" in node: + if not "/" and "#" in self.version: + if node["version"] not in SUPPORTED_TAGS: + raise Exception( + f"Unsupported version: can't be generated from Docker images: {node['version']}" + ) self.version = node["version"] if "bitcoin_config" in node: self.conf = node["bitcoin_config"] if "tc_netem" in node: self.netem = node["tc_netem"] + with open(self.warnet.fork_observer_config, "a") as f: + f.write( + f""" + [[networks.nodes]] + id = {self.index} + name = "Node {self.index}" + description = "Warnet tank {self.index}" + rpc_host = "{self.ipv4}" + rpc_port = {self.rpc_port} + rpc_user = "{self.rpc_user}" + rpc_password = "{self.rpc_password}" +""" + ) + self.config_dir = self.warnet.config_dir / str(self.suffix) + self.config_dir.mkdir(parents=True, exist_ok=True) + self.write_torrc() return self @classmethod @@ -84,32 +114,39 @@ def suffix(self): @property def ipv4(self): if self._ipv4 is None: - if self.index == 0: - self._ipv4 = "100.20.15.18" # Tor directory server - else: - self._ipv4 = generate_ipv4_addr(self.warnet.subnet) + self._ipv4 = generate_ipv4_addr(self.warnet.subnet) return self._ipv4 @property - def bitcoind_name(self): - if self._bitcoind_name is None: - self._bitcoind_name = f"{CONTAINER_PREFIX_BITCOIND}_{self.suffix}" - return self._bitcoind_name + def container_name(self): + if self._container_name is None: + self._container_name = ( + f"{self.docker_network}_{CONTAINER_PREFIX_BITCOIND}_{self.suffix}" + ) + return self._container_name @property def exporter_name(self): if self._exporter_name is None: - self._exporter_name = f"{CONTAINER_PREFIX_PROMETHEUS}_{self.suffix}" + self._exporter_name = ( + f"{self.docker_network}_{CONTAINER_PREFIX_PROMETHEUS}_{self.suffix}" + ) return self._exporter_name @property - def container(self): + def container(self) -> 
Container: if self._container is None: - self._container = docker.from_env().containers.get(self.bitcoind_name) + self._container = docker.from_env().containers.get(self.container_name) return self._container - def exec(self, cmd): - return self.container.exec_run(cmd) + @exponential_backoff() + def exec(self, cmd: str, user: str = "root"): + result = self.container.exec_run(cmd=cmd, user=user) + if result.exit_code != 0: + raise Exception( + f"Command failed with exit code {result.exit_code}: {result.output.decode('utf-8')}" + ) + return result.output.decode("utf-8") def apply_network_conditions(self): if self.netem is None: @@ -117,7 +154,7 @@ def apply_network_conditions(self): if not sanitize_tc_netem_command(self.netem): logger.warning( - f"Not applying unsafe tc-netem conditions to container {self.bitcoind_name}: `{self.netem}`" + f"Not applying unsafe tc-netem conditions to container {self.container_name}: `{self.netem}`" ) return @@ -125,11 +162,11 @@ def apply_network_conditions(self): rcode, result = self.exec(self.netem) if rcode == 0: logger.info( - f"Successfully applied network conditions to {self.bitcoind_name}: `{self.netem}`" + f"Successfully applied network conditions to {self.container_name}: `{self.netem}`" ) else: logger.error( - f"Error applying network conditions to {self.bitcoind_name}: `{self.netem}` ({result})" + f"Error applying network conditions to {self.container_name}: `{self.netem}` ({result})" ) def write_bitcoin_conf(self, base_bitcoin_conf): @@ -149,22 +186,30 @@ def write_bitcoin_conf(self, base_bitcoin_conf): conf[self.bitcoin_network].append(("rpcport", self.rpc_port)) conf_file = dump_bitcoin_conf(conf) - path = self.warnet.tmpdir / f"bitcoin.conf.{self.suffix}" + path = self.config_dir / f"bitcoin.conf" logger.info(f"Wrote file {path}") with open(path, "w") as file: file.write(conf_file) self.conf_file = path + def write_torrc(self): + src_tor_conf_file = TEMPLATES / "torrc" + + dest_path = self.config_dir / "torrc" + 
shutil.copyfile(src_tor_conf_file, dest_path) + self.torrc_file = dest_path + def add_services(self, services): assert self.index is not None assert self.conf_file is not None + services[self.container_name] = {} # Setup bitcoind, either release binary or build from source if "/" and "#" in self.version: # it's a git branch, building step is necessary repo, branch = self.version.split("#") build = { - "context": ".", + "context": str(TEMPLATES), "dockerfile": str(TEMPLATES / "Dockerfile"), "args": { "REPO": repo, @@ -173,42 +218,48 @@ def add_services(self, services): } else: # assume it's a release version, get the binary - arch = get_architecture() build = { - "context": ".", - "dockerfile": str(TEMPLATES / "Dockerfile"), + "context": str(TEMPLATES), + "dockerfile": str(TEMPLATES / f"Dockerfile"), "args": { - "ARCH": arch, - "BITCOIN_VERSION": self.version, - "BITCOIN_URL": f"https://bitcoincore.org/bin/bitcoin-core-{self.version}/bitcoin-{self.version}-{arch}-linux-gnu.tar.gz", + "ARCH": get_architecture(), + "BITCOIN_URL": "https://bitcoincore.org/bin", + "BITCOIN_VERSION": f"{self.version}", }, } - # Add the bitcoind service - services[self.bitcoind_name] = { - "container_name": self.bitcoind_name, - "build": build, - "volumes": [ - f"{self.conf_file}:/root/.bitcoin/bitcoin.conf", - f"{TEMPLATES / ('torrc' if self.index != 0 else 'torrc.da')}:/etc/tor/torrc" - ], - "networks": { - self.docker_network: { - "ipv4_address": f"{self.ipv4}", - } - }, - "privileged": True, - } - if self.index == 0: - services[self.bitcoind_name]["volumes"].append( - f"{TEMPLATES / 'tor-keys'}:/root/.tor/keys") + services[self.container_name].update( + { + "container_name": self.container_name, + "build": build, + "volumes": [ + f"{self.conf_file}:/home/bitcoin/.bitcoin/bitcoin.conf", + f"{self.torrc_file}:/etc/tor/torrc_original", + ], + "networks": { + self.docker_network: { + "ipv4_address": f"{self.ipv4}", + } + }, + "labels": {"warnet": "tank"}, + "privileged": True, + # 
"depends_on": ["fluentd"], + # "logging": { + # "driver": "fluentd", + # "options": { + # "fluentd-address": f"{FLUENT_IP}:24224", + # "tag": "{{.Name}}" + # } + # } + } + ) # Add the prometheus data exporter in a neighboring container services[self.exporter_name] = { "image": "jvstein/bitcoin-prometheus-exporter", "container_name": self.exporter_name, "environment": { - "BITCOIN_RPC_HOST": self.bitcoind_name, + "BITCOIN_RPC_HOST": self.container_name, "BITCOIN_RPC_PORT": self.rpc_port, "BITCOIN_RPC_USER": self.rpc_user, "BITCOIN_RPC_PASSWORD": self.rpc_password, @@ -220,7 +271,7 @@ def add_services(self, services): def add_scrapers(self, scrapers): scrapers.append( { - "job_name": self.bitcoind_name, + "job_name": self.container_name, "scrape_interval": "5s", "static_configs": [{"targets": [f"{self.exporter_name}:9332"]}], } diff --git a/src/warnet/test_framework_bridge.py b/src/warnet/test_framework_bridge.py index 533eb4d20..ffd06511d 100644 --- a/src/warnet/test_framework_bridge.py +++ b/src/warnet/test_framework_bridge.py @@ -9,13 +9,10 @@ from test_framework.test_framework import ( BitcoinTestFramework, TMPDIR_PREFIX, - TestStatus + TestStatus, ) from test_framework.test_node import TestNode -from test_framework.util import ( - get_rpc_proxy, - PortSeed -) +from test_framework.util import get_rpc_proxy, PortSeed from warnet.warnet import Warnet @@ -33,13 +30,13 @@ def run_test(self): # the original methods from BitcoinTestFramework def setup(self): - warnet = Warnet.from_docker_env("warnet") + warnet = Warnet.from_docker_env(self.options.network) for i, tank in enumerate(warnet.tanks): ip = tank.ipv4 - logger.info(f"Adding TestNode {i} from {tank.bitcoind_name} with IP {ip}") + logger.info(f"Adding TestNode {i} from {tank.container_name} with IP {ip}") node = TestNode( i, - "", # datadir path + "", # datadir path chain=tank.bitcoin_network, rpchost=ip, timewait=60, @@ -105,7 +102,7 @@ def setup(self): random.seed(seed) self.log.info("PRNG seed is: 
{}".format(seed)) - self.log.debug('Setting up network thread') + self.log.debug("Setting up network thread") self.network_thread = NetworkThread() self.network_thread.start() @@ -119,62 +116,143 @@ def setup(self): self.success = TestStatus.PASSED - def parse_args(self): previous_releases_path = "" parser = argparse.ArgumentParser(usage="%(prog)s [options]") - parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true", - help="Leave bitcoinds and test.* datadir on exit or error") - parser.add_argument("--nosandbox", dest="nosandbox", default=False, action="store_true", - help="Don't use the syscall sandbox") - parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true", - help="Don't stop bitcoinds after the test execution") - parser.add_argument("--cachedir", dest="cachedir", default=None, - help="Directory for caching pregenerated datadirs (default: %(default)s)") - parser.add_argument("--tmpdir", dest="tmpdir", default=None, - help="Root directory for datadirs") - parser.add_argument("-l", "--loglevel", dest="loglevel", default="DEBUG", - help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") - parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true", - help="Print out all RPC calls as they are made") - parser.add_argument("--portseed", dest="port_seed", default=0, - help="The seed to use for assigning port numbers (default: current process id)") - parser.add_argument("--previous-releases", dest="prev_releases", default=None, action="store_true", - help="Force test of previous releases (default: %(default)s)") - parser.add_argument("--coveragedir", dest="coveragedir", default=None, - help="Write tested RPC commands into this directory") - parser.add_argument("--configfile", dest="configfile", default=None, - help="Location of the test framework config file (default: %(default)s)") - parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", - help="Attach a python debugger if test fails") - parser.add_argument("--usecli", dest="usecli", default=False, action="store_true", - help="use bitcoin-cli instead of RPC for all commands") - parser.add_argument("--perf", dest="perf", default=False, action="store_true", - help="profile running nodes with perf for the duration of the test") - parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true", - help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown. valgrind 3.14 or later required.") - parser.add_argument("--randomseed", default=0x7761726e6574, # "warnet" ascii - help="set a random seed for deterministically reproducing a previous test run") - parser.add_argument("--timeout-factor", dest="timeout_factor", default=1, - help="adjust test timeouts by a factor. 
Setting it to 0 disables all timeouts") + parser.add_argument( + "--nocleanup", + dest="nocleanup", + default=False, + action="store_true", + help="Leave bitcoinds and test.* datadir on exit or error", + ) + parser.add_argument( + "--nosandbox", + dest="nosandbox", + default=False, + action="store_true", + help="Don't use the syscall sandbox", + ) + parser.add_argument( + "--noshutdown", + dest="noshutdown", + default=False, + action="store_true", + help="Don't stop bitcoinds after the test execution", + ) + parser.add_argument( + "--cachedir", + dest="cachedir", + default=None, + help="Directory for caching pregenerated datadirs (default: %(default)s)", + ) + parser.add_argument( + "--tmpdir", dest="tmpdir", default=None, help="Root directory for datadirs" + ) + parser.add_argument( + "-l", + "--loglevel", + dest="loglevel", + default="DEBUG", + help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.", + ) + parser.add_argument( + "--tracerpc", + dest="trace_rpc", + default=False, + action="store_true", + help="Print out all RPC calls as they are made", + ) + parser.add_argument( + "--portseed", + dest="port_seed", + default=0, + help="The seed to use for assigning port numbers (default: current process id)", + ) + parser.add_argument( + "--previous-releases", + dest="prev_releases", + default=None, + action="store_true", + help="Force test of previous releases (default: %(default)s)", + ) + parser.add_argument( + "--coveragedir", + dest="coveragedir", + default=None, + help="Write tested RPC commands into this directory", + ) + parser.add_argument( + "--configfile", + dest="configfile", + default=None, + help="Location of the test framework config file (default: %(default)s)", + ) + parser.add_argument( + "--pdbonfailure", + dest="pdbonfailure", + default=False, + action="store_true", + help="Attach a python debugger if test fails", + ) + parser.add_argument( + "--usecli", + dest="usecli", + default=False, + action="store_true", + help="use bitcoin-cli instead of RPC for all commands", + ) + parser.add_argument( + "--perf", + dest="perf", + default=False, + action="store_true", + help="profile running nodes with perf for the duration of the test", + ) + parser.add_argument( + "--valgrind", + dest="valgrind", + default=False, + action="store_true", + help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown. valgrind 3.14 or later required.", + ) + parser.add_argument( + "--randomseed", + default=0x7761726E6574, # "warnet" ascii + help="set a random seed for deterministically reproducing a previous test run", + ) + parser.add_argument( + "--timeout-factor", + dest="timeout_factor", + default=1, + help="adjust test timeouts by a factor. 
Setting it to 0 disables all timeouts", + ) + parser.add_argument( + "--network", + dest="network", + default="warnet", + help="Designate which warnet this should run on (default: warnet)", + ) self.add_options(parser) # Running TestShell in a Jupyter notebook causes an additional -f argument # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument # source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168 - parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1") + parser.add_argument( + "-f", "--fff", help="a dummy argument to fool ipython", default="1" + ) self.options = parser.parse_args() if self.options.timeout_factor == 0: self.options.timeout_factor = 99999 - self.options.timeout_factor = self.options.timeout_factor or (4 if self.options.valgrind else 1) + self.options.timeout_factor = self.options.timeout_factor or ( + 4 if self.options.valgrind else 1 + ) self.options.previous_releases_path = previous_releases_path config = configparser.ConfigParser() if self.options.configfile is not None: config.read_file(open(self.options.configfile)) - config["environment"] = { - "PACKAGE_BUGREPORT": "" - } + config["environment"] = {"PACKAGE_BUGREPORT": ""} self.config = config diff --git a/src/warnet/utils.py b/src/warnet/utils.py index d515a77f0..32ff5f5c4 100644 --- a/src/warnet/utils.py +++ b/src/warnet/utils.py @@ -1,4 +1,5 @@ import functools +import inspect import ipaddress import logging import os @@ -8,12 +9,28 @@ import sys import time from io import BytesIO +from pathlib import Path +from typing import Dict + from test_framework.p2p import MESSAGEMAP from test_framework.messages import ser_uint256 logger = logging.getLogger("utils") +SUPPORTED_TAGS = [ + "23.0", + "22.0", + "0.21.1", + "0.20.1", + "0.19.1", + "0.18.1", + "0.17.1", + "0.16.3", + "0.15.1", +] +RUNNING_PROC_FILE = 
"running_scenarios.dat" + def exponential_backoff(max_retries=5, base_delay=1, max_delay=32): """ @@ -33,12 +50,13 @@ def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: - logger.error(f"rpc error:\n\t{e}") + error_msg = str(e).replace("\n", " ").replace("\t", " ") + logger.error(f"rpc error: {error_msg}") retries += 1 if retries == max_retries: raise e delay = min(base_delay * (2**retries), max_delay) - logger.warning(f"retry in {delay} seconds...") + logger.warning(f"exponential_backoff: retry in {delay} seconds...") time.sleep(delay) return wrapper @@ -49,7 +67,6 @@ def wrapper(*args, **kwargs): def get_architecture(): """ Get the architecture of the machine. - :return: The architecture of the machine or None if an error occurred """ result = subprocess.run(["uname", "-m"], stdout=subprocess.PIPE) @@ -318,3 +335,72 @@ def parse_raw_messages(blob, outbound): msg_dict["body"] = to_jsonable(msg) messages.append(msg_dict) return messages + + +def gen_config_dir(network: str) -> Path: + """ + Determine a config dir based on network name + """ + config_dir = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.warnet")) + config_dir = Path(config_dir) / "warnet" / network + return config_dir + + +def bubble_exception_str(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + func_name = inspect.currentframe().f_code.co_name + local_vars = inspect.currentframe().f_locals + # Filter out the 'self' variable from the local_vars + context_str = ", ".join( + f"{k}={v}" for k, v in local_vars.items() if k != "self" + ) + raise Exception( + f"Exception in function '{func_name}' with context ({context_str}): {str(e)}" + ) + + return wrapper + + +def save_running_scenario(scenario: str, pid: int, config_dir: Path): + with open(config_dir / RUNNING_PROC_FILE, "a") as file: + file.write(f"{scenario}\t{pid}\n") + + +def load_running_scenarios(config_dir: Path) -> Dict[str, 
int]: + scenarios = {} + if os.path.exists(config_dir / RUNNING_PROC_FILE): + with open(os.path.join(config_dir, RUNNING_PROC_FILE), "r") as file: + for line in file.readlines(): + scenario, pid = line.strip().split("\t") + scenarios[scenario] = int(pid) + return scenarios + + +def remove_stopped_scenario(scenario: str, config_dir: Path): + lines = [] + with open(config_dir / RUNNING_PROC_FILE, "r") as file: + lines = file.readlines() + + with open(config_dir / RUNNING_PROC_FILE, "w") as file: + for line in lines: + if not line.startswith(scenario): + file.write(line) + + +def update_running_scenarios_file(config_dir: Path, running_scenarios: Dict[str, int]): + with open(config_dir / RUNNING_PROC_FILE, "w") as file: + for scenario, pid in running_scenarios.items(): + file.write(f"{scenario}\t{pid}\n") + + # Check if each PID is still running + still_running = {} + for scenario, pid in running_scenarios.items(): + try: + os.kill(pid, 0) # Will raise an error if the process doesn't exist + still_running[scenario] = pid + except OSError: + pass diff --git a/src/warnet/warnet.py b/src/warnet/warnet.py index ffdec4815..5b458b56a 100644 --- a/src/warnet/warnet.py +++ b/src/warnet/warnet.py @@ -5,60 +5,108 @@ import docker import logging import networkx +import shutil import subprocess import yaml from pathlib import Path -from tempfile import mkdtemp from templates import TEMPLATES from typing import List + +from services.prometheus import Prometheus +from services.node_exporter import NodeExporter +from services.grafana import Grafana +from services.tor import Tor +from services.fork_observer import ForkObserver +# from services.fluentd import FLUENT_CONF, Fluentd, FLUENT_IP +from services.dns_seed import DnsSeed, ZONE_FILE_NAME, DNS_SEED_NAME from warnet.tank import Tank -from warnet.utils import parse_bitcoin_conf +from warnet.utils import parse_bitcoin_conf, gen_config_dir, bubble_exception_str -logger = logging.getLogger("Warnet") -TMPDIR_PREFIX = "warnet_tmp_" 
+logger = logging.getLogger("warnet") +FO_CONF_NAME = "fork_observer_config.toml" +logging.getLogger("docker.utils.config").setLevel(logging.WARNING) +logging.getLogger("docker.auth").setLevel(logging.WARNING) class Warnet: - def __init__(self): - self.tmpdir: Path = Path(mkdtemp(prefix=TMPDIR_PREFIX)) + def __init__(self, config_dir): + self.config_dir: Path = config_dir + self.config_dir.mkdir(parents=True, exist_ok=True) self.docker = docker.from_env() - self.bitcoin_network:str = "regtest" - self.docker_network:str = "warnet" + self.bitcoin_network: str = "regtest" + self.docker_network: str = "warnet" self.subnet: str = "100.0.0.0/8" self.graph = None + self.graph_name = "graph.graphml" self.tanks: List[Tank] = [] - logger.info(f"Created Warnet with temp directory {self.tmpdir}") + self.fork_observer_config = self.config_dir / FO_CONF_NAME + logger.info( + f"copying config {TEMPLATES / FO_CONF_NAME} to {self.fork_observer_config}" + ) + shutil.copy(TEMPLATES / FO_CONF_NAME, self.fork_observer_config) + # shutil.copy(TEMPLATES / FLUENT_CONF, self.config_dir) def __str__(self) -> str: - tanks_str = ',\n'.join([str(tank) for tank in self.tanks]) - return (f"Warnet(\n" - f"\tTemp Directory: {self.tmpdir}\n" - f"\tBitcoin Network: {self.bitcoin_network}\n" - f"\tDocker Network: {self.docker_network}\n" - f"\tSubnet: {self.subnet}\n" - f"\tGraph: {self.graph}\n" - f"\tTanks: [\n{tanks_str}\n" - f"\t]\n" - f")") + template = "\t%-8.8s%-25.24s%-25.24s%-25.24s%-18.18s\n" + tanks_str = template % ("Index", "Version", "Conf", "Netem", "IPv4") + for tank in self.tanks: + tanks_str += template % (tank.index, tank.version, tank.conf, tank.netem, tank.ipv4) + return ( + f"Warnet:\n" + f"\tTemp Directory: {self.config_dir}\n" + f"\tBitcoin Network: {self.bitcoin_network}\n" + f"\tDocker Network: {self.docker_network}\n" + f"\tSubnet: {self.subnet}\n" + f"\tGraph: {self.graph}\n" + f"Tanks:\n{tanks_str}" + ) @classmethod - def from_graph_file(cls, graph_file: str, network: str 
= "warnet"): - self = cls() + @bubble_exception_str + def from_graph_file( + cls, graph_file: str, config_dir: Path, network: str = "warnet" + ): + self = cls(config_dir) + destination = self.config_dir / self.graph_name + destination.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(graph_file, destination) self.docker_network = network self.graph = networkx.read_graphml(graph_file, node_type=int) self.tanks_from_graph() + logger.info(f"Created Warnet using directory {self.config_dir}") return self @classmethod + @bubble_exception_str def from_graph(cls, graph): - self = cls() + self = cls(Path()) self.graph = graph self.tanks_from_graph() + logger.info(f"Created Warnet using directory {self.config_dir}") + return self + + @classmethod + @bubble_exception_str + def from_network( + cls, config_dir: Path = Path(), network: str = "warnet", tanks=True + ): + self = cls(config_dir) + self.config_dir = gen_config_dir(network) + self.graph = networkx.read_graphml( + Path(self.config_dir / self.graph_name), node_type=int + ) + if tanks: + self.tanks_from_graph() return self @classmethod + @bubble_exception_str def from_docker_env(cls, network_name): - self = cls() + config_dir = gen_config_dir(network_name) + self = cls(config_dir) + self.graph = networkx.read_graphml( + Path(self.config_dir / self.graph_name), node_type=int + ) self.docker_network = network_name index = 0 while index <= 999999: @@ -70,6 +118,12 @@ def from_docker_env(cls, network_name): break return self + @property + @bubble_exception_str + def zone_file_path(self): + return self.config_dir / ZONE_FILE_NAME + + @bubble_exception_str def tanks_from_graph(self): for node_id in self.graph.nodes(): if int(node_id) != len(self.tanks): @@ -79,6 +133,7 @@ def tanks_from_graph(self): self.tanks.append(Tank.from_graph_node(node_id, self)) logger.info(f"Imported {len(self.tanks)} tanks from graph") + @bubble_exception_str def write_bitcoin_confs(self): with open(TEMPLATES / "bitcoin.conf", "r") as file: 
text = file.read() @@ -86,24 +141,82 @@ def write_bitcoin_confs(self): for tank in self.tanks: tank.write_bitcoin_conf(base_bitcoin_conf) + @bubble_exception_str def apply_network_conditions(self): for tank in self.tanks: tank.apply_network_conditions() + @bubble_exception_str + def generate_zone_file_from_tanks(self): + records_list = [ + f"seed.dns-seed. 300 IN A {tank.ipv4}" for tank in self.tanks + ] + content = [] + with open(str(TEMPLATES / ZONE_FILE_NAME), "r") as f: + content = [line.rstrip() for line in f] + + # TODO: Really we should also read active SOA value from dns-seed, and increment from there + + content.extend(records_list) + # Join the content into a single string and escape single quotes for echoing + content_str = "\n".join(content).replace("'", "'\\''") + with open(self.config_dir / ZONE_FILE_NAME, "w") as f: + f.write(content_str) + + @bubble_exception_str + def apply_zone_file(self): + """ + Sync the dns seed list served by dns-seed with currently active Tanks. + """ + seeder = self.docker.containers.get(f"{self.docker_network}_{DNS_SEED_NAME}") + + # Read the content from the generated zone file + with open(self.config_dir / ZONE_FILE_NAME, "r") as f: + content_str = f.read().replace("'", "'\\''") + + # Overwrite all existing content + result = seeder.exec_run( + f"sh -c 'echo \"{content_str}\" > /etc/bind/dns-seed.zone'" + ) + logger.debug(f"result of updating {ZONE_FILE_NAME}: {result}") + + # Reload that single zone only + seeder.exec_run("rndc reload dns-seed") + + @bubble_exception_str def connect_edges(self): for edge in self.graph.edges(): (src, dst) = edge src_tank = self.tanks[int(src)] dst_ip = self.tanks[dst].ipv4 - logger.info(f"Using `addnode` to connect tanks {src} to {dst}") - src_tank.exec(f"bitcoin-cli addpeeraddress {dst_ip} 18444") + logger.info(f"Using `addpeeraddress` to connect tanks {src} to {dst}") + cmd = f"bitcoin-cli addpeeraddress {dst_ip} 18444" + src_tank.exec(cmd=cmd, user="bitcoin") + + @bubble_exception_str 
+ def docker_compose_build_up(self): + command = ["docker-compose", "-p", self.docker_network, "up", "-d", "--build"] + try: + with subprocess.Popen( + command, + cwd=str(self.config_dir), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) as process: + for line in process.stdout: + logger.info(line.decode().rstrip()) + except Exception as e: + logger.error( + f"An error occurred while executing `{' '.join(command)}` in {self.config_dir}: {e}" + ) + @bubble_exception_str def docker_compose_up(self): - command = ["docker-compose", "-p", "warnet", "up", "-d", "--build"] + command = ["docker-compose", "-p", self.docker_network, "up", "-d"] try: with subprocess.Popen( command, - cwd=str(self.tmpdir), + cwd=str(self.config_dir), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) as process: @@ -111,10 +224,28 @@ def docker_compose_up(self): logger.info(line.decode().rstrip()) except Exception as e: logger.error( - f"An error occurred while executing `{' '.join(command)}` in {self.tmpdir}: {e}" + f"An error occurred while executing `{' '.join(command)}` in {self.config_dir}: {e}" ) - def write_docker_compose(self): + @bubble_exception_str + def docker_compose_down(self): + command = ["docker-compose", "down"] + try: + with subprocess.Popen( + command, + cwd=str(self.config_dir), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) as process: + for line in process.stdout: + logger.info(line.decode().rstrip()) + except Exception as e: + logger.error( + f"An error occurred while executing `{' '.join(command)}` in {self.config_dir}: {e}" + ) + + @bubble_exception_str + def write_docker_compose(self, dns=True): compose = { "version": "3.8", "networks": { @@ -131,33 +262,23 @@ def write_docker_compose(self): for tank in self.tanks: tank.add_services(compose["services"]) - # Add global services - compose["services"]["prometheus"] = { - "image": "prom/prometheus:latest", - "container_name": "prometheus", - "ports": ["9090:9090"], - "volumes": [ - f"{self.tmpdir / 
'prometheus.yml'}:/etc/prometheus/prometheus.yml" - ], - "command": ["--config.file=/etc/prometheus/prometheus.yml"], - "networks": [self.docker_network], - } - compose["services"]["node-exporter"] = { - "image": "prom/node-exporter:latest", - "container_name": "node-exporter", - "volumes": ["/proc:/host/proc:ro", "/sys:/host/sys:ro", "/:/rootfs:ro"], - "command": ["--path.procfs=/host/proc", "--path.sysfs=/host/sys"], - "networks": [self.docker_network], - } - compose["services"]["grafana"] = { - "image": "grafana/grafana:latest", - "container_name": "grafana", - "ports": ["3000:3000"], - "volumes": ["grafana-storage:/var/lib/grafana"], - "networks": [self.docker_network], - } + # Initialize services and add them to the compose + services = [ + Prometheus(self.docker_network, self.config_dir), + NodeExporter(self.docker_network), + Grafana(self.docker_network), + Tor(self.docker_network, TEMPLATES), + ForkObserver(self.docker_network, self.fork_observer_config), + # Fluentd(self.docker_network, self.config_dir), + ] + if dns: + services.append(DnsSeed(self.docker_network, TEMPLATES, self.config_dir)) + + for service_obj in services: + service_name = service_obj.__class__.__name__.lower() + compose["services"][service_name] = service_obj.get_service() - docker_compose_path = self.tmpdir / "docker-compose.yml" + docker_compose_path = self.config_dir / "docker-compose.yml" try: with open(docker_compose_path, "w") as file: yaml.dump(compose, file) @@ -167,6 +288,7 @@ def write_docker_compose(self): f"An error occurred while writing to {docker_compose_path}: {e}" ) + @bubble_exception_str def write_prometheus_config(self): config = { "global": {"scrape_interval": "15s"}, @@ -192,7 +314,7 @@ def write_prometheus_config(self): for tank in self.tanks: tank.add_scrapers(config["scrape_configs"]) - prometheus_path = self.tmpdir / "prometheus.yml" + prometheus_path = self.config_dir / "prometheus.yml" try: with open(prometheus_path, "w") as file: yaml.dump(config, file) diff 
--git a/src/warnet/warnetd.py b/src/warnet/warnetd.py index add63fdcf..5127196d3 100644 --- a/src/warnet/warnetd.py +++ b/src/warnet/warnetd.py @@ -2,13 +2,17 @@ import logging import os import pkgutil +import shutil import signal import subprocess import sys +import time import threading +from collections import defaultdict from datetime import datetime from logging.handlers import RotatingFileHandler - +from logging import StreamHandler +from typing import List, Dict from flask import Flask from flask_jsonrpc.app import JSONRPC @@ -18,89 +22,119 @@ get_bitcoin_cli, get_bitcoin_debug_log, get_messages, - stop_network, - remove_network, + compose_down, +) +from warnet.utils import ( + exponential_backoff, + gen_config_dir, + save_running_scenario, + load_running_scenarios, + remove_stopped_scenario, + update_running_scenarios_file, ) WARNETD_PORT = 9276 -continue_running = True -app = Flask(__name__) -jsonrpc = JSONRPC(app, "/api") - -# Determine the log file path based on XDG_STATE_HOME -xdg_state_home = os.environ.get( - "XDG_STATE_HOME", os.path.join(os.environ["HOME"], ".local", "state") -) -log_file_path = os.path.join(xdg_state_home, "warnet", "warnet.log") +# Determine the log file path: +basedir = os.environ.get("XDG_STATE_HOME") +if basedir is None: + # ~/.warnet/warnet.log + basedir = os.path.join(os.environ["HOME"], ".warnet") +else: + # XDG_STATE_HOME / warnet / warnet.log + basedir = os.path.join(basedir, "warnet") +LOG_FILE_PATH = os.path.join(basedir, "warnet.log") # Ensure the directory exists -os.makedirs(os.path.dirname(log_file_path), exist_ok=True) +os.makedirs(os.path.dirname(LOG_FILE_PATH), exist_ok=True) # Configure root logger logging.basicConfig( level=logging.DEBUG, handlers=[ RotatingFileHandler( - log_file_path, maxBytes=16_000_000, backupCount=3, delay=True - ) + LOG_FILE_PATH, maxBytes=16_000_000, backupCount=3, delay=True + ), + StreamHandler(sys.stdout) ], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) # Disable 
# Silence per-request connection logging from urllib3 (used by the docker client);
# at DEBUG level it floods the warnet log.
logging.getLogger("urllib3.connectionpool").setLevel(logging.CRITICAL)

logger = logging.getLogger("warnetd")

app = Flask(__name__)
jsonrpc = JSONRPC(app, "/api")


@jsonrpc.method("bcli")
def bcli(
    node: int, method: str, params: List[str] = [], network: str = "warnet"
) -> str:
    """
    Call bitcoin-cli <method> <params> on <node> in [network].

    Returns the CLI output as a string; re-raises any failure so the JSON-RPC
    layer reports it to the caller.
    """
    # NOTE: the mutable default is deliberate — flask-jsonrpc derives the RPC
    # schema from the annotation and default, and params is never mutated here.
    try:
        result = get_bitcoin_cli(network, node, method, params)
        return str(result)
    except Exception as e:
        raise Exception(f"{e}")


@jsonrpc.method("debug_log")
def debug_log(network: str, node: int) -> str:
    """
    Fetch the Bitcoin Core debug log from <node> in <network>.
    """
    try:
        result = get_bitcoin_debug_log(network, node)
        return str(result)
    except Exception as e:
        raise Exception(f"{e}")


@jsonrpc.method("messages")
def messages(network: str, node_a: int, node_b: int) -> str:
    """
    Fetch and pretty-print the p2p messages sent between <node_a> and <node_b>.

    Returns one line per message: "<utc timestamp> <direction> <msgtype> <body>",
    where direction is ">>>" for outbound and "<<<" for inbound.
    """
    try:
        messages = [
            msg for msg in get_messages(network, node_a, node_b) if msg is not None
        ]
        if not messages:
            return f"No messages found between {node_a} and {node_b}"

        messages_str_list = []

        for message in messages:
            # Skip entries without a numeric "time" field. isinstance() covers
            # the missing/None case and, unlike the previous truthiness test,
            # does not silently drop a legitimate timestamp of 0.
            if not isinstance(message.get("time"), (int, float)):
                continue

            # "time" is microseconds since the epoch.
            timestamp = datetime.utcfromtimestamp(message["time"] / 1e6).strftime(
                "%Y-%m-%d %H:%M:%S"
            )
            direction = ">>>" if message.get("outbound", False) else "<<<"
            msgtype = message.get("msgtype", "")
            body_dict = message.get("body", {})

            if not isinstance(body_dict, dict):  # messages will be in dict form
                continue

            body_str = ", ".join(f"{key}: {value}" for key, value in body_dict.items())
            messages_str_list.append(f"{timestamp} {direction} {msgtype} {body_str}")

        result_str = "\n".join(messages_str_list)

        return result_str

    except Exception as e:
        raise Exception(f"{e}")


@jsonrpc.method("list")
def list() -> List[str]:
    """
    List available scenarios in the Warnet Test Framework.

    Note: "list" is the published RPC name, so the builtin shadowing is
    unavoidable without breaking the API.
    """
    try:
        # NOTE(review): the body of this try-block falls between diff hunks and
        # is not visible here; reconstructed from the scenario-path logic used
        # by run() below — confirm against the full file.
        base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        scenarios_dir = os.path.join(base_dir, "scenarios")
        return [name for _, name, _ in pkgutil.iter_modules([scenarios_dir])]
    except Exception as e:
        return [f"Exception {e}"]


# Module-level registry of running scenarios. The RPC methods below persist
# their state via save/load_running_scenarios instead; kept for compatibility.
running_scenarios = defaultdict(dict)


@jsonrpc.method("run")
def run(scenario: str, additional_args: List[str], network: str = "warnet") -> str:
    """
    Launch <scenario> (a module from src/scenarios) as a background process,
    passing <additional_args> plus --network=<network>, and record its PID.
    """
    config_dir = gen_config_dir(network)
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    scenario_path = os.path.join(base_dir, "scenarios", f"{scenario}.py")

    if not os.path.exists(scenario_path):
        return f"Scenario {scenario} not found at {scenario_path}."

    try:
        run_cmd = (
            [sys.executable, scenario_path] + additional_args + [f"--network={network}"]
        )
        logger.debug(f"Running {run_cmd}")
        # setsid detaches the scenario into its own session/process group so it
        # survives independently of the server and can be signalled later.
        process = subprocess.Popen(run_cmd, shell=False, preexec_fn=os.setsid)

        save_running_scenario(scenario, process, config_dir)

        return f"Running scenario {scenario} in the background..."
    except Exception as e:
        logger.error(f"Exception occurred while running the scenario: {e}")
        return f"Exception {e}"


@jsonrpc.method("stop_scenario")
def stop_scenario(pid: int, network: str = "warnet") -> str:
    """
    Stop the running scenario with process id <pid> in [network]:
    SIGTERM first, then SIGKILL with exponential backoff if it survives.
    """

    def is_running(pid):
        # Signal 0 performs permission/existence checks only; it does not
        # actually deliver a signal to the process.
        try:
            os.kill(pid, 0)
        except ProcessLookupError:
            return False
        return True

    @exponential_backoff()
    def kill_process(pid):
        os.kill(pid, signal.SIGKILL)

    config_dir = gen_config_dir(network)
    running_scenarios = load_running_scenarios(config_dir)

    # Reverse-lookup the scenario name for this PID.
    scenario = None
    for scenario_name, scenario_pid in running_scenarios.items():
        if scenario_pid == pid:
            scenario = scenario_name
            break
    if not scenario:
        return f"No active scenario found for PID {pid}."

    if not is_running(pid):
        return f"Scenario {scenario} with PID {pid} was found in file but is not running."

    # First try with SIGTERM
    os.kill(pid, signal.SIGTERM)
    time.sleep(5)
    # Then try SIGKILL with exponential backoff
    if is_running(pid):
        kill_process(pid)

    if is_running(pid):
        return f"Could not kill scenario {scenario} with pid {pid} using SIGKILL"

    remove_stopped_scenario(scenario, config_dir)
    return f"Stopped scenario {scenario} with PID {pid}."
@jsonrpc.method("list_running_scenarios")
def list_running_scenarios(network: str = "warnet") -> Dict[str, int]:
    """
    Return {scenario_name: pid} for scenarios still alive in [network],
    pruning dead entries from the on-disk registry as a side effect.
    """
    config_dir = gen_config_dir(network)
    running_scenarios = load_running_scenarios(config_dir)

    # Check if each PID is still running
    still_running = {}
    for scenario, pid in running_scenarios.items():
        try:
            os.kill(pid, 0)  # Will raise an error if the process doesn't exist
            still_running[scenario] = pid
        except OSError:
            pass

    # Update the file with only the still running scenarios
    update_running_scenarios_file(config_dir, still_running)

    return still_running


@jsonrpc.method("up")
def up(network: str = "warnet") -> str:
    """
    Resume a previously-created warnet from its config dir.

    Compose-up, network conditions and edge connections run in a background
    thread so the RPC call returns immediately.
    """
    wn = Warnet.from_network(network=network, tanks=False)

    def thread_start(wn):
        try:
            wn.docker_compose_up()
            # Update warnet from docker here to get ip addresses
            wn = Warnet.from_docker_env(network)
            wn.apply_network_conditions()
            wn.connect_edges()
            logger.info(
                f"Resumed warnet named '{network}' from config dir {wn.config_dir}"
            )
        except Exception as e:
            logger.error(f"Exception {e}")

    threading.Thread(target=lambda: thread_start(wn)).start()
    # Plain string literal: no placeholders, so the stray f-prefix (F541) is dropped.
    return "Resuming warnet..."
@jsonrpc.method()
def from_file(graph_file: str, force: bool = False, network: str = "warnet") -> str:
    """
    Create and start a warnet with topology loaded from a <graph_file>.

    Refuses to clobber an existing config dir unless force=True. The heavy
    bring-up work runs in a background thread so the RPC returns immediately.
    """
    config_dir = gen_config_dir(network)
    if config_dir.exists():
        if force:
            shutil.rmtree(config_dir)
        else:
            return f"Config dir {config_dir} already exists, not overwriting existing warnet without --force"
    wn = Warnet.from_graph_file(graph_file, config_dir, network)

    def thread_start(wn):
        try:
            wn.write_bitcoin_confs()
            wn.write_docker_compose()
            wn.write_prometheus_config()
            wn.docker_compose_build_up()
            wn.generate_zone_file_from_tanks()
            wn.apply_zone_file()
            wn.apply_network_conditions()
            wn.connect_edges()
            logger.info(
                f"Created warnet named '{network}' from graph file {graph_file}"
            )
        except Exception as e:
            logger.error(f"Exception {e}")

    # NOTE(review): this thread-start line falls between diff hunks and is not
    # visible here; reconstructed from the identical pattern in up() — confirm
    # against the full file.
    threading.Thread(target=lambda: thread_start(wn)).start()

    return f"Starting warnet network named '{network}' with the following parameters:\n{wn}"


@jsonrpc.method()
def update_dns_seeder(graph_file: str, network: str = "warnet") -> str:
    """
    Regenerate the DNS seeder zone file from <graph_file> and apply it,
    returning the zone file contents on success.
    """
    try:
        config_dir = gen_config_dir(network)
        wn = Warnet.from_graph_file(graph_file, config_dir, network)
        wn.generate_zone_file_from_tanks()
        wn.apply_zone_file()
        with open(wn.zone_file_path, "r") as f:
            zone_file = f.read()

        return f"DNS seeder update using zone file:\n{zone_file}"
    except Exception as e:
        return f"DNS seeder not updated due to exception: {e}"


@jsonrpc.method()
def generate_compose(graph_file: str, network: str = "warnet") -> str:
    """
    Generate the docker compose file for a <graph_file> and return its
    contents as a string without starting anything.
    """
    config_dir = gen_config_dir(network)
    if config_dir.exists():
        return (
            f"Config dir {config_dir} already exists, not overwriting existing warnet"
        )
    wn = Warnet.from_graph_file(graph_file, config_dir, network)
    wn.write_bitcoin_confs()
    wn.write_docker_compose()
    docker_compose_path = wn.config_dir / "docker-compose.yml"
    with open(docker_compose_path, "r") as f:
        return f.read()


@jsonrpc.method("down")
def down(network: str = "warnet") -> str:
    """
    Stop all docker containers in <network>.
    """
    try:
        _ = compose_down(network)
        return "Stopping warnet"
    except Exception as e:
        return f"Exception {e}"


@jsonrpc.method("stop")
def stop() -> str:
    """
    Stop warnetd.
    """
    # This handler runs inside a gunicorn worker; its parent is the gunicorn
    # master, so SIGTERM-ing the parent shuts down the whole server.
    os.kill(os.getppid(), signal.SIGTERM)
    return "Stopping daemon..."
@@ -206,8 +350,13 @@ def run_gunicorn(): """ Run the RPC server using gunicorn WSGI HTTP server """ - parser = argparse.ArgumentParser(description='Run the Warnet RPC server.') - parser.add_argument('--no-daemon', default=False, action='store_true', help='Run server in the foreground instead of daemon mode.') + parser = argparse.ArgumentParser(description="Run the Warnet RPC server.") + parser.add_argument( + "--daemon", + default=False, + action="store_true", + help="Run server in the background.", + ) args = parser.parse_args() command = [ @@ -221,15 +370,17 @@ def run_gunicorn(): ] # If in daemon mode, log to file and add daemon argument - if not args.no_daemon: - command.extend([ - "--daemon", - "--access-logfile", - log_file_path, - "--error-logfile", - log_file_path, - ]) - + if args.daemon: + command.extend( + [ + "--daemon", + "--access-logfile", + LOG_FILE_PATH, + "--error-logfile", + LOG_FILE_PATH, + ] + ) + subprocess.run(command)