diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index acac68266..af84234a7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -44,11 +44,15 @@ jobs: - dag_connection_test.py - graph_test.py - logging_test.py + - ln_basic_test.py + - ln_test.py - rpc_test.py - services_test.py - signet_test.py + - simln_test.py - scenarios_test.py - namespace_admin_test.py + - wargames_test.py steps: - uses: actions/checkout@v4 - uses: azure/setup-helm@v4.2.0 diff --git a/.gitignore b/.gitignore index f4b5d0076..35e704c67 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ warnet.egg-info dist/ build/ **/kubeconfigs/ +src/warnet/_version.py diff --git a/README.md b/README.md index 51199990e..d2301ad96 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,7 @@ Monitor and analyze the emergent behaviors of Bitcoin networks. - [Installation](/docs/install.md) - [CLI Commands](/docs/warnet.md) - [Network configuration with yaml files](/docs/config.md) +- [Plugins](/docs/plugins.md) - [Scenarios](/docs/scenarios.md) - [Monitoring](/docs/logging_monitoring.md) - [Snapshots](/docs/snapshots.md) diff --git a/docker-bake.hcl b/docker-bake.hcl new file mode 100644 index 000000000..49db08eaa --- /dev/null +++ b/docker-bake.hcl @@ -0,0 +1,238 @@ +group "all" { + targets = [ + "bitcoin-28", + "bitcoin-27", + "bitcoin-26", + "v0-21-1", + "v0-20-0", + "v0-19-2", + "v0-17-0", + "v0-16-1", + "bitcoin-unknown-message", + "bitcoin-invalid-blocks", + "bitcoin-50-orphans", + "bitcoin-no-mp-trim", + "bitcoin-disabled-opcodes", + "bitcoin-5k-inv" + ] +} + +group "maintained" { + targets = [ + "bitcoin-28", + "bitcoin-27", + "bitcoin-26" + ] +} + +group "practice" { + targets = [ + "bitcoin-unknown-message", + "bitcoin-invalid-blocks", + "bitcoin-50-orphans", + "bitcoin-no-mp-trim", + "bitcoin-disabled-opcodes", + "bitcoin-5k-inv" + ] +} + +group "vulnerable" { + targets = [ + "v0-21-1", + "v0-20-0", + "v0-19-2", + "v0-17-0", + "v0-16-1", + ] +} + +target "maintained-base" { + context = "./resources/images/bitcoin" + args = { + REPO = "bitcoin/bitcoin" + BUILD_ARGS = "--disable-tests --without-gui --disable-bench --disable-fuzz-binary --enable-suppress-external-warnings" + } + platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7"] +} + +target "cmake-base" { + inherits = ["maintained-base"] + dockerfile = "./Dockerfile.dev" + args = { + BUILD_ARGS = "-DBUILD_TESTS=OFF -DBUILD_GUI=OFF -DBUILD_BENCH=OFF -DBUILD_FUZZ_BINARY=OFF -DWITH_ZMQ=ON" + } +} + +target "autogen-base" { + inherits = ["maintained-base"] + dockerfile = "./Dockerfile" +} + +target "bitcoin-master" { + inherits = ["cmake-base"] + tags = ["bitcoindevproject/bitcoin:28.1"] + args = { + COMMIT_SHA = "bd0ee07310c3dcdd08633c69eac330e2e567b235" + } +} + +target "bitcoin-28" { + inherits = ["autogen-base"] + tags = ["bitcoindevproject/bitcoin:28.0"] + args = { + COMMIT_SHA = "110183746150428e6385880c79f8c5733b1361ba" + } +} + +target "bitcoin-27" { + inherits = ["autogen-base"] + tags = ["bitcoindevproject/bitcoin:27.2"] + args = { + COMMIT_SHA = "bf03c458e994abab9be85486ed8a6d8813313579" + } +} + +target "bitcoin-26" { + inherits = ["autogen-base"] + tags = ["bitcoindevproject/bitcoin:26.2"] + args = { + COMMIT_SHA = "7b7041019ba5e7df7bde1416aa6916414a04f3db" + } +} + +target "practice-base" { + dockerfile = "./Dockerfile" + context = "./resources/images/bitcoin/insecure" + contexts = { + bitcoin-src = "." 
+ } + args = { + ALPINE_VERSION = "3.20" + BITCOIN_VERSION = "28.1.1" + EXTRA_PACKAGES = "sqlite-dev" + EXTRA_RUNTIME_PACKAGES = "" + REPO = "willcl-ark/bitcoin" + } + platforms = ["linux/amd64", "linux/armhf"] +} + +target "bitcoin-unknown-message" { + inherits = ["practice-base"] + tags = ["bitcoindevproject/bitcoin:99.0.0-unknown-message"] + args = { + COMMIT_SHA = "ae999611026e941eca5c0b61f22012c3b3f3d8dc" + } +} + +target "bitcoin-invalid-blocks" { + inherits = ["practice-base"] + tags = ["bitcoindevproject/bitcoin:98.0.0-invalid-blocks"] + args = { + COMMIT_SHA = "9713324368e5a966ec330389a533ae8ad7a0ea8f" + } +} + +target "bitcoin-50-orphans" { + inherits = ["practice-base"] + tags = ["bitcoindevproject/bitcoin:97.0.0-50-orphans"] + args = { + COMMIT_SHA = "cbcb308eb29621c0db3a105e1a1c1788fb0dab6b" + } +} + +target "bitcoin-no-mp-trim" { + inherits = ["practice-base"] + tags = ["bitcoindevproject/bitcoin:96.0.0-no-mp-trim"] + args = { + COMMIT_SHA = "a3a15a9a06dd541d1dafba068c00eedf07e1d5f8" + } +} + +target "bitcoin-disabled-opcodes" { + inherits = ["practice-base"] + tags = ["bitcoindevproject/bitcoin:95.0.0-disabled-opcodes"] + args = { + COMMIT_SHA = "5bdb8c52a8612cac9aa928c84a499dd701542b2a" + } +} + +target "bitcoin-5k-inv" { + inherits = ["practice-base"] + tags = ["bitcoindevproject/bitcoin:94.0.0-5k-inv"] + args = { + COMMIT_SHA = "e70e610e07eea3aeb0c49ae0bd9f4049ffc1b88c" + } +} + +target "CVE-base" { + dockerfile = "./Dockerfile" + context = "./resources/images/bitcoin/insecure" + contexts = { + bitcoin-src = "." + } + platforms = ["linux/amd64", "linux/armhf"] + args = { + REPO = "josibake/bitcoin" + } +} + +target "v0-16-1" { + inherits = ["CVE-base"] + tags = ["bitcoindevproject/bitcoin:0.16.1"] + args = { + ALPINE_VERSION = "3.7" + BITCOIN_VERSION = "0.16.1" + COMMIT_SHA = "dc94c00e58c60412a4e1a540abdf0b56093179e8" + EXTRA_PACKAGES = "protobuf-dev libressl-dev" + EXTRA_RUNTIME_PACKAGES = "boost boost-program_options libressl" + PRE_CONFIGURE_COMMANDS = "sed -i '/AC_PREREQ/a\\AR_FLAGS=cr' src/univalue/configure.ac && sed -i '/AX_PROG_CC_FOR_BUILD/a\\AR_FLAGS=cr' src/secp256k1/configure.ac && sed -i 's:sys/fcntl.h:fcntl.h:' src/compat.h" + } +} + +target "v0-17-0" { + inherits = ["CVE-base"] + tags = ["bitcoindevproject/bitcoin:0.17.0"] + args = { + ALPINE_VERSION = "3.9" + BITCOIN_VERSION = "0.17.0" + COMMIT_SHA = "f6b2db49a707e7ad433d958aee25ce561c66521a" + EXTRA_PACKAGES = "protobuf-dev libressl-dev" + EXTRA_RUNTIME_PACKAGES = "boost boost-program_options libressl sqlite-dev" + } +} + +target "v0-19-2" { + inherits = ["CVE-base"] + tags = ["bitcoindevproject/bitcoin:0.19.2"] + args = { + ALPINE_VERSION = "3.12.12" + BITCOIN_VERSION = "0.19.2" + COMMIT_SHA = "e20f83eb5466a7d68227af14a9d0cf66fb520ffc" + EXTRA_PACKAGES = "sqlite-dev libressl-dev" + EXTRA_RUNTIME_PACKAGES = "boost boost-program_options libressl sqlite-dev" + } +} + +target "v0-20-0" { + inherits = ["CVE-base"] + tags = ["bitcoindevproject/bitcoin:0.20.0"] + args = { + ALPINE_VERSION = "3.12.12" + BITCOIN_VERSION = "0.20.0" + COMMIT_SHA = "0bbff8feff0acf1693dfe41184d9a4fd52001d3f" + EXTRA_PACKAGES = "sqlite-dev miniupnpc-dev" + EXTRA_RUNTIME_PACKAGES = "boost-filesystem miniupnpc-dev sqlite-dev" + } +} + +target "v0-21-1" { + inherits = ["CVE-base"] + tags = ["bitcoindevproject/bitcoin:0.21.1"] + args = { + ALPINE_VERSION = "3.17" + BITCOIN_VERSION = "0.21.1" + COMMIT_SHA = "e0a22f14c15b4877ef6221f9ee2dfe510092d734" + EXTRA_PACKAGES = "sqlite-dev" + EXTRA_RUNTIME_PACKAGES = "boost-filesystem sqlite-dev" + 
} +} diff --git a/docs/developer-notes.md b/docs/developer-notes.md index 061336d9a..a6de1b9aa 100644 --- a/docs/developer-notes.md +++ b/docs/developer-notes.md @@ -72,3 +72,20 @@ python3 -m build # Upload to Pypi python3 -m twine upload dist/* ``` + +## Building docker images + +The Bitcoin Core docker images used by warnet are specified in the *docker-bake.hcl* file. +This uses the (experimental) `bake` build functionality of docker buildx. +We use [HCL language](https://github.com/hashicorp/hcl) in the declaration file itself. +See the `bake` [documentation](https://docs.docker.com/build/bake/) for more information on specifications, and how to e.g. override arguments. + +In order to build (or "bake") a certain image, find the image's target (name) in the *docker-bake.hcl* file, and then run `docker buildx bake `. + +```bash +# build the dummy image that will crash on 5k invs +docker buildx bake bitcoin-5k-inv + +# build the same image, but set platform to only linux/amd64 +docker buildx bake bitcoin-5k-inv --set bitcoin-5k-inv.platform=linux/amd64 +``` diff --git a/docs/logging_monitoring.md b/docs/logging_monitoring.md index 9674979ad..f5bc705fa 100644 --- a/docs/logging_monitoring.md +++ b/docs/logging_monitoring.md @@ -96,7 +96,7 @@ For example, the default metrics listed above would be explicitly configured as nodes: - name: tank-0000 metricsExport: true - metrics: blocks=getblockcount() inbounds=getnetworkinfo()["connections_in"] outbounds=getnetworkinfo()["connections_in"] mempool_size=getmempoolinfo()["size"] + metrics: blocks=getblockcount() inbounds=getnetworkinfo()["connections_in"] outbounds=getnetworkinfo()["connections_out"] mempool_size=getmempoolinfo()["size"] ``` The data can be retrieved directly from the Prometheus exporter container in the tank pod via port `9332`, example: @@ -108,7 +108,7 @@ blocks 704.0 # HELP inbounds getnetworkinfo()["connections_in"] # TYPE inbounds gauge inbounds 0.0 -# HELP outbounds getnetworkinfo()["connections_in"] +# HELP outbounds getnetworkinfo()["connections_out"] # TYPE outbounds gauge outbounds 0.0 # HELP mempool_size getmempoolinfo()["size"] diff --git a/docs/plugins.md b/docs/plugins.md new file mode 100644 index 000000000..bce833864 --- /dev/null +++ b/docs/plugins.md @@ -0,0 +1,72 @@ +# Plugins + +Plugins extend Warnet. Plugin authors can import commands from Warnet and interact with the kubernetes cluster, and plugin users can run plugins from the command line or from the `network.yaml` file. + +## Activating plugins from `network.yaml` + +You can activate a plugin command by placing it in the `plugins` section at the bottom of each `network.yaml` file like so: + +````yaml +nodes: + <> + +plugins: # This marks the beginning of the plugin section + preDeploy: # This is a hook. This particular hook will call plugins before deploying anything else. + hello: # This is the name of the plugin. + entrypoint: "../plugins/hello" # Every plugin must specify a path to its entrypoint. + podName: "hello-pre-deploy" # Plugins can have their own particular configurations, such as how to name a pod. + helloTo: "preDeploy!" # This configuration tells the hello plugin who to say "hello" to. +```` + +## Many kinds of hooks +There are many hooks to the Warnet `deploy` command. The example below specifies them: + +````yaml +nodes: + <> + +plugins: + preDeploy: # Plugins will run before any other `deploy` code. + hello: + entrypoint: "../plugins/hello" + podName: "hello-pre-deploy" + helloTo: "preDeploy!" 
+ postDeploy: # Plugins will run after all the `deploy` code has run. + simln: + entrypoint: "../plugins/simln" + activity: '[{"source": "tank-0003-ln", "destination": "tank-0005-ln", "interval_secs": 1, "amount_msat": 2000}]' + hello: + entrypoint: "../plugins/hello" + podName: "hello-post-deploy" + helloTo: "postDeploy!" + preNode: # Plugins will run before `deploy` launches a node (once per node). + hello: + entrypoint: "../plugins/hello" + helloTo: "preNode!" + postNode: # Plugins will run after `deploy` launches a node (once per node). + hello: + entrypoint: "../plugins/hello" + helloTo: "postNode!" + preNetwork: # Plugins will run before `deploy` launches the network (essentially between logging and when nodes are deployed) + hello: + entrypoint: "../plugins/hello" + helloTo: "preNetwork!" + podName: "hello-pre-network" + postNetwork: # Plugins will run after the network deploy threads have been joined. + hello: + entrypoint: "../plugins/hello" + helloTo: "postNetwork!" + podName: "hello-post-network" +```` + +Warnet will execute these plugin commands during each invocation of `warnet deploy`. + + + +## A "hello" example + +To get started with an example plugin, review the `README` of the `hello` plugin found in any initialized Warnet directory: + +1. `warnet init` +2. `cd plugins/hello/` + diff --git a/docs/warnet.md b/docs/warnet.md index 8c00840c8..26b78dd4c 100644 --- a/docs/warnet.md +++ b/docs/warnet.md @@ -48,6 +48,15 @@ options: |--------|--------|------------|-----------| | force | Bool | | False | +### `warnet import-network` +Create a network from an imported lightning network graph JSON + +options: +| name | type | required | default | +|-----------------|--------|------------|-----------| +| graph_file_path | Path | yes | | +| output_path | Path | yes | | + ### `warnet init` Initialize a warnet project in the current directory @@ -81,6 +90,7 @@ options: | debug | Bool | | False | | source_dir | Path | | | | additional_args | String | | | +| admin | Bool | | False | | namespace | String | | | ### `warnet setup` @@ -110,6 +120,10 @@ options: |---------------|--------|------------|-----------| | scenario_name | String | | | +### `warnet version` +Display the installed version of warnet + + ## Admin ### `warnet admin create-kubeconfigs` @@ -192,4 +206,33 @@ options: | arches | String | | | | action | String | | "load" | +## Ln + +### `warnet ln host` +Get lightning node host from \ + +options: +| name | type | required | default | +|--------|--------|------------|-----------| +| pod | String | yes | | + +### `warnet ln pubkey` +Get lightning node pub key from \ + +options: +| name | type | required | default | +|--------|--------|------------|-----------| +| pod | String | yes | | + +### `warnet ln rpc` +Call lightning cli rpc \ on \ + +options: +| name | type | required | default | +|-----------|--------|------------|-----------| +| pod | String | yes | | +| method | String | yes | | +| params | String | | | +| namespace | String | | | + diff --git a/pyproject.toml b/pyproject.toml index b0f2320b0..48d5d37f6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "warnet" -version = "1.1.9" +dynamic = ["version"] description = "Monitor and analyze the emergent behaviours of bitcoin networks" readme = "README.md" requires-python = ">=3.9" @@ -34,7 +34,7 @@ warcli = "warnet.main:cli" [project.urls] Homepage = "https://warnet.dev" -GitHub = "https://github.com/bitcoindevproject/warnet" +GitHub = "https://github.com/bitcoin-dev-project/warnet" Pypi = 
"https://pypi.org/project/warnet/" [project.optional-dependencies] @@ -56,3 +56,8 @@ include = ["warnet*", "test_framework*", "resources*"] [tool.setuptools.package-data] "resources" = ["**/*"] + +[tool.setuptools_scm] +write_to = "src/warnet/_version.py" +version_scheme = "no-guess-dev" +local_scheme = "node-and-date" diff --git a/resources/charts/bitcoincore/Chart.yaml b/resources/charts/bitcoincore/Chart.yaml index f99064472..4feb6e32e 100644 --- a/resources/charts/bitcoincore/Chart.yaml +++ b/resources/charts/bitcoincore/Chart.yaml @@ -2,6 +2,11 @@ apiVersion: v2 name: bitcoincore description: A Helm chart for Bitcoin Core +dependencies: + - name: lnd + version: 0.1.0 + condition: ln.lnd + # A chart can be either an 'application' or a 'library' chart. # # Application charts are a collection of templates that can be packaged into versioned archives diff --git a/resources/charts/bitcoincore/charts/lnd/Chart.yaml b/resources/charts/bitcoincore/charts/lnd/Chart.yaml new file mode 100644 index 000000000..b77eb714a --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/Chart.yaml @@ -0,0 +1,25 @@ +apiVersion: v2 +name: lnd + +description: A Helm chart for LND + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: 0.1.0 diff --git a/resources/charts/bitcoincore/charts/lnd/templates/_helpers.tpl b/resources/charts/bitcoincore/charts/lnd/templates/_helpers.tpl new file mode 100644 index 000000000..de7c0c156 --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/templates/_helpers.tpl @@ -0,0 +1,78 @@ +{{/* +Expand the name of the PARENT chart. +*/}} +{{- define "bitcoincore.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified PARENT app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "bitcoincore.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + + +{{/* +Expand the name of the chart. +*/}} +{{- define "lnd.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}-ln +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "lnd.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" }}-ln +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "lnd.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "lnd.labels" -}} +helm.sh/chart: {{ include "lnd.chart" . }} +{{ include "lnd.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "lnd.selectorLabels" -}} +app.kubernetes.io/name: {{ include "lnd.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "lnd.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "lnd.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/resources/charts/bitcoincore/charts/lnd/templates/configmap.yaml b/resources/charts/bitcoincore/charts/lnd/templates/configmap.yaml new file mode 100644 index 000000000..65cd54cd6 --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/templates/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "lnd.fullname" . }} + labels: + {{- include "lnd.labels" . | nindent 4 }} +data: + lnd.conf: | + {{- .Values.baseConfig | nindent 4 }} + {{- .Values.defaultConfig | nindent 4 }} + {{- .Values.config | nindent 4 }} + bitcoin.{{ .Values.global.chain }}=1 + bitcoind.rpcpass={{ .Values.global.rpcpassword }} + bitcoind.rpchost={{ include "bitcoincore.fullname" . }}:{{ index .Values.global .Values.global.chain "RPCPort" }} + bitcoind.zmqpubrawblock=tcp://{{ include "bitcoincore.fullname" . }}:{{ .Values.global.ZMQBlockPort }} + bitcoind.zmqpubrawtx=tcp://{{ include "bitcoincore.fullname" . }}:{{ .Values.global.ZMQTxPort }} + alias={{ include "lnd.fullname" . }} + externalhosts={{ include "lnd.fullname" . }} + tlsextradomain={{ include "lnd.fullname" . }} + tls.cert: | + -----BEGIN CERTIFICATE----- + MIIB8TCCAZagAwIBAgIUJDsR6mmY+TaO9pCfjtotlbOkzJMwCgYIKoZIzj0EAwIw + MjEfMB0GA1UECgwWbG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2Fy + bmV0MB4XDTI0MTExMTE2NTM1MFoXDTM0MTEwOTE2NTM1MFowMjEfMB0GA1UECgwW + bG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2FybmV0MFkwEwYHKoZI + zj0CAQYIKoZIzj0DAQcDQgAEBVltIvaTlAQI/3FFatTqVflZuZdRJ0SmRMSJrFLP + tp0fxE7hmteSt6gjQriy90fP8j9OJXBNAjt915kLY4zVvqOBiTCBhjAOBgNVHQ8B + Af8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB/zAd + BgNVHQ4EFgQU5d8QMrwhLgTkDjWA+eXZGz+dybUwLwYDVR0RBCgwJoIJbG9jYWxo + b3N0ggEqhwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMAoGCCqGSM49BAMCA0kAMEYC + IQDPofN0fEl5gTwCYhk3nZbjMqJhZ8BsSJ6K8XRhxr7zbwIhAPsgQCFOqUWg632O + NEO53OQ6CIqnpxSskjsFNH4ZBQOE + -----END CERTIFICATE----- + tls.key: | + -----BEGIN EC PRIVATE KEY----- + MHcCAQEEIIcFtWTLQv5JaRRxdkPKkO98OrvgeztbZ7h8Ev/4UbE4oAoGCCqGSM49 + AwEHoUQDQgAEBVltIvaTlAQI/3FFatTqVflZuZdRJ0SmRMSJrFLPtp0fxE7hmteS + t6gjQriy90fP8j9OJXBNAjt915kLY4zVvg== + -----END EC PRIVATE KEY----- + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "lnd.fullname" . 
}}-channels + labels: + channels: "true" + {{- include "lnd.labels" . | nindent 4 }} +data: + source: {{ include "lnd.fullname" . }} + channels: | + {{ .Values.channels | toJson }} diff --git a/resources/charts/bitcoincore/charts/lnd/templates/pod.yaml b/resources/charts/bitcoincore/charts/lnd/templates/pod.yaml new file mode 100644 index 000000000..e3b9782d7 --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/templates/pod.yaml @@ -0,0 +1,82 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "lnd.fullname" . }} + labels: + {{- include "lnd.labels" . | nindent 4 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + app: {{ include "lnd.fullname" . }} + {{- if .Values.collectLogs }} + collect_logs: "true" + {{- end }} + chain: {{ .Values.global.chain }} +spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 4 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 4 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 8 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: rpc + containerPort: {{ .Values.RPCPort }} + protocol: TCP + - name: p2p + containerPort: {{ .Values.P2PPort }} + protocol: TCP + - name: rest + containerPort: {{ .Values.RestPort }} + protocol: TCP + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 8 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 8 }} + startupProbe: + {{- toYaml .Values.startupProbe | nindent 8 }} + resources: + {{- toYaml .Values.resources | nindent 8 }} + volumeMounts: + {{- with .Values.volumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} + - mountPath: /root/.lnd/lnd.conf + name: config + subPath: lnd.conf + - mountPath: /root/.lnd/tls.key + name: config + subPath: tls.key + - mountPath: /root/.lnd/tls.cert + name: config + subPath: tls.cert + {{- if .Values.circuitBreaker }} + - name: circuitbreaker + image: pinheadmz/circuitbreaker:278737d + imagePullPolicy: IfNotPresent + {{- end}} + volumes: + {{- with .Values.volumes }} + {{- toYaml . | nindent 4 }} + {{- end }} + - configMap: + name: {{ include "lnd.fullname" . }} + name: config + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 4 }} + {{- end }} diff --git a/resources/charts/bitcoincore/charts/lnd/templates/service.yaml b/resources/charts/bitcoincore/charts/lnd/templates/service.yaml new file mode 100644 index 000000000..51826ee9b --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "lnd.fullname" . }} + labels: + {{- include "lnd.labels" . | nindent 4 }} + app: {{ include "lnd.fullname" . }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.RPCPort }} + targetPort: rpc + protocol: TCP + name: rpc + - port: {{ .Values.P2PPort }} + targetPort: p2p + protocol: TCP + name: p2p + - port: {{ .Values.RestPort }} + targetPort: rest + protocol: TCP + name: rest + selector: + {{- include "lnd.selectorLabels" . 
| nindent 4 }} diff --git a/resources/charts/bitcoincore/charts/lnd/values.yaml b/resources/charts/bitcoincore/charts/lnd/values.yaml new file mode 100644 index 000000000..d56e65bf4 --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/values.yaml @@ -0,0 +1,134 @@ +# Default values for lnd. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +namespace: warnet + +image: + repository: lightninglabs/lnd + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "v0.18.3-beta" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +podLabels: + app: "warnet" + mission: "lightning" + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + +RPCPort: 10009 +P2PPort: 9735 +RestPort: 8080 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + +livenessProbe: + exec: + command: + - pidof + - lnd + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 +readinessProbe: + failureThreshold: 1 + periodSeconds: 1 + successThreshold: 1 + tcpSocket: + port: 10009 + timeoutSeconds: 1 +startupProbe: + failureThreshold: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 60 + exec: + command: + - /bin/sh + - -c + - | + PHRASE=`curl --silent --insecure https://localhost:8080/v1/genseed | grep -o '\[[^]]*\]'` + curl --insecure https://localhost:8080/v1/initwallet --data "{\"macaroon_root_key\":\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=\", \"wallet_password\":\"AAAAAAAAAAA=\", \"cipher_seed_mnemonic\": $PHRASE}" + +# Additional volumes on the output Deployment definition. +volumes: [] +# - name: foo +# secret: +# secretName: mysecret +# optional: false + +# Additional volumeMounts on the output Deployment definition. 
+volumeMounts: [] +# - name: foo +# mountPath: "/etc/foo" +# readOnly: true + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +baseConfig: | + norest=false + restlisten=0.0.0.0:8080 + debuglevel=debug + accept-keysend=true + bitcoin.active=true + bitcoin.node=bitcoind + maxpendingchannels=64 + trickledelay=1 + rpclisten=0.0.0.0:10009 + bitcoind.rpcuser=user + protocol.wumbo-channels=1 + # zmq* and bitcoind.rpcpass are set in configmap.yaml + +config: "" + +defaultConfig: "" + +channels: [] diff --git a/resources/charts/bitcoincore/templates/_helpers.tpl b/resources/charts/bitcoincore/templates/_helpers.tpl index 26258b5de..81ab85a37 100644 --- a/resources/charts/bitcoincore/templates/_helpers.tpl +++ b/resources/charts/bitcoincore/templates/_helpers.tpl @@ -65,6 +65,6 @@ Always add for custom semver, check version for valid semver {{- $custom := contains "-" .Values.image.tag -}} {{- $newer := semverCompare ">=0.17.0" .Values.image.tag -}} {{- if or $newer $custom -}} -[{{ .Values.chain }}] +[{{ .Values.global.chain }}] {{- end -}} {{- end -}} diff --git a/resources/charts/bitcoincore/templates/configmap.yaml b/resources/charts/bitcoincore/templates/configmap.yaml index 36c5ab389..cc1e580f2 100644 --- a/resources/charts/bitcoincore/templates/configmap.yaml +++ b/resources/charts/bitcoincore/templates/configmap.yaml @@ -6,14 +6,14 @@ metadata: {{- include "bitcoincore.labels" . | nindent 4 }} data: bitcoin.conf: | - {{ .Values.chain }}=1 + {{ .Values.global.chain }}=1 {{ template "bitcoincore.check_semver" . }} {{- .Values.baseConfig | nindent 4 }} - rpcport={{ index .Values .Values.chain "RPCPort" }} - rpcpassword={{ .Values.rpcpassword }} - zmqpubrawblock=tcp://0.0.0.0:{{ .Values.ZMQBlockPort }} - zmqpubrawtx=tcp://0.0.0.0:{{ .Values.ZMQTxPort }} + rpcport={{ index .Values.global .Values.global.chain "RPCPort" }} + rpcpassword={{ .Values.global.rpcpassword }} + zmqpubrawblock=tcp://0.0.0.0:{{ .Values.global.ZMQBlockPort }} + zmqpubrawtx=tcp://0.0.0.0:{{ .Values.global.ZMQTxPort }} {{- .Values.defaultConfig | nindent 4 }} {{- .Values.config | nindent 4 }} {{- range .Values.addnode }} diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index d7076e6e9..56cd61958 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -7,9 +7,11 @@ metadata: {{- with .Values.podLabels }} {{- toYaml . | nindent 4 }} {{- end }} - chain: {{ .Values.chain }} - RPCPort: "{{ index .Values .Values.chain "RPCPort" }}" - rpcpassword: {{ .Values.rpcpassword }} + chain: {{ .Values.global.chain }} + RPCPort: "{{ index .Values.global .Values.global.chain "RPCPort" }}" + ZMQTxPort: "{{ .Values.global.ZMQTxPort }}" + ZMQBlockPort: "{{ .Values.global.ZMQBlockPort }}" + rpcpassword: {{ .Values.global.rpcpassword }} app: {{ include "bitcoincore.fullname" . 
}} {{- if .Values.collectLogs }} collect_logs: "true" @@ -32,8 +34,8 @@ spec: args: - | apk add --no-cache curl - mkdir -p /root/.bitcoin/{{ .Values.chain }} - curl -L {{ .Values.loadSnapshot.url }} | tar -xz -C /root/.bitcoin/{{ .Values.chain }} + mkdir -p /root/.bitcoin/{{ .Values.global.chain }} + curl -L {{ .Values.loadSnapshot.url }} | tar -xz -C /root/.bitcoin/{{ .Values.global.chain }} volumeMounts: - name: data mountPath: /root/.bitcoin @@ -46,23 +48,23 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: rpc - containerPort: {{ index .Values .Values.chain "RPCPort" }} + containerPort: {{ index .Values.global .Values.global.chain "RPCPort" }} protocol: TCP - name: p2p - containerPort: {{ index .Values .Values.chain "P2PPort" }} + containerPort: {{ index .Values.global .Values.global.chain "P2PPort" }} protocol: TCP - name: zmq-tx - containerPort: {{ .Values.ZMQTxPort }} + containerPort: {{ .Values.global.ZMQTxPort }} protocol: TCP - name: zmq-block - containerPort: {{ .Values.ZMQBlockPort }} + containerPort: {{ .Values.global.ZMQBlockPort }} protocol: TCP livenessProbe: {{- toYaml .Values.livenessProbe | nindent 8 }} readinessProbe: {{- toYaml .Values.readinessProbe | nindent 8 }} tcpSocket: - port: {{ index .Values .Values.chain "RPCPort" }} + port: {{ index .Values.global .Values.global.chain "RPCPort" }} resources: {{- toYaml .Values.resources | nindent 8 }} volumeMounts: @@ -86,11 +88,11 @@ spec: - name: BITCOIN_RPC_HOST value: "127.0.0.1" - name: BITCOIN_RPC_PORT - value: "{{ index .Values .Values.chain "RPCPort" }}" + value: "{{ index .Values.global .Values.global.chain "RPCPort" }}" - name: BITCOIN_RPC_USER value: user - name: BITCOIN_RPC_PASSWORD - value: {{ .Values.rpcpassword }} + value: {{ .Values.global.rpcpassword }} {{- if .Values.metrics }} - name: METRICS value: {{ .Values.metrics }} diff --git a/resources/charts/bitcoincore/templates/service.yaml b/resources/charts/bitcoincore/templates/service.yaml index f37c384ef..8d8fa5324 100644 --- a/resources/charts/bitcoincore/templates/service.yaml +++ b/resources/charts/bitcoincore/templates/service.yaml @@ -8,19 +8,19 @@ metadata: spec: type: {{ .Values.service.type }} ports: - - port: {{ index .Values .Values.chain "RPCPort" }} + - port: {{ index .Values.global .Values.global.chain "RPCPort" }} targetPort: rpc protocol: TCP name: rpc - - port: {{ index .Values .Values.chain "P2PPort" }} + - port: {{ index .Values.global .Values.global.chain "P2PPort" }} targetPort: p2p protocol: TCP name: p2p - - port: {{ .Values.ZMQTxPort }} + - port: {{ .Values.global.ZMQTxPort }} targetPort: zmq-tx protocol: TCP name: zmq-tx - - port: {{ .Values.ZMQBlockPort }} + - port: {{ .Values.global.ZMQBlockPort }} targetPort: zmq-block protocol: TCP name: zmq-block diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index 6314ae32c..8c9f3215f 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -33,17 +33,6 @@ securityContext: {} service: type: ClusterIP -regtest: - RPCPort: 18443 - P2PPort: 18444 - -signet: - RPCPort: 38332 - P2PPort: 38333 - -ZMQTxPort: 28333 -ZMQBlockPort: 28332 - ingress: enabled: false className: "" @@ -109,12 +98,23 @@ tolerations: [] affinity: {} -chain: regtest - collectLogs: false metricsExport: false prometheusMetricsPort: 9332 +# These are values that are propogated to the sub-charts (i.e. 
lightning nodes) +global: + chain: regtest + regtest: + RPCPort: 18443 + P2PPort: 18444 + signet: + RPCPort: 38332 + P2PPort: 38333 + ZMQTxPort: 28333 + ZMQBlockPort: 28332 + rpcpassword: gn0cchi + baseConfig: | checkmempool=0 debuglogfile=debug.log @@ -130,8 +130,6 @@ baseConfig: | rest=1 # rpcport and zmq endpoints are configured by chain in configmap.yaml -rpcpassword: gn0cchi - config: "" defaultConfig: "" @@ -141,3 +139,6 @@ addnode: [] loadSnapshot: enabled: false url: "" + +ln: + lnd: false \ No newline at end of file diff --git a/resources/charts/commander/templates/pod.yaml b/resources/charts/commander/templates/pod.yaml index 1a9bb9310..0ad4583e1 100644 --- a/resources/charts/commander/templates/pod.yaml +++ b/resources/charts/commander/templates/pod.yaml @@ -23,7 +23,7 @@ spec: mountPath: /shared containers: - name: {{ .Chart.Name }} - image: python:3.12-slim + image: bitcoindevproject/commander imagePullPolicy: IfNotPresent command: ["/bin/sh", "-c"] args: @@ -35,3 +35,4 @@ spec: volumes: - name: shared-volume emptyDir: {} + serviceAccountName: {{ include "commander.fullname" . }} \ No newline at end of file diff --git a/resources/charts/commander/templates/rbac.yaml b/resources/charts/commander/templates/rbac.yaml new file mode 100644 index 000000000..365ec62ff --- /dev/null +++ b/resources/charts/commander/templates/rbac.yaml @@ -0,0 +1,65 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "commander.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ .Chart.Name }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "commander.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ .Chart.Name }} +rules: + - apiGroups: [""] + resources: ["pods", "configmaps"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "commander.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ .Chart.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "commander.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "commander.fullname" . }} + namespace: {{ .Release.Namespace }} +{{- if .Values.admin }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "commander.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ .Chart.Name }} +rules: + - apiGroups: [""] + resources: ["pods", "namespaces", "configmaps"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "commander.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ .Chart.Name }} +roleRef: + kind: ClusterRole + name: {{ include "commander.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ include "commander.fullname" . 
}} + namespace: {{ .Release.Namespace }} +{{- end}} \ No newline at end of file diff --git a/resources/charts/commander/values.yaml b/resources/charts/commander/values.yaml index 55ad80f7a..23ba35354 100644 --- a/resources/charts/commander/values.yaml +++ b/resources/charts/commander/values.yaml @@ -66,3 +66,5 @@ volumeMounts: [] port: args: "" + +admin: false \ No newline at end of file diff --git a/resources/charts/namespaces/values.yaml b/resources/charts/namespaces/values.yaml index 23ef66754..b68480705 100644 --- a/resources/charts/namespaces/values.yaml +++ b/resources/charts/namespaces/values.yaml @@ -7,13 +7,13 @@ roles: - name: pod-viewer rules: - apiGroups: [""] - resources: ["pods", "services"] + resources: ["pods", "services", "configmaps"] verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] verbs: ["get"] - apiGroups: [""] - resources: ["configmaps", "secrets"] + resources: ["secrets"] verbs: ["get", "list"] - apiGroups: [""] resources: ["persistentvolumeclaims", "namespaces"] @@ -33,7 +33,10 @@ roles: resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] verbs: ["get", "create"] - apiGroups: [""] - resources: ["configmaps", "secrets"] + resources: ["configmaps", "secrets", "serviceaccounts"] + verbs: ["get", "list", "create", "update", "watch"] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings"] verbs: ["get", "list", "create", "update"] - apiGroups: [""] resources: ["persistentvolumeclaims", "namespaces"] diff --git a/resources/images/bitcoin/Dockerfile.dev b/resources/images/bitcoin/Dockerfile.dev new file mode 100644 index 000000000..d64b6aff2 --- /dev/null +++ b/resources/images/bitcoin/Dockerfile.dev @@ -0,0 +1,80 @@ +# Setup deps stage +FROM alpine AS deps +ARG REPO +ARG COMMIT_SHA +ARG BUILD_ARGS + +RUN --mount=type=cache,target=/var/cache/apk \ + sed -i 's/http\:\/\/dl-cdn.alpinelinux.org/https\:\/\/alpine.global.ssl.fastly.net/g' /etc/apk/repositories \ + && apk --no-cache add \ + cmake \ + python3 \ + boost-dev \ + build-base \ + chrpath \ + file \ + gnupg \ + git \ + libevent-dev \ + libressl \ + libtool \ + linux-headers \ + sqlite-dev \ + zeromq-dev + +COPY isroutable.patch /tmp/ +COPY addrman.patch /tmp/ + + +# Clone and patch and build stage +FROM deps AS build +ENV BITCOIN_PREFIX=/opt/bitcoin +WORKDIR /build + +RUN set -ex \ + && cd /build \ + && git clone --depth 1 "https://github.com/${REPO}" \ + && cd bitcoin \ + && git fetch --depth 1 origin "$COMMIT_SHA" \ + && git checkout "$COMMIT_SHA" \ + && git apply /tmp/isroutable.patch \ + && git apply /tmp/addrman.patch \ + && sed -i s:sys/fcntl.h:fcntl.h: src/compat/compat.h \ + && cmake -B build \ + -DCMAKE_INSTALL_PREFIX=${BITCOIN_PREFIX} \ + ${BUILD_ARGS} \ + && cmake --build build -j$(nproc) \ + && cmake --install build \ + && strip ${BITCOIN_PREFIX}/bin/bitcoin-cli \ + && strip ${BITCOIN_PREFIX}/bin/bitcoind \ + && rm -f ${BITCOIN_PREFIX}/lib/libbitcoinconsensus.a \ + && rm -f ${BITCOIN_PREFIX}/lib/libbitcoinconsensus.so.0.0.0 + +# Final clean stage +FROM alpine +ARG UID=100 +ARG GID=101 +ENV BITCOIN_DATA=/root/.bitcoin +ENV BITCOIN_PREFIX=/opt/bitcoin +ENV PATH=${BITCOIN_PREFIX}/bin:$PATH +LABEL maintainer.0="bitcoindevproject" + +RUN addgroup bitcoin --gid ${GID} --system \ + && adduser --uid ${UID} --system bitcoin --ingroup bitcoin +RUN --mount=type=cache,target=/var/cache/apk sed -i 's/http\:\/\/dl-cdn.alpinelinux.org/https\:\/\/alpine.global.ssl.fastly.net/g' /etc/apk/repositories \ + 
&& apk --no-cache add \ + bash \ + libevent \ + libzmq \ + shadow \ + sqlite-dev \ + su-exec + +COPY --from=build /opt/bitcoin /usr/local +COPY entrypoint.sh / + +VOLUME ["/home/bitcoin/.bitcoin"] +EXPOSE 8332 8333 18332 18333 18443 18444 38333 38332 + +ENTRYPOINT ["/entrypoint.sh"] +CMD ["bitcoind"] diff --git a/resources/images/bitcoin/insecure/Dockerfile b/resources/images/bitcoin/insecure/Dockerfile index 6f59a4c2e..ce6699872 100644 --- a/resources/images/bitcoin/insecure/Dockerfile +++ b/resources/images/bitcoin/insecure/Dockerfile @@ -48,7 +48,12 @@ RUN mkdir -p ${BERKELEYDB_PREFIX} WORKDIR /${BERKELEYDB_VERSION}/build_unix -RUN ../dist/configure --enable-cxx --disable-shared --with-pic --prefix=${BERKELEYDB_PREFIX} +ARG TARGETPLATFORM +RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ + ../dist/configure --enable-cxx --disable-shared --with-pic --prefix=${BERKELEYDB_PREFIX} --build=aarch64-unknown-linux-gnu; \ +else \ + ../dist/configure --enable-cxx --disable-shared --with-pic --prefix=${BERKELEYDB_PREFIX}; \ +fi RUN make -j$(nproc) RUN make install RUN rm -rf ${BERKELEYDB_PREFIX}/docs diff --git a/resources/images/bitcoin/insecure/build.md b/resources/images/bitcoin/insecure/build.md deleted file mode 100644 index a824a8316..000000000 --- a/resources/images/bitcoin/insecure/build.md +++ /dev/null @@ -1,198 +0,0 @@ -# Historic CVE images - -These images are for old versions of Bitcoin Core with known CVEs. These images have signet backported -and the addrman and isroutable patches applied. - -# Build incantations - -Run from top-level of project - -## v0.21.1 - -```bash -docker buildx build \ - --platform linux/amd64,linux/armhf \ - --build-context bitcoin-src="." \ - --build-arg ALPINE_VERSION="3.17" \ - --build-arg BITCOIN_VERSION="0.21.1" \ - --build-arg EXTRA_PACKAGES="sqlite-dev" \ - --build-arg EXTRA_RUNTIME_PACKAGES="boost-filesystem sqlite-dev" \ - --build-arg REPO="josibake/bitcoin" \ - --build-arg COMMIT_SHA="e0a22f14c15b4877ef6221f9ee2dfe510092d734" \ - --tag bitcoindevproject/bitcoin:0.21.1 \ - resources/images/bitcoin/insecure -``` - -## v0.20.0 - -```bash -docker buildx build \ - --platform linux/amd64,linux/armhf \ - --build-context bitcoin-src="." \ - --build-arg ALPINE_VERSION="3.12.12" \ - --build-arg BITCOIN_VERSION="0.20.0" \ - --build-arg EXTRA_PACKAGES="sqlite-dev miniupnpc" \ - --build-arg EXTRA_RUNTIME_PACKAGES="boost-filesystem sqlite-dev" \ - --build-arg REPO="josibake/bitcoin" \ - --build-arg COMMIT_SHA="0bbff8feff0acf1693dfe41184d9a4fd52001d3f" \ - --tag bitcoindevproject/bitcoin:0.20.0 \ - resources/images/bitcoin/insecure -``` - -## v0.19.2 - -```bash -docker buildx build \ - --platform linux/amd64,linux/armhf \ - --build-context bitcoin-src="." \ - --build-arg ALPINE_VERSION="3.12.12" \ - --build-arg BITCOIN_VERSION="0.19.2" \ - --build-arg EXTRA_PACKAGES="sqlite-dev libressl-dev" \ - --build-arg EXTRA_RUNTIME_PACKAGES="boost-chrono boost-filesystem libressl sqlite-dev" \ - --build-arg REPO="josibake/bitcoin" \ - --build-arg COMMIT_SHA="e20f83eb5466a7d68227af14a9d0cf66fb520ffc" \ - --tag bitcoindevproject/bitcoin:0.19.2 \ - resources/images/bitcoin/insecure -``` - -## v0.17.0 - -```bash -docker buildx build \ - --platform linux/amd64,linux/armhf \ - --build-context bitcoin-src="." 
\ - --build-arg ALPINE_VERSION="3.9" \ - --build-arg BITCOIN_VERSION="0.17.0" \ - --build-arg EXTRA_PACKAGES="protobuf-dev libressl-dev" \ - --build-arg EXTRA_RUNTIME_PACKAGES="boost boost-program_options libressl sqlite-dev" \ - --build-arg REPO="josibake/bitcoin" \ - --build-arg COMMIT_SHA="f6b2db49a707e7ad433d958aee25ce561c66521a" \ - --tag bitcoindevproject/bitcoin:0.17.0 \ - resources/images/bitcoin/insecure -``` - -## v0.16.1 - -```bash -docker buildx build \ - --platform linux/amd64,linux/armhf \ - --build-context bitcoin-src="." \ - --build-arg ALPINE_VERSION="3.7" \ - --build-arg BITCOIN_VERSION="0.16.1" \ - --build-arg EXTRA_PACKAGES="protobuf-dev libressl-dev" \ - --build-arg PRE_CONFIGURE_COMMANDS="sed -i '/AC_PREREQ/a\AR_FLAGS=cr' src/univalue/configure.ac && sed -i '/AX_PROG_CC_FOR_BUILD/a\AR_FLAGS=cr' src/secp256k1/configure.ac && sed -i 's:sys/fcntl.h:fcntl.h:' src/compat.h" \ - --build-arg EXTRA_RUNTIME_PACKAGES="boost boost-program_options libressl" \ - --build-arg REPO="josibake/bitcoin" \ - --build-arg COMMIT_SHA="dc94c00e58c60412a4e1a540abdf0b56093179e8" \ - --tag bitcoindevproject/bitcoin:0.16.1 \ - resources/images/bitcoin/insecure -``` - -## unknown p2p message crash - -Will crash when sent an "unknown" P2P message is received from a node using protocol version >= 70016 - -```bash -docker buildx build \ - --platform linux/amd64,linux/armhf \ - --build-context bitcoin-src="." \ - --build-arg ALPINE_VERSION="3.20" \ - --build-arg BITCOIN_VERSION="28.1.1" \ - --build-arg EXTRA_PACKAGES="sqlite-dev" \ - --build-arg EXTRA_RUNTIME_PACKAGES="" \ - --build-arg REPO="willcl-ark/bitcoin" \ - --build-arg COMMIT_SHA="df1768325cca49bb867b7919675ae06c964b5ffa" \ - --tag bitcoindevproject/bitcoin:99.1.0-unknown-message \ - resources/images/bitcoin/insecure -``` - -## invalid blocks crash - -Will crash when sent an invalid block - -```bash -docker buildx build \ - --platform linux/amd64,linux/armhf \ - --build-context bitcoin-src="." \ - --build-arg ALPINE_VERSION="3.20" \ - --build-arg BITCOIN_VERSION="28.1.1" \ - --build-arg EXTRA_PACKAGES="sqlite-dev" \ - --build-arg EXTRA_RUNTIME_PACKAGES="" \ - --build-arg REPO="willcl-ark/bitcoin" \ - --build-arg COMMIT_SHA="f72bc595fc762c7afcbd156f4f84bf48f7ff4fdb" \ - --tag bitcoindevproject/bitcoin:99.1.0-invalid-blocks \ - resources/images/bitcoin/insecure -``` - -## too many orphans crash - -Will crash when we have 50 orphans in the orphanage - -```bash -docker buildx build \ - --platform linux/amd64,linux/armhf \ - --build-context bitcoin-src="." \ - --build-arg ALPINE_VERSION="3.20" \ - --build-arg BITCOIN_VERSION="28.1.1" \ - --build-arg EXTRA_PACKAGES="sqlite-dev" \ - --build-arg EXTRA_RUNTIME_PACKAGES="" \ - --build-arg REPO="willcl-ark/bitcoin" \ - --build-arg COMMIT_SHA="38aff9d695f5aa187fc3b75f08228248963372ee" \ - --tag bitcoindevproject/bitcoin:99.1.0-50-orphans \ - resources/images/bitcoin/insecure -``` - -## full mempool crash - -Will crash when we would normally trim the mempool size. -Mempool set to 50MB by default. - -```bash -docker buildx build \ - --platform linux/amd64,linux/armhf \ - --build-context bitcoin-src="." 
\ - --build-arg ALPINE_VERSION="3.20" \ - --build-arg BITCOIN_VERSION="28.1.1" \ - --build-arg EXTRA_PACKAGES="sqlite-dev" \ - --build-arg EXTRA_RUNTIME_PACKAGES="" \ - --build-arg REPO="willcl-ark/bitcoin" \ - --build-arg COMMIT_SHA="d30f8112611c4732ccb01f0a0216eb7ed10e04a7" \ - --tag bitcoindevproject/bitcoin:99.1.0-no-mp-trim\ - resources/images/bitcoin/insecure -``` - -## disabled opcodes crash - -Will crash when processing a disabled opcode - -```bash -docker buildx build \ - --platform linux/amd64,linux/armhf \ - --build-context bitcoin-src="." \ - --build-arg ALPINE_VERSION="3.20" \ - --build-arg BITCOIN_VERSION="28.1.1" \ - --build-arg EXTRA_PACKAGES="sqlite-dev" \ - --build-arg EXTRA_RUNTIME_PACKAGES="" \ - --build-arg REPO="willcl-ark/bitcoin" \ - --build-arg COMMIT_SHA="51e068ed42727eee08af62e09eb5789d8b910f61" \ - --tag bitcoindevproject/bitcoin:99.1.0-disabled-opcodes \ - resources/images/bitcoin/insecure -``` - -## crash when 5k inv messages received - -Will crash when we receive a total of 5k `INV` p2p messages are received from a single peer. - -```bash -docker buildx build \ - --platform linux/amd64,linux/armhf \ - --build-context bitcoin-src="." \ - --build-arg ALPINE_VERSION="3.20" \ - --build-arg BITCOIN_VERSION="28.1.1" \ - --build-arg EXTRA_PACKAGES="sqlite-dev" \ - --build-arg EXTRA_RUNTIME_PACKAGES="" \ - --build-arg REPO="willcl-ark/bitcoin" \ - --build-arg COMMIT_SHA="3e1ce7de0d19f791315fa87e0d29504ee0c80fe8" \ - --tag bitcoindevproject/bitcoin:99.1.0-5k-inv \ - resources/images/bitcoin/insecure -``` diff --git a/resources/images/commander/Dockerfile b/resources/images/commander/Dockerfile new file mode 100644 index 000000000..4a4744717 --- /dev/null +++ b/resources/images/commander/Dockerfile @@ -0,0 +1,5 @@ +# Use an official Python runtime as the base image +FROM python:3.12-slim + +# Python dependencies +RUN pip install --no-cache-dir kubernetes diff --git a/resources/images/exporter/bitcoin-exporter.py b/resources/images/exporter/bitcoin-exporter.py index 8b2cbec25..99c992207 100644 --- a/resources/images/exporter/bitcoin-exporter.py +++ b/resources/images/exporter/bitcoin-exporter.py @@ -28,7 +28,7 @@ def auth_proxy_request(self, method, path, postdata): # label=method(params)[return object key][...] 
METRICS = os.environ.get( "METRICS", - 'blocks=getblockcount() inbounds=getnetworkinfo()["connections_in"] outbounds=getnetworkinfo()["connections_in"] mempool_size=getmempoolinfo()["size"]', + 'blocks=getblockcount() inbounds=getnetworkinfo()["connections_in"] outbounds=getnetworkinfo()["connections_out"] mempool_size=getmempoolinfo()["size"]', ) # Set up bitcoind RPC client diff --git a/resources/networks/hello/network.yaml b/resources/networks/hello/network.yaml new file mode 100644 index 000000000..f5acf0a83 --- /dev/null +++ b/resources/networks/hello/network.yaml @@ -0,0 +1,87 @@ +nodes: + - name: tank-0000 + addnode: + - tank-0001 + ln: + lnd: true + + - name: tank-0001 + addnode: + - tank-0002 + ln: + lnd: true + + - name: tank-0002 + addnode: + - tank-0000 + ln: + lnd: true + + - name: tank-0003 + addnode: + - tank-0000 + ln: + lnd: true + lnd: + config: | + bitcoin.timelockdelta=33 + channels: + - id: + block: 300 + index: 1 + target: tank-0004-ln + capacity: 100000 + push_amt: 50000 + + - name: tank-0004 + addnode: + - tank-0000 + ln: + lnd: true + lnd: + channels: + - id: + block: 300 + index: 2 + target: tank-0005-ln + capacity: 50000 + push_amt: 25000 + + - name: tank-0005 + addnode: + - tank-0000 + ln: + lnd: true + +plugins: # Each plugin section has a number of hooks available (preDeploy, postDeploy, etc) + preDeploy: # For example, the preDeploy hook means it's plugin will run before all other deploy code + hello: + entrypoint: "../../plugins/hello" # This entrypoint path is relative to the network.yaml file + podName: "hello-pre-deploy" + helloTo: "preDeploy!" + postDeploy: + hello: + entrypoint: "../../plugins/hello" + podName: "hello-post-deploy" + helloTo: "postDeploy!" + simln: # You can have multiple plugins per hook + entrypoint: "../../plugins/simln" + activity: '[{"source": "tank-0003-ln", "destination": "tank-0005-ln", "interval_secs": 1, "amount_msat": 2000}]' + preNode: # preNode plugins run before each node is deployed + hello: + entrypoint: "../../plugins/hello" + helloTo: "preNode!" + postNode: + hello: + entrypoint: "../../plugins/hello" + helloTo: "postNode!" + preNetwork: + hello: + entrypoint: "../../plugins/hello" + helloTo: "preNetwork!" + podName: "hello-pre-network" + postNetwork: + hello: + entrypoint: "../../plugins/hello" + helloTo: "postNetwork!" + podName: "hello-post-network" diff --git a/resources/networks/hello/node-defaults.yaml b/resources/networks/hello/node-defaults.yaml new file mode 100644 index 000000000..24a00b5c8 --- /dev/null +++ b/resources/networks/hello/node-defaults.yaml @@ -0,0 +1,8 @@ +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + tag: "27.0" + +lnd: + defaultConfig: | + color=#000000 diff --git a/resources/plugins/__init__.py b/resources/plugins/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/resources/plugins/hello/README.md b/resources/plugins/hello/README.md new file mode 100644 index 000000000..77bb5040f --- /dev/null +++ b/resources/plugins/hello/README.md @@ -0,0 +1,124 @@ +# Hello Plugin + +## Hello World! +*Hello* is an example plugin to demonstrate the features of Warnet's plugin architecture. It uses each of the hooks available in the `warnet deploy` command (see the example below for details). + +## Usage +In your python virtual environment with Warnet installed and setup, create a new Warnet user folder (follow the prompts): + +`$ warnet new user_folder` + +`$ cd user_folder` + +Deploy the *hello* network. 
+ +`$ warnet deploy networks/hello` + +While that is launching, take a look inside the `networks/hello/network.yaml` file. You can also see the copy below which includes commentary on the structure of plugins in the `network.yaml` file. + +Also, take a look at the `plugins/hello/plugin.py` file to see how plugins work and to find out how to author your own plugin. + +Once `deploy` completes, view the pods of the *hello* network by invoking `kubectl get all -A`. + +To view the various "Hello World!" messages, run `kubectl logs pod/POD_NAME` + +### A `network.yaml` example +When you initialize a new Warnet network, Warnet will create a new `network.yaml` file. You can modify these files to fit your needs. + +For example, the `network.yaml` file below includes the *hello* plugin, lightning nodes, and the *simln* plugin. + +
+network.yaml + +````yaml +nodes: + - name: tank-0000 + addnode: + - tank-0001 + ln: + lnd: true + + - name: tank-0001 + addnode: + - tank-0002 + ln: + lnd: true + + - name: tank-0002 + addnode: + - tank-0000 + ln: + lnd: true + + - name: tank-0003 + addnode: + - tank-0000 + ln: + lnd: true + lnd: + config: | + bitcoin.timelockdelta=33 + channels: + - id: + block: 300 + index: 1 + target: tank-0004-ln + capacity: 100000 + push_amt: 50000 + + - name: tank-0004 + addnode: + - tank-0000 + ln: + lnd: true + lnd: + channels: + - id: + block: 300 + index: 2 + target: tank-0005-ln + capacity: 50000 + push_amt: 25000 + + - name: tank-0005 + addnode: + - tank-0000 + ln: + lnd: true + +plugins: # Each plugin section has a number of hooks available (preDeploy, postDeploy, etc) + preDeploy: # For example, the preDeploy hook means it's plugin will run before all other deploy code + hello: + entrypoint: "../../plugins/hello" # This entrypoint path is relative to the network.yaml file + podName: "hello-pre-deploy" + helloTo: "preDeploy!" + postDeploy: + hello: + entrypoint: "../../plugins/hello" + podName: "hello-post-deploy" + helloTo: "postDeploy!" + simln: # You can have multiple plugins per hook + entrypoint: "../../plugins/simln" + activity: '[{"source": "tank-0003-ln", "destination": "tank-0005-ln", "interval_secs": 1, "amount_msat": 2000}]' + preNode: # preNode plugins run before each node is deployed + hello: + entrypoint: "../../plugins/hello" + helloTo: "preNode!" + postNode: + hello: + entrypoint: "../../plugins/hello" + helloTo: "postNode!" + preNetwork: + hello: + entrypoint: "../../plugins/hello" + helloTo: "preNetwork!" + podName: "hello-pre-network" + postNetwork: + hello: + entrypoint: "../../plugins/hello" + helloTo: "postNetwork!" + podName: "hello-post-network" +```` + +
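+
+### Running the hello chart by hand
+Under the hood, `plugin.py` launches its pod by installing the `charts/hello` Helm chart (see `_launch_pod` in `plugins/hello/plugin.py`). As a rough sketch (the release and pod names below are just examples), you can reproduce that step manually from your user folder:
+
+````bash
+# Install the chart directly, overriding the same values the plugin sets
+helm upgrade --install hello-manual ./plugins/hello/charts/hello \
+  --set podName=hello-manual-pod --set helloTo="a manual run"
+
+# The pod echoes its greeting and exits; read the message back with kubectl
+kubectl logs pod/hello-manual-pod
+````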
+ diff --git a/resources/plugins/hello/charts/hello/Chart.yaml b/resources/plugins/hello/charts/hello/Chart.yaml new file mode 100644 index 000000000..abd94467e --- /dev/null +++ b/resources/plugins/hello/charts/hello/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: hello-chart +description: A Helm chart for a hello Pod +version: 0.1.0 +appVersion: "1.0" \ No newline at end of file diff --git a/resources/plugins/hello/charts/hello/templates/pod.yaml b/resources/plugins/hello/charts/hello/templates/pod.yaml new file mode 100644 index 000000000..ba5319670 --- /dev/null +++ b/resources/plugins/hello/charts/hello/templates/pod.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ .Values.podName }} + labels: + app: {{ .Chart.Name }} +spec: + restartPolicy: Never + containers: + - name: {{ .Values.podName }}-container + image: alpine:latest + command: ["sh", "-c"] + args: + - echo "Hello {{ .Values.helloTo }}"; + resources: {} \ No newline at end of file diff --git a/resources/plugins/hello/charts/hello/values.yaml b/resources/plugins/hello/charts/hello/values.yaml new file mode 100644 index 000000000..302da3c15 --- /dev/null +++ b/resources/plugins/hello/charts/hello/values.yaml @@ -0,0 +1,2 @@ +podName: hello-pod +helloTo: "world" \ No newline at end of file diff --git a/resources/plugins/hello/plugin.py b/resources/plugins/hello/plugin.py new file mode 100755 index 000000000..3253216e9 --- /dev/null +++ b/resources/plugins/hello/plugin.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python3 +import json +import logging +from enum import Enum +from pathlib import Path +from typing import Optional + +import click + +from warnet.constants import PLUGIN_ANNEX, AnnexMember, HookValue, WarnetContent +from warnet.process import run_command + +# It is common for Warnet objects to have a "mission" label to help query them in the cluster. +MISSION = "hello" +PRIMARY_CONTAINER = MISSION + +PLUGIN_DIR_TAG = "plugin_dir" + + +class PluginError(Exception): + pass + + +log = logging.getLogger(MISSION) +if not log.hasHandlers(): + console_handler = logging.StreamHandler() + console_handler.setLevel(logging.DEBUG) + formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") + console_handler.setFormatter(formatter) + log.addHandler(console_handler) +log.setLevel(logging.DEBUG) +log.propagate = True + + +# Plugins look like this in the `network.yaml` file: +# +# plugins: +# hello: +# podName: "a-pod-name" +# helloTo: "World!" +# +# "podName" and "helloTo" are essentially dictionary keys, and it helps to keep those keys in an +# enum in order to prevent typos. +class PluginContent(Enum): + POD_NAME = "podName" + HELLO_TO = "helloTo" + + +# Warnet uses a python package called "click" to manage terminal interactions with the user. +# To use click, we must declare a click "group" by decorating a function named after the plugin. +# While optional, using click makes it easy for users to interact with your plugin. +@click.group() +@click.pass_context +def hello(ctx): + """Commands for the Hello plugin""" + ctx.ensure_object(dict) + plugin_dir = Path(__file__).resolve().parent + ctx.obj[PLUGIN_DIR_TAG] = Path(plugin_dir) + + +# Each Warnet plugin must have an entrypoint function which takes two JSON objects: plugin_content +# and warnet_content. We have seen the PluginContent enum above. Warnet also has a WarnetContent +# enum which holds the keys to the warnet_content dictionary. 
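+#
+# As a purely illustrative sketch (the real key strings live in warnet.constants, so
+# treat these literals as assumptions), the two JSON arguments delivered to the
+# entrypoint look roughly like:
+#   plugin_content: {"podName": "hello-post-deploy", "helloTo": "postDeploy!"}
+#   warnet_content: a dict carrying the hook value (e.g. "postDeploy") plus, for the
+#   preNode/postNode hooks, an annex with per-node data such as the node name.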
+@hello.command()
+@click.argument("plugin_content", type=str)
+@click.argument("warnet_content", type=str)
+@click.pass_context
+def entrypoint(ctx, plugin_content: str, warnet_content: str):
+    """Plugin entrypoint"""
+    plugin_content: dict = json.loads(plugin_content)
+    warnet_content: dict = json.loads(warnet_content)
+
+    hook_value = warnet_content.get(WarnetContent.HOOK_VALUE.value)
+
+    assert hook_value in {
+        item.value for item in HookValue
+    }, f"{hook_value} is not a valid HookValue"
+
+    if warnet_content.get(PLUGIN_ANNEX):
+        for annex_member in [annex_item for annex_item in warnet_content.get(PLUGIN_ANNEX)]:
+            assert annex_member in {
+                item.value for item in AnnexMember
+            }, f"{annex_member} is not a valid AnnexMember"
+
+    warnet_content[WarnetContent.HOOK_VALUE.value] = HookValue(hook_value)
+
+    _entrypoint(ctx, plugin_content, warnet_content)
+
+
+def _entrypoint(ctx, plugin_content: dict, warnet_content: dict):
+    """Called by entrypoint"""
+    hook_value = warnet_content[WarnetContent.HOOK_VALUE.value]
+
+    match hook_value:
+        case (
+            HookValue.PRE_NETWORK
+            | HookValue.POST_NETWORK
+            | HookValue.PRE_DEPLOY
+            | HookValue.POST_DEPLOY
+        ):
+            data = get_data(plugin_content)
+            if data:
+                _launch_pod(ctx, install_name=hook_value.value.lower() + "-hello", **data)
+            else:
+                _launch_pod(ctx, install_name=hook_value.value.lower() + "-hello")
+        case HookValue.PRE_NODE:
+            name = warnet_content[PLUGIN_ANNEX][AnnexMember.NODE_NAME.value] + "-pre-hello-pod"
+            _launch_pod(ctx, install_name=hook_value.value.lower() + "-" + name, podName=name)
+        case HookValue.POST_NODE:
+            name = warnet_content[PLUGIN_ANNEX][AnnexMember.NODE_NAME.value] + "-post-hello-pod"
+            _launch_pod(ctx, install_name=hook_value.value.lower() + "-" + name, podName=name)
+
+
+def get_data(plugin_content: dict) -> Optional[dict]:
+    data = {
+        key: plugin_content.get(key)
+        for key in (PluginContent.POD_NAME.value, PluginContent.HELLO_TO.value)
+        if plugin_content.get(key)
+    }
+    return data or None
+
+
+def _launch_pod(
+    ctx, install_name: str = "hello", podName: str = "hello-pod", helloTo: str = "World!"
+):
+    command = (
+        f"helm upgrade --install {install_name} {ctx.obj[PLUGIN_DIR_TAG]}/charts/hello "
+        f"--set podName={podName} --set helloTo={helloTo}"
+    )
+    log.info(command)
+    log.info(run_command(command))
+
+
+if __name__ == "__main__":
+    hello()
diff --git a/resources/plugins/simln/README.md b/resources/plugins/simln/README.md
new file mode 100644
index 000000000..f6b24ef92
--- /dev/null
+++ b/resources/plugins/simln/README.md
@@ -0,0 +1,113 @@
+# SimLN Plugin
+
+## SimLN
+SimLN helps you generate lightning payment activity.
+
+* Website: https://simln.dev/
+* GitHub: https://github.com/bitcoin-dev-project/sim-ln
+
+## Usage
+SimLN uses "activity" definitions to create payment activity between lightning nodes. These definitions are in JSON format.
+
+SimLN also requires access details for each node; however, the SimLN plugin will automatically generate these access details for each LND node. The access details look like this:
+
+```` JSON
+{
+  "id": <node id>,
+  "address": https://<node address>:<port>,
+  "macaroon": <path to macaroon>,
+  "cert": <path to tls cert>
+}
+````
+
+Since the SimLN plugin already has access to those LND connection details, you can focus on the "activity" definitions.
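For context, the plugin ultimately combines those per-node access details with your activity list into a single `sim.json` that it hands to SimLN. The sketch below (not part of the README in this diff) mirrors `_generate_activity_json()` from `resources/plugins/simln/plugin.py` further down; the tank names are simply the ones used in the examples here.

```python
import json

# Rough shape of the sim.json the plugin writes into the SimLN pod, mirroring
# _generate_activity_json() in resources/plugins/simln/plugin.py. Names are illustrative.
sim_config = {
    "nodes": [
        {
            "id": "tank-0003-ln",
            "address": "https://tank-0003-ln:10009",
            "macaroon": "/working/admin.macaroon",
            "cert": "/working/tls.cert",
        },
        {
            "id": "tank-0005-ln",
            "address": "https://tank-0005-ln:10009",
            "macaroon": "/working/admin.macaroon",
            "cert": "/working/tls.cert",
        },
    ],
    "activity": [
        {
            "source": "tank-0003-ln",
            "destination": "tank-0005-ln",
            "interval_secs": 1,
            "amount_msat": 2000,
        }
    ],
}
print(json.dumps(sim_config, indent=2))
```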
+
+### Launch activity definitions from the command line
+The SimLN plugin takes "activity" definitions like so:
+
+`./simln/plugin.py launch-activity '[{"source": "tank-0003-ln", "destination": "tank-0005-ln", "interval_secs": 1, "amount_msat": 2000}]'`
+
+### Launch activity definitions from within `network.yaml`
+When you initialize a new Warnet network, Warnet will create a new `network.yaml` file. If your `network.yaml` file includes lightning nodes, then you can use SimLN to produce activity between those nodes like this:
+
+
+network.yaml + +````yaml +nodes: + - name: tank-0000 + addnode: + - tank-0001 + ln: + lnd: true + + - name: tank-0001 + addnode: + - tank-0002 + ln: + lnd: true + + - name: tank-0002 + addnode: + - tank-0000 + ln: + lnd: true + + - name: tank-0003 + addnode: + - tank-0000 + ln: + lnd: true + lnd: + config: | + bitcoin.timelockdelta=33 + channels: + - id: + block: 300 + index: 1 + target: tank-0004-ln + capacity: 100000 + push_amt: 50000 + + - name: tank-0004 + addnode: + - tank-0000 + ln: + lnd: true + lnd: + channels: + - id: + block: 300 + index: 2 + target: tank-0005-ln + capacity: 50000 + push_amt: 25000 + + - name: tank-0005 + addnode: + - tank-0000 + ln: + lnd: true + +plugins: + postDeploy: + simln: + entrypoint: "../../plugins/simln" # This is the path to the simln plugin folder (relative to the network.yaml file). + activity: '[{"source": "tank-0003-ln", "destination": "tank-0005-ln", "interval_secs": 1, "amount_msat": 2000}]' +```` + +
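Once deployed this way, the SimLN pod can be inspected with the plugin's own CLI commands defined later in this diff, e.g. `./simln/plugin.py list-pod-names` and `./simln/plugin.py download-results <pod name>`. The snippet below is a rough sketch of the same calls in Python, using the warnet helpers that `plugin.py` imports; it assumes the `warnet` package is installed locally and your kubeconfig points at the cluster running this network.

```python
from pathlib import Path

# Sketch only: mirrors the list-pod-names and download-results commands in
# resources/plugins/simln/plugin.py. Assumes the warnet package is importable
# and kubectl/kubeconfig already target the running cluster.
from warnet.k8s import download, get_mission

# SimLN pods carry the label mission=simln (MISSION in plugin.py)
pods = [pod.metadata.name for pod in get_mission("simln")]
print("SimLN pods:", pods)

if pods:
    # Copies /working/results out of the pod, as `plugin.py download-results` does
    dest = download(pods[0], source_path=Path("/working/results"))
    print(f"Downloaded results to: {dest}")
```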
+ + +## Generating your own SimLn image +The SimLN plugin fetches a SimLN docker image from dockerhub. You can generate your own docker image if you choose: + +1. Clone SimLN: `git clone git@github.com:bitcoin-dev-project/sim-ln.git` +2. Follow the instructions to build a docker image as detailed in the SimLN repository. +3. Tag the resulting docker image: `docker tag IMAGEID YOURUSERNAME/sim-ln:VERSION` +4. Push the tagged image to your dockerhub account. +5. Modify the `values.yaml` file in the plugin's chart to reflect your username and version number: +```YAML + repository: "YOURUSERNAME/sim-ln" + tag: "VERSION" +``` diff --git a/resources/plugins/simln/charts/simln/.helmignore b/resources/plugins/simln/charts/simln/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/resources/plugins/simln/charts/simln/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/resources/plugins/simln/charts/simln/Chart.yaml b/resources/plugins/simln/charts/simln/Chart.yaml new file mode 100644 index 000000000..3df6dd232 --- /dev/null +++ b/resources/plugins/simln/charts/simln/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: simln +description: A Helm chart to deploy SimLN +version: 0.1.0 +appVersion: "0.1.0" diff --git a/resources/plugins/simln/charts/simln/templates/NOTES.txt b/resources/plugins/simln/charts/simln/templates/NOTES.txt new file mode 100644 index 000000000..74486845f --- /dev/null +++ b/resources/plugins/simln/charts/simln/templates/NOTES.txt @@ -0,0 +1 @@ +Thank you for installing SimLN. diff --git a/resources/plugins/simln/charts/simln/templates/_helpers.tpl b/resources/plugins/simln/charts/simln/templates/_helpers.tpl new file mode 100644 index 000000000..a699083e5 --- /dev/null +++ b/resources/plugins/simln/charts/simln/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "mychart.name" -}} +{{- .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "mychart.fullname" -}} +{{- printf "%s-%s" (include "mychart.name" .) .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/resources/plugins/simln/charts/simln/templates/configmap.yaml b/resources/plugins/simln/charts/simln/templates/configmap.yaml new file mode 100644 index 000000000..9688722b6 --- /dev/null +++ b/resources/plugins/simln/charts/simln/templates/configmap.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mychart.fullname" . 
}}-data +data: + tls.cert: | + -----BEGIN CERTIFICATE----- + MIIB8TCCAZagAwIBAgIUJDsR6mmY+TaO9pCfjtotlbOkzJMwCgYIKoZIzj0EAwIw + MjEfMB0GA1UECgwWbG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2Fy + bmV0MB4XDTI0MTExMTE2NTM1MFoXDTM0MTEwOTE2NTM1MFowMjEfMB0GA1UECgwW + bG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2FybmV0MFkwEwYHKoZI + zj0CAQYIKoZIzj0DAQcDQgAEBVltIvaTlAQI/3FFatTqVflZuZdRJ0SmRMSJrFLP + tp0fxE7hmteSt6gjQriy90fP8j9OJXBNAjt915kLY4zVvqOBiTCBhjAOBgNVHQ8B + Af8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB/zAd + BgNVHQ4EFgQU5d8QMrwhLgTkDjWA+eXZGz+dybUwLwYDVR0RBCgwJoIJbG9jYWxo + b3N0ggEqhwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMAoGCCqGSM49BAMCA0kAMEYC + IQDPofN0fEl5gTwCYhk3nZbjMqJhZ8BsSJ6K8XRhxr7zbwIhAPsgQCFOqUWg632O + NEO53OQ6CIqnpxSskjsFNH4ZBQOE + -----END CERTIFICATE----- + admin.macaroon.hex: | + 0201036c6e6402f801030a1062beabbf2a614b112128afa0c0b4fdd61201301a160a0761646472657373120472656164120577726974651a130a04696e666f120472656164120577726974651a170a08696e766f69636573120472656164120577726974651a210a086d616361726f6f6e120867656e6572617465120472656164120577726974651a160a076d657373616765120472656164120577726974651a170a086f6666636861696e120472656164120577726974651a160a076f6e636861696e120472656164120577726974651a140a057065657273120472656164120577726974651a180a067369676e6572120867656e657261746512047265616400000620b17be53e367290871681055d0de15587f6d1cd47d1248fe2662ae27f62cfbdc6 diff --git a/resources/plugins/simln/charts/simln/templates/pod.yaml b/resources/plugins/simln/charts/simln/templates/pod.yaml new file mode 100644 index 000000000..69790c9eb --- /dev/null +++ b/resources/plugins/simln/charts/simln/templates/pod.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "mychart.fullname" . }} + labels: + app: {{ include "mychart.name" . }} + mission: {{ .Values.name }} +spec: + initContainers: + - name: "init" + image: "busybox" + command: + - "sh" + - "-c" + args: + - > + cp /configmap/* /working && + cd /working && + cat admin.macaroon.hex | xxd -r -p > admin.macaroon && + while [ ! -f /working/sim.json ]; do + echo "Waiting for /working/sim.json to exist..." + sleep 1 + done + volumeMounts: + - name: {{ .Values.workingVolume.name }} + mountPath: {{ .Values.workingVolume.mountPath }} + - name: {{ .Values.configmapVolume.name }} + mountPath: {{ .Values.configmapVolume.mountPath }} + containers: + - name: {{ .Values.name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "sh" + - "-c" + args: + - > + cd /working; + sim-cli + volumeMounts: + - name: {{ .Values.workingVolume.name }} + mountPath: {{ .Values.workingVolume.mountPath }} + - name: {{ .Values.configmapVolume.name }} + mountPath: {{ .Values.configmapVolume.mountPath }} + volumes: + - name: {{ .Values.configmapVolume.name }} + configMap: + name: {{ include "mychart.fullname" . 
}}-data + - name: {{ .Values.workingVolume.name }} + emptyDir: {} diff --git a/resources/plugins/simln/charts/simln/values.yaml b/resources/plugins/simln/charts/simln/values.yaml new file mode 100644 index 000000000..a1647a963 --- /dev/null +++ b/resources/plugins/simln/charts/simln/values.yaml @@ -0,0 +1,13 @@ +name: "simln" +image: + repository: "bitcoindevproject/simln" + tag: "0.2.3" + pullPolicy: IfNotPresent + +workingVolume: + name: working-volume + mountPath: /working +configmapVolume: + name: configmap-volume + mountPath: /configmap + diff --git a/resources/plugins/simln/plugin.py b/resources/plugins/simln/plugin.py new file mode 100755 index 000000000..1411ea645 --- /dev/null +++ b/resources/plugins/simln/plugin.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python3 +import json +import logging +import time +from enum import Enum +from pathlib import Path +from typing import Optional + +import click +from kubernetes.stream import stream + +from warnet.constants import LIGHTNING_MISSION, PLUGIN_ANNEX, AnnexMember, HookValue, WarnetContent +from warnet.k8s import ( + download, + get_default_namespace, + get_mission, + get_static_client, + wait_for_init, + write_file_to_container, +) +from warnet.process import run_command + +MISSION = "simln" +PRIMARY_CONTAINER = MISSION + +PLUGIN_DIR_TAG = "plugin_dir" + + +class PluginError(Exception): + pass + + +log = logging.getLogger(MISSION) +log.setLevel(logging.DEBUG) +console_handler = logging.StreamHandler() +console_handler.setLevel(logging.DEBUG) +formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") +console_handler.setFormatter(formatter) +log.addHandler(console_handler) + + +class PluginContent(Enum): + ACTIVITY = "activity" + + +@click.group() +@click.pass_context +def simln(ctx): + """Commands for the SimLN plugin""" + ctx.ensure_object(dict) + plugin_dir = Path(__file__).resolve().parent + ctx.obj[PLUGIN_DIR_TAG] = Path(plugin_dir) + + +@simln.command() +@click.argument("plugin_content", type=str) +@click.argument("warnet_content", type=str) +@click.pass_context +def entrypoint(ctx, plugin_content: str, warnet_content: str): + """Plugin entrypoint""" + plugin_content: dict = json.loads(plugin_content) + warnet_content: dict = json.loads(warnet_content) + + hook_value = warnet_content.get(WarnetContent.HOOK_VALUE.value) + + assert hook_value in { + item.value for item in HookValue + }, f"{hook_value} is not a valid HookValue" + + if warnet_content.get(PLUGIN_ANNEX): + for annex_member in [annex_item for annex_item in warnet_content.get(PLUGIN_ANNEX)]: + assert annex_member in { + item.value for item in AnnexMember + }, f"{annex_member} is not a valid AnnexMember" + + warnet_content[WarnetContent.HOOK_VALUE.value] = HookValue(hook_value) + + _entrypoint(ctx, plugin_content, warnet_content) + + +def _entrypoint(ctx, plugin_content: dict, warnet_content: dict): + """Called by entrypoint""" + # write your plugin startup commands here + activity = plugin_content.get(PluginContent.ACTIVITY.value) + if activity: + activity = json.loads(activity) + print(activity) + _launch_activity(activity, ctx.obj.get(PLUGIN_DIR_TAG)) + + +@simln.command() +def list_pod_names(): + """Get a list of SimLN pod names""" + print([pod.metadata.name for pod in get_mission(MISSION)]) + + +@simln.command() +@click.argument("pod_name", type=str) +def download_results(pod_name: str): + """Download SimLN results to the current directory""" + dest = download(pod_name, source_path=Path("/working/results")) + print(f"Downloaded results to: 
{dest}") + + +def _get_example_activity() -> list[dict]: + pods = get_mission(LIGHTNING_MISSION) + try: + pod_a = pods[1].metadata.name + pod_b = pods[2].metadata.name + except Exception as err: + raise PluginError( + "Could not access the lightning nodes needed for the example.\n Try deploying some." + ) from err + return [{"source": pod_a, "destination": pod_b, "interval_secs": 1, "amount_msat": 2000}] + + +@simln.command() +def get_example_activity(): + """Get an activity representing node 2 sending msat to node 3""" + print(json.dumps(_get_example_activity())) + + +@simln.command() +@click.argument(PluginContent.ACTIVITY.value, type=str) +@click.pass_context +def launch_activity(ctx, activity: str): + """Deploys a SimLN Activity which is a JSON list of objects""" + try: + parsed_activity = json.loads(activity) + except json.JSONDecodeError: + log.error("Invalid JSON input for activity.") + raise click.BadArgumentUsage("Activity must be a valid JSON string.") from None + plugin_dir = ctx.obj.get(PLUGIN_DIR_TAG) + print(_launch_activity(parsed_activity, plugin_dir)) + + +def _launch_activity(activity: Optional[list[dict]], plugin_dir: str) -> str: + """Launch a SimLN chart which optionally includes the `activity`""" + timestamp = int(time.time()) + name = f"simln-{timestamp}" + + command = f"helm upgrade --install {timestamp} {plugin_dir}/charts/simln" + + run_command(command) + activity_json = _generate_activity_json(activity) + wait_for_init(name, namespace=get_default_namespace(), quiet=True) + + if write_file_to_container( + name, + "init", + "/working/sim.json", + activity_json, + namespace=get_default_namespace(), + quiet=True, + ): + return name + else: + raise PluginError(f"Could not write sim.json to the init container: {name}") + + +def _generate_activity_json(activity: Optional[list[dict]]) -> str: + nodes = [] + + for i in get_mission(LIGHTNING_MISSION): + name = i.metadata.name + node = { + "id": name, + "address": f"https://{name}:10009", + "macaroon": "/working/admin.macaroon", + "cert": "/working/tls.cert", + } + nodes.append(node) + + if activity: + data = {"nodes": nodes, PluginContent.ACTIVITY.value: activity} + else: + data = {"nodes": nodes} + + return json.dumps(data, indent=2) + + +def _sh(pod, method: str, params: tuple[str, ...]) -> str: + namespace = get_default_namespace() + + sclient = get_static_client() + if params: + cmd = [method] + cmd.extend(params) + else: + cmd = [method] + try: + resp = stream( + sclient.connect_get_namespaced_pod_exec, + pod, + namespace, + container=PRIMARY_CONTAINER, + command=cmd, + stderr=True, + stdin=False, + stdout=True, + tty=False, + _preload_content=False, + ) + stdout = "" + stderr = "" + while resp.is_open(): + resp.update(timeout=1) + if resp.peek_stdout(): + stdout_chunk = resp.read_stdout() + stdout += stdout_chunk + if resp.peek_stderr(): + stderr_chunk = resp.read_stderr() + stderr += stderr_chunk + return stdout + stderr + except Exception as err: + print(f"Could not execute stream: {err}") + + +@simln.command(context_settings={"ignore_unknown_options": True}) +@click.argument("pod", type=str) +@click.argument("method", type=str) +@click.argument("params", type=str, nargs=-1) # this will capture all remaining arguments +def sh(pod: str, method: str, params: tuple[str, ...]): + """Run shell commands in a pod""" + print(_sh(pod, method, params)) + + +if __name__ == "__main__": + simln() diff --git a/resources/scenarios/commander.py b/resources/scenarios/commander.py index 1f7d34a80..8a50b2fc5 100644 --- 
a/resources/scenarios/commander.py +++ b/resources/scenarios/commander.py @@ -1,4 +1,5 @@ import argparse +import base64 import configparser import json import logging @@ -8,8 +9,12 @@ import signal import sys import tempfile +import threading +from time import sleep from typing import Dict +from kubernetes import client, config +from ln_framework.ln import LND from test_framework.authproxy import AuthServiceProxy from test_framework.p2p import NetworkThread from test_framework.test_framework import ( @@ -20,13 +25,52 @@ from test_framework.test_node import TestNode from test_framework.util import PortSeed, get_rpc_proxy -WARNET_FILE = "/shared/warnet.json" +# Figure out what namespace we are in +with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace") as f: + NAMESPACE = f.read().strip() + +# Get the in-cluster k8s client to determine what we have access to +config.load_incluster_config() +sclient = client.CoreV1Api() try: - with open(WARNET_FILE) as file: - WARNET = json.load(file) + # An admin with cluster access can list everything. + # A wargames player with namespaced access will get a FORBIDDEN error here + pods = sclient.list_pod_for_all_namespaces() + cmaps = sclient.list_config_map_for_all_namespaces() except Exception: - WARNET = [] + # Just get whatever we have access to in this namespace only + pods = sclient.list_namespaced_pod(namespace=NAMESPACE) + cmaps = sclient.list_namespaced_config_map(namespace=NAMESPACE) + +WARNET = {"tanks": [], "lightning": [], "channels": []} +for pod in pods.items: + if "mission" not in pod.metadata.labels: + continue + + if pod.metadata.labels["mission"] == "tank": + WARNET["tanks"].append( + { + "tank": pod.metadata.name, + "chain": pod.metadata.labels["chain"], + "rpc_host": pod.status.pod_ip, + "rpc_port": int(pod.metadata.labels["RPCPort"]), + "rpc_user": "user", + "rpc_password": pod.metadata.labels["rpcpassword"], + "init_peers": pod.metadata.annotations["init_peers"], + } + ) + + if pod.metadata.labels["mission"] == "lightning": + WARNET["lightning"].append(pod.metadata.name) + +for cm in cmaps.items: + if not cm.metadata.labels or "channels" not in cm.metadata.labels: + continue + channel_jsons = json.loads(cm.data["channels"]) + for channel_json in channel_jsons: + channel_json["source"] = cm.data["source"] + WARNET["channels"].append(channel_json) # Ensure that all RPC calls are made with brand new http connections @@ -55,6 +99,41 @@ def ensure_miner(node): node.createwallet("miner", descriptors=True) return node.get_wallet_rpc("miner") + @staticmethod + def hex_to_b64(hex): + return base64.b64encode(bytes.fromhex(hex)).decode() + + @staticmethod + def b64_to_hex(b64, reverse=False): + if reverse: + return base64.b64decode(b64)[::-1].hex() + else: + return base64.b64decode(b64).hex() + + def wait_for_tanks_connected(self): + def tank_connected(self, tank): + while True: + peers = tank.getpeerinfo() + count = sum( + 1 + for peer in peers + if peer.get("connection_type") == "manual" or peer.get("addnode") is True + ) + self.log.info(f"Tank {tank.tank} connected to {count}/{tank.init_peers} peers") + if count >= tank.init_peers: + break + else: + sleep(1) + + conn_threads = [ + threading.Thread(target=tank_connected, args=(self, tank)) for tank in self.nodes + ] + for thread in conn_threads: + thread.start() + + all(thread.join() is None for thread in conn_threads) + self.log.info("Network connected") + def handle_sigterm(self, signum, frame): print("SIGTERM received, stopping...") self.shutdown() @@ -82,8 +161,10 @@ def 
setup(self): # Keep a separate index of tanks by pod name self.tanks: Dict[str, TestNode] = {} + self.lns: Dict[str, LND] = {} + self.channels = WARNET["channels"] - for i, tank in enumerate(WARNET): + for i, tank in enumerate(WARNET["tanks"]): self.log.info( f"Adding TestNode #{i} from pod {tank['tank']} with IP {tank['rpc_host']}" ) @@ -107,10 +188,14 @@ def setup(self): coveragedir=self.options.coveragedir, ) node.rpc_connected = True - node.init_peers = tank["init_peers"] + node.init_peers = int(tank["init_peers"]) + self.nodes.append(node) self.tanks[tank["tank"]] = node + for ln in WARNET["lightning"]: + self.lns[ln] = LND(ln) + self.num_nodes = len(self.nodes) # Set up temp directory and start logging diff --git a/resources/scenarios/ln_framework/__init__.py b/resources/scenarios/ln_framework/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/resources/scenarios/ln_framework/ln.py b/resources/scenarios/ln_framework/ln.py new file mode 100644 index 000000000..8fcdc1bc7 --- /dev/null +++ b/resources/scenarios/ln_framework/ln.py @@ -0,0 +1,174 @@ +import http.client +import json +import ssl +import time + +# hard-coded deterministic lnd credentials +ADMIN_MACAROON_HEX = "0201036c6e6402f801030a1062beabbf2a614b112128afa0c0b4fdd61201301a160a0761646472657373120472656164120577726974651a130a04696e666f120472656164120577726974651a170a08696e766f69636573120472656164120577726974651a210a086d616361726f6f6e120867656e6572617465120472656164120577726974651a160a076d657373616765120472656164120577726974651a170a086f6666636861696e120472656164120577726974651a160a076f6e636861696e120472656164120577726974651a140a057065657273120472656164120577726974651a180a067369676e6572120867656e657261746512047265616400000620b17be53e367290871681055d0de15587f6d1cd47d1248fe2662ae27f62cfbdc6" +# Don't worry about lnd's self-signed certificates +INSECURE_CONTEXT = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) +INSECURE_CONTEXT.check_hostname = False +INSECURE_CONTEXT.verify_mode = ssl.CERT_NONE + + +# https://github.com/lightningcn/lightning-rfc/blob/master/07-routing-gossip.md#the-channel_update-message +# We use the field names as written in the BOLT as our canonical, internal field names. +# In LND, Policy objects returned by DescribeGraph have completely different labels +# than policy objects expected by the UpdateChannelPolicy API, and neither +# of these are the names used in the BOLT... 
+class Policy: + def __init__( + self, + cltv_expiry_delta: int, + htlc_minimum_msat: int, + fee_base_msat: int, + fee_proportional_millionths: int, + htlc_maximum_msat: int, + ): + self.cltv_expiry_delta = cltv_expiry_delta + self.htlc_minimum_msat = htlc_minimum_msat + self.fee_base_msat = fee_base_msat + self.fee_proportional_millionths = fee_proportional_millionths + self.htlc_maximum_msat = htlc_maximum_msat + + @classmethod + def from_lnd_describegraph(cls, policy: dict): + return cls( + cltv_expiry_delta=int(policy.get("time_lock_delta")), + htlc_minimum_msat=int(policy.get("min_htlc")), + fee_base_msat=int(policy.get("fee_base_msat")), + fee_proportional_millionths=int(policy.get("fee_rate_milli_msat")), + htlc_maximum_msat=int(policy.get("max_htlc_msat")), + ) + + @classmethod + def from_dict(cls, policy: dict): + return cls( + cltv_expiry_delta=policy.get("cltv_expiry_delta"), + htlc_minimum_msat=policy.get("htlc_minimum_msat"), + fee_base_msat=policy.get("fee_base_msat"), + fee_proportional_millionths=policy.get("fee_proportional_millionths"), + htlc_maximum_msat=policy.get("htlc_maximum_msat"), + ) + + def to_dict(self): + return { + "cltv_expiry_delta": self.cltv_expiry_delta, + "htlc_minimum_msat": self.htlc_minimum_msat, + "fee_base_msat": self.fee_base_msat, + "fee_proportional_millionths": self.fee_proportional_millionths, + "htlc_maximum_msat": self.htlc_maximum_msat, + } + + def to_lnd_chanpolicy(self, capacity): + # LND requires a 1% reserve + reserve = ((capacity * 99) // 100) * 1000 + return { + "time_lock_delta": self.cltv_expiry_delta, + "min_htlc_msat": self.htlc_minimum_msat, + "base_fee_msat": self.fee_base_msat, + "fee_rate_ppm": self.fee_proportional_millionths, + "max_htlc_msat": min(self.htlc_maximum_msat, reserve), + "min_htlc_msat_specified": True, + } + + +class LND: + def __init__(self, pod_name): + self.name = pod_name + self.conn = http.client.HTTPSConnection( + host=pod_name, port=8080, timeout=5, context=INSECURE_CONTEXT + ) + + def get(self, uri): + while True: + try: + self.conn.request( + method="GET", + url=uri, + headers={"Grpc-Metadata-macaroon": ADMIN_MACAROON_HEX, "Connection": "close"}, + ) + return self.conn.getresponse().read().decode("utf8") + except Exception: + time.sleep(1) + + def post(self, uri, data): + body = json.dumps(data) + attempt = 0 + while True: + attempt += 1 + try: + self.conn.request( + method="POST", + url=uri, + body=body, + headers={ + "Content-Type": "application/json", + "Content-Length": str(len(body)), + "Grpc-Metadata-macaroon": ADMIN_MACAROON_HEX, + "Connection": "close", + }, + ) + # Stream output, otherwise we get a timeout error + res = self.conn.getresponse() + stream = "" + while True: + try: + data = res.read(1) + if len(data) == 0: + break + else: + stream += data.decode("utf8") + except Exception: + break + return stream + except Exception: + time.sleep(1) + + def newaddress(self): + res = self.get("/v1/newaddress") + return json.loads(res) + + def walletbalance(self): + res = self.get("/v1/balance/blockchain") + return int(json.loads(res)["confirmed_balance"]) + + def uri(self): + res = self.get("/v1/getinfo") + info = json.loads(res) + if "uris" not in info or len(info["uris"]) == 0: + return None + return info["uris"][0] + + def connect(self, target_uri): + pk, host = target_uri.split("@") + res = self.post("/v1/peers", data={"addr": {"pubkey": pk, "host": host}}) + return json.loads(res) + + def channel(self, pk, capacity, push_amt, fee_rate): + res = self.post( + "/v1/channels/stream", + data={ + 
"local_funding_amount": capacity, + "push_sat": push_amt, + "node_pubkey": pk, + "sat_per_vbyte": fee_rate, + }, + ) + return json.loads(res) + + def update(self, txid_hex: str, policy: dict, capacity: int): + ln_policy = Policy.from_dict(policy).to_lnd_chanpolicy(capacity) + data = {"chan_point": {"funding_txid_str": txid_hex, "output_index": 0}, **ln_policy} + res = self.post( + "/v1/chanpolicy", + # Policy objects returned by DescribeGraph have + # completely different labels than policy objects expected + # by the UpdateChannelPolicy API. + data=data, + ) + return json.loads(res) + + def graph(self): + res = self.get("/v1/graph") + return json.loads(res) diff --git a/resources/scenarios/ln_init.py b/resources/scenarios/ln_init.py index 82745a123..96dd8161f 100644 --- a/resources/scenarios/ln_init.py +++ b/resources/scenarios/ln_init.py @@ -1,8 +1,10 @@ #!/usr/bin/env python3 +import threading from time import sleep from commander import Commander +from ln_framework.ln import Policy class LNInit(Commander): @@ -14,171 +16,374 @@ def add_options(self, parser): parser.usage = "warnet run /path/to/ln_init.py" def run_test(self): - self.log.info("Lock out of IBD") + ## + # L1 P2P + ## + self.log.info("Waiting for L1 p2p network connections...") + self.wait_for_tanks_connected() + + ## + # MINER + ## + self.log.info("Setting up miner...") miner = self.ensure_miner(self.nodes[0]) miner_addr = miner.getnewaddress() - self.generatetoaddress(self.nodes[0], 1, miner_addr, sync_fun=self.no_op) - self.log.info("Get LN nodes and wallet addresses") - ln_nodes = [] - recv_addrs = [] - for tank in self.warnet.tanks: - if tank.lnnode is not None: - recv_addrs.append(tank.lnnode.getnewaddress()) - ln_nodes.append(tank.index) + def gen(n): + return self.generatetoaddress(self.nodes[0], n, miner_addr, sync_fun=self.no_op) - self.log.info("Fund LN wallets") - miner = self.ensure_miner(self.nodes[0]) - miner_addr = miner.getnewaddress() - # 298 block base - self.generatetoaddress(self.nodes[0], 297, miner_addr, sync_fun=self.no_op) - # divvy up the goods - split = (miner.getbalance() - 1) // len(recv_addrs) + self.log.info("Locking out of IBD...") + gen(1) + + ## + # WALLET ADDRESSES + ## + self.log.info("Getting LN wallet addresses...") + ln_addrs = [] + + def get_ln_addr(self, ln): + while True: + res = ln.newaddress() + if "address" in res: + addr = res["address"] + ln_addrs.append(addr) + self.log.info(f"Got wallet address {addr} from {ln.name}") + break + else: + self.log.info( + f"Couldn't get wallet address from {ln.name}:\n {res}\n wait and retry..." + ) + sleep(1) + + addr_threads = [ + threading.Thread(target=get_ln_addr, args=(self, ln)) for ln in self.lns.values() + ] + for thread in addr_threads: + thread.start() + + all(thread.join() is None for thread in addr_threads) + self.log.info(f"Got {len(ln_addrs)} addresses from {len(self.lns)} LN nodes") + + ## + # FUNDS + ## + self.log.info("Funding LN wallets...") + # 298 block base for miner wallet + gen(297) + # divvy up the goods, except fee. 
+ # 10 UTXOs per node means 10 channel opens per node per block + split = (miner.getbalance() - 1) // len(ln_addrs) // 10 sends = {} - for addr in recv_addrs: - sends[addr] = split - miner.sendmany("", sends) + for _ in range(10): + for addr in ln_addrs: + sends[addr] = split + miner.sendmany("", sends) # confirm funds in block 299 - self.generatetoaddress(self.nodes[0], 1, miner_addr, sync_fun=self.no_op) + gen(1) self.log.info( - f"Waiting for funds to be spendable: {split} BTC each for {len(recv_addrs)} LN nodes" + f"Waiting for funds to be spendable: 10x{split} BTC UTXOs each for {len(ln_addrs)} LN nodes" ) - def funded_lnnodes(): - for tank in self.warnet.tanks: - if tank.lnnode is None: - continue - if int(tank.lnnode.get_wallet_balance()) < (split * 100000000): - return False - return True + def confirm_ln_balance(self, ln): + bal = 0 + while True: + bal = ln.walletbalance() + if bal >= (split * 100000000): + self.log.info(f"LN node {ln.name} confirmed funds") + break + sleep(1) - self.wait_until(funded_lnnodes, timeout=5 * 60) + fund_threads = [ + threading.Thread(target=confirm_ln_balance, args=(self, ln)) for ln in self.lns.values() + ] + for thread in fund_threads: + thread.start() - ln_nodes_uri = ln_nodes.copy() - while len(ln_nodes_uri) > 0: - self.log.info( - f"Waiting for all LN nodes to have URI, LN nodes remaining: {ln_nodes_uri}" - ) - for index in ln_nodes_uri: - lnnode = self.warnet.tanks[index].lnnode - if lnnode.getURI(): - ln_nodes_uri.remove(index) - sleep(5) - - self.log.info("Adding p2p connections to LN nodes") - for edge in self.warnet.graph.edges(data=True): - (src, dst, data) = edge - # Copy the L1 p2p topology (where applicable) to L2 - # so we get a more robust p2p graph for lightning - if ( - "channel_open" not in data - and self.warnet.tanks[src].lnnode - and self.warnet.tanks[dst].lnnode - ): - self.warnet.tanks[src].lnnode.connect_to_tank(dst) - - # Start confirming channel opens in block 300 - self.log.info("Opening channels, one per block") - chan_opens = [] - edges = self.warnet.graph.edges(data=True, keys=True) - edges = sorted(edges, key=lambda edge: edge[2]) - for edge in edges: - (src, dst, key, data) = edge - if "channel_open" in data: - src_node = self.warnet.get_ln_node_from_tank(src) - assert src_node is not None - assert self.warnet.get_ln_node_from_tank(dst) is not None - self.log.info(f"opening channel {src}->{dst}") - chan_pt = src_node.open_channel_to_tank(dst, data["channel_open"]) - # We can guarantee deterministic short channel IDs as long as - # the change output is greater than the channel funding output, - # which will then be output 0 - assert chan_pt[64:] == ":0" - chan_opens.append((edge, chan_pt)) - self.log.info(f" pending channel point: {chan_pt}") - self.wait_until( - lambda chan_pt=chan_pt: chan_pt[:64] in self.nodes[0].getrawmempool() - ) - self.generatetoaddress(self.nodes[0], 1, miner_addr) - assert chan_pt[:64] not in self.nodes[0].getrawmempool() - height = self.nodes[0].getblockcount() - self.log.info(f" confirmed in block {height}") + all(thread.join() is None for thread in fund_threads) + self.log.info("All LN nodes are funded") + + ## + # URIs + ## + self.log.info("Getting URIs for all LN nodes...") + ln_uris = {} + + def get_ln_uri(self, ln): + uri = None + while True: + uri = ln.uri() + if uri: + ln_uris[ln.name] = uri + self.log.info(f"LN node {ln.name} has URI {uri}") + break + sleep(1) + + uri_threads = [ + threading.Thread(target=get_ln_uri, args=(self, ln)) for ln in self.lns.values() + ] + for thread in 
uri_threads: + thread.start() + + all(thread.join() is None for thread in uri_threads) + self.log.info("Got URIs from all LN nodes") + + ## + # P2P CONNECTIONS + ## + self.log.info("Adding p2p connections to LN nodes...") + # (source: LND, target_uri: str) tuples of LND instances + connections = [] + # Cycle graph through all LN nodes + nodes = list(self.lns.values()) + prev_node = nodes[-1] + for node in nodes: + connections.append((node, prev_node)) + prev_node = node + # Explicit connections between every pair of channel partners + for ch in self.channels: + src = self.lns[ch["source"]] + tgt = self.lns[ch["target"]] + # Avoid duplicates and reciprocals + if (src, tgt) not in connections and (tgt, src) not in connections: + connections.append((src, tgt)) + + def connect_ln(self, pair): + while True: + res = pair[0].connect(ln_uris[pair[1].name]) + if res == {}: + self.log.info(f"Connected LN nodes {pair[0].name} -> {pair[1].name}") + break + if "message" in res: + if "already connected" in res["message"]: + self.log.info( + f"Already connected LN nodes {pair[0].name} -> {pair[1].name}" + ) + break + if "process of starting" in res["message"]: + self.log.info( + f"{pair[0].name} not ready for connections yet, wait and retry..." + ) + sleep(1) + else: + self.log.info( + f"Unexpected response attempting to connect {pair[0].name} -> {pair[1].name}:\n {res}\n ABORTING" + ) + break + + p2p_threads = [ + threading.Thread(target=connect_ln, args=(self, pair)) for pair in connections + ] + for thread in p2p_threads: + thread.start() + + all(thread.join() is None for thread in p2p_threads) + self.log.info("Established all LN p2p connections") + + ## + # CHANNELS + ## + self.log.info("Opening lightning channels...") + # Sort the channels by assigned block and index + # so their channel ids are deterministic + ch_by_block = {} + for ch in self.channels: + # TODO: if "id" not in ch ... 
+ block = ch["id"]["block"] + if block not in ch_by_block: + ch_by_block[block] = [ch] + else: + ch_by_block[block].append(ch) + blocks = list(ch_by_block.keys()) + blocks = sorted(blocks) + + for target_block in blocks: + # First make sure the target block is the next block + current_height = self.nodes[0].getblockcount() + need = target_block - current_height + if need < 1: + raise Exception("Blockchain too long for deterministic channel ID") + if need > 1: + gen(need - 1) + + def open_channel(self, ch, fee_rate): + src = self.lns[ch["source"]] + tgt_uri = ln_uris[ch["target"]] + tgt_pk, _ = tgt_uri.split("@") self.log.info( - f" channel_id should be: {int.from_bytes(height.to_bytes(3, 'big') + (1).to_bytes(3, 'big') + (0).to_bytes(2, 'big'), 'big')}" + f"Sending channel open from {ch['source']} -> {ch['target']} with fee_rate={fee_rate}" ) - - # Ensure all channel opens are sufficiently confirmed - self.generatetoaddress(self.nodes[0], 10, miner_addr, sync_fun=self.no_op) - ln_nodes_gossip = ln_nodes.copy() - while len(ln_nodes_gossip) > 0: - self.log.info(f"Waiting for graph gossip sync, LN nodes remaining: {ln_nodes_gossip}") - for index in ln_nodes_gossip: - lnnode = self.warnet.tanks[index].lnnode - count_channels = len(lnnode.get_graph_channels()) - count_graph_nodes = len(lnnode.get_graph_nodes()) - if count_channels == len(chan_opens) and count_graph_nodes == len(ln_nodes): - ln_nodes_gossip.remove(index) + res = src.channel( + pk=self.hex_to_b64(tgt_pk), + capacity=ch["capacity"], + push_amt=ch["push_amt"], + fee_rate=fee_rate, + ) + if "result" not in res: + self.log.info( + "Unexpected channel open response:\n " + + f"From {ch['source']} -> {ch['target']} fee_rate={fee_rate}\n " + + f"{res}" + ) else: + txid = self.b64_to_hex(res["result"]["chan_pending"]["txid"], reverse=True) + ch["txid"] = txid self.log.info( - f" node {index} not synced (channels: {count_channels}/{len(chan_opens)}, nodes: {count_graph_nodes}/{len(ln_nodes)})" + f"Channel open {ch['source']} -> {ch['target']}\n " + + f"outpoint={txid}:{res['result']['chan_pending']['output_index']}\n " + + f"expected channel id: {ch['id']}" ) - sleep(5) - - self.log.info("Updating channel policies") - for edge, chan_pt in chan_opens: - (src, dst, key, data) = edge - if "target_policy" in data: - target_node = self.warnet.get_ln_node_from_tank(dst) - target_node.update_channel_policy(chan_pt, data["target_policy"]) - if "source_policy" in data: - source_node = self.warnet.get_ln_node_from_tank(src) - source_node.update_channel_policy(chan_pt, data["source_policy"]) - - while True: - self.log.info("Waiting for all channel policies to match") - score = 0 - for tank_index, me in enumerate(ln_nodes): - you = (tank_index + 1) % len(ln_nodes) - my_channels = self.warnet.tanks[me].lnnode.get_graph_channels() - your_channels = self.warnet.tanks[you].lnnode.get_graph_channels() - match = True - for _chan_index, my_chan in enumerate(my_channels): - your_chan = [ - chan - for chan in your_channels - if chan.short_chan_id == my_chan.short_chan_id - ][0] - if not your_chan: - print(f"Channel policy missing for channel: {my_chan.short_chan_id}") - match = False - break - try: - if not my_chan.channel_match(your_chan): - print( - f"Channel policy doesn't match between tanks {me} & {you}: {my_chan.short_chan_id}" - ) - match = False - break - except Exception as e: - print(f"Error comparing channel policies: {e}") - print( - f"Channel policy doesn't match between tanks {me} & {you}: {my_chan.short_chan_id}" - ) - match = False - break - if 
match: - print(f"All channel policies match between tanks {me} & {you}") - score += 1 - print(f"Score: {score} / {len(ln_nodes)}") - if score == len(ln_nodes): - break - sleep(5) + channels = sorted(ch_by_block[target_block], key=lambda ch: ch["id"]["index"]) + index = 0 + fee_rate = 5006 # s/vB, decreases by 5 per tx for up to 1000 txs per block + ch_threads = [] + for ch in channels: + index += 1 # noqa + fee_rate -= 5 + assert index == ch["id"]["index"], "Channel ID indexes are not consecutive" + assert fee_rate >= 1, "Too many TXs in block, out of fee range" + t = threading.Thread(target=open_channel, args=(self, ch, fee_rate)) + t.start() + ch_threads.append(t) - self.log.info( - f"Warnet LN ready with {len(recv_addrs)} nodes and {len(chan_opens)} channels." - ) + all(thread.join() is None for thread in ch_threads) + self.log.info(f"Waiting for {len(channels)} channel opens in mempool...") + self.wait_until( + lambda channels=channels: self.nodes[0].getmempoolinfo()["size"] >= len(channels), + timeout=500, + ) + block_hash = gen(1)[0] + self.log.info(f"Confirmed {len(channels)} channel opens in block {target_block}") + self.log.info("Checking deterministic channel IDs in block...") + block = self.nodes[0].getblock(block_hash) + block_txs = block["tx"] + block_height = block["height"] + for ch in channels: + assert ch["id"]["block"] == block_height, f"Actual block:{block_height}\n{ch}" + assert ( + block_txs[ch["id"]["index"]] == ch["txid"] + ), f"Actual txid:{block_txs[ch["id"]["index"]]}\n{ch}" + self.log.info("👍") + + gen(5) + self.log.info(f"Confirmed {len(self.channels)} total channel opens") + + self.log.info("Waiting for channel announcement gossip...") + + def ln_all_chs(self, ln): + expected = len(self.channels) + while len(ln.graph()["edges"]) != expected: + sleep(1) + self.log.info(f"LN {ln.name} has graph with all {expected} channels") + + ch_ann_threads = [ + threading.Thread(target=ln_all_chs, args=(self, ln)) for ln in self.lns.values() + ] + for thread in ch_ann_threads: + thread.start() + + all(thread.join() is None for thread in ch_ann_threads) + self.log.info("All LN nodes have complete graph") + + ## + # UPDATE CHANNEL POLICIES + ## + self.log.info("Updating channel policies...") + + def update_policy(self, ln, txid_hex, policy, capacity): + self.log.info(f"Sending update from {ln.name} for channel with outpoint: {txid_hex}:0") + res = ln.update(txid_hex, policy, capacity) + assert ( + len(res["failed_updates"]) == 0 + ), f" Failed updates: {res["failed_updates"]}\n txid: {txid_hex}\n policy:{policy}" + + update_threads = [] + for ch in self.channels: + if "source_policy" in ch: + ts = threading.Thread( + target=update_policy, + args=( + self, + self.lns[ch["source"]], + ch["txid"], + ch["source_policy"], + ch["capacity"], + ), + ) + ts.start() + update_threads.append(ts) + if "target_policy" in ch: + tt = threading.Thread( + target=update_policy, + args=( + self, + self.lns[ch["target"]], + ch["txid"], + ch["target_policy"], + ch["capacity"], + ), + ) + tt.start() + update_threads.append(tt) + count = len(update_threads) + + all(thread.join() is None for thread in update_threads) + self.log.info(f"Sent {count} channel policy updates") + + self.log.info("Waiting for all channel policy gossip to synchronize...") + + def policy_equal(pol1, pol2, capacity): + return pol1.to_lnd_chanpolicy(capacity) == pol2.to_lnd_chanpolicy(capacity) + + def matching_graph(self, expected, ln): + while True: + actual = ln.graph()["edges"] + assert len(expected) == len(actual) + done = 
True + for i, actual_ch in enumerate(actual): + expected_ch = expected[i] + capacity = expected_ch["capacity"] + # We assert this because it isn't updated as part of policy. + # If this fails we have a bigger issue + assert int(actual_ch["capacity"]) == capacity + + # Policies were not defined in network.yaml + if "source_policy" not in expected_ch or "target_policy" not in expected_ch: + continue + + # policy actual/expected source/target + polas = Policy.from_lnd_describegraph(actual_ch["node1_policy"]) + polat = Policy.from_lnd_describegraph(actual_ch["node2_policy"]) + poles = Policy(**expected_ch["source_policy"]) + polet = Policy(**expected_ch["target_policy"]) + # Allow policy swap when comparing channels + if policy_equal(polas, poles, capacity) and policy_equal( + polat, polet, capacity + ): + continue + if policy_equal(polas, polet, capacity) and policy_equal( + polat, poles, capacity + ): + continue + done = False + break + if done: + self.log.info(f"LN {ln.name} graph channel policies all match expected source") + break + else: + sleep(1) + + expected = sorted(self.channels, key=lambda ch: (ch["id"]["block"], ch["id"]["index"])) + policy_threads = [ + threading.Thread(target=matching_graph, args=(self, expected, ln)) + for ln in self.lns.values() + ] + for thread in policy_threads: + thread.start() + + all(thread.join() is None for thread in policy_threads) + self.log.info("All LN nodes have matching graph!") def main(): diff --git a/resources/scenarios/test_scenarios/generate_one_allnodes.py b/resources/scenarios/test_scenarios/generate_one_allnodes.py new file mode 100644 index 000000000..30ea41445 --- /dev/null +++ b/resources/scenarios/test_scenarios/generate_one_allnodes.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 + +# The base class exists inside the commander container +try: + from commander import Commander +except Exception: + from resources.scenarios.commander import Commander + + +class GenOneAllNodes(Commander): + def set_test_params(self): + self.num_nodes = 1 + + def add_options(self, parser): + parser.description = ( + "Attempt to generate one block on every node the scenario has access to" + ) + parser.usage = "warnet run /path/to/generate_one_allnodes.py" + + def run_test(self): + for node in self.nodes: + wallet = self.ensure_miner(node) + addr = wallet.getnewaddress("bech32") + self.log.info(f"node: {node.tank}") + self.log.info(self.generatetoaddress(node, 1, addr)) + + +def main(): + GenOneAllNodes().main() + + +if __name__ == "__main__": + main() diff --git a/resources/scripts/ssl/cert-gen.sh b/resources/scripts/ssl/cert-gen.sh new file mode 100755 index 000000000..c1370f884 --- /dev/null +++ b/resources/scripts/ssl/cert-gen.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +# Generate the private key using the P-256 curve +openssl ecparam -name prime256v1 -genkey -noout -out tls.key + +# Generate the self-signed certificate using the configuration file +# Expires in ten years, 2034 +openssl req -x509 -new -nodes -key tls.key -days 3650 -out tls.cert -config openssl-config.cnf diff --git a/resources/scripts/ssl/openssl-config.cnf b/resources/scripts/ssl/openssl-config.cnf new file mode 100644 index 000000000..db4e4a162 --- /dev/null +++ b/resources/scripts/ssl/openssl-config.cnf @@ -0,0 +1,28 @@ +[ req ] +distinguished_name = req_distinguished_name +req_extensions = req_ext +x509_extensions = v3_ca +prompt = no + +[ req_distinguished_name ] +O = lnd autogenerated cert +CN = warnet + +[ req_ext ] +keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign 
+extendedKeyUsage = serverAuth +basicConstraints = critical, CA:true +subjectKeyIdentifier = hash + +[ v3_ca ] +keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign +extendedKeyUsage = serverAuth +basicConstraints = critical, CA:true +subjectKeyIdentifier = hash +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = localhost +DNS.2 = * +IP.1 = 127.0.0.1 +IP.2 = ::1 diff --git a/resources/scripts/ssl/tls.cert b/resources/scripts/ssl/tls.cert new file mode 100644 index 000000000..6cf6e306a --- /dev/null +++ b/resources/scripts/ssl/tls.cert @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB8TCCAZagAwIBAgIUJDsR6mmY+TaO9pCfjtotlbOkzJMwCgYIKoZIzj0EAwIw +MjEfMB0GA1UECgwWbG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2Fy +bmV0MB4XDTI0MTExMTE2NTM1MFoXDTM0MTEwOTE2NTM1MFowMjEfMB0GA1UECgwW +bG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2FybmV0MFkwEwYHKoZI +zj0CAQYIKoZIzj0DAQcDQgAEBVltIvaTlAQI/3FFatTqVflZuZdRJ0SmRMSJrFLP +tp0fxE7hmteSt6gjQriy90fP8j9OJXBNAjt915kLY4zVvqOBiTCBhjAOBgNVHQ8B +Af8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQU5d8QMrwhLgTkDjWA+eXZGz+dybUwLwYDVR0RBCgwJoIJbG9jYWxo +b3N0ggEqhwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMAoGCCqGSM49BAMCA0kAMEYC +IQDPofN0fEl5gTwCYhk3nZbjMqJhZ8BsSJ6K8XRhxr7zbwIhAPsgQCFOqUWg632O +NEO53OQ6CIqnpxSskjsFNH4ZBQOE +-----END CERTIFICATE----- diff --git a/resources/scripts/ssl/tls.key b/resources/scripts/ssl/tls.key new file mode 100644 index 000000000..ca0118123 --- /dev/null +++ b/resources/scripts/ssl/tls.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIIcFtWTLQv5JaRRxdkPKkO98OrvgeztbZ7h8Ev/4UbE4oAoGCCqGSM49 +AwEHoUQDQgAEBVltIvaTlAQI/3FFatTqVflZuZdRJ0SmRMSJrFLPtp0fxE7hmteS +t6gjQriy90fP8j9OJXBNAjt915kLY4zVvg== +-----END EC PRIVATE KEY----- diff --git a/src/warnet/constants.py b/src/warnet/constants.py index 46f33a3fe..017c9a749 100644 --- a/src/warnet/constants.py +++ b/src/warnet/constants.py @@ -1,4 +1,5 @@ import os +from enum import Enum from importlib.resources import files from pathlib import Path @@ -20,10 +21,34 @@ TANK_MISSION = "tank" COMMANDER_MISSION = "commander" +LIGHTNING_MISSION = "lightning" BITCOINCORE_CONTAINER = "bitcoincore" COMMANDER_CONTAINER = "commander" + +class HookValue(Enum): + PRE_DEPLOY = "preDeploy" + POST_DEPLOY = "postDeploy" + PRE_NODE = "preNode" + POST_NODE = "postNode" + PRE_NETWORK = "preNetwork" + POST_NETWORK = "postNetwork" + + +class WarnetContent(Enum): + HOOK_VALUE = "hook_value" + NAMESPACE = "namespace" + ANNEX = "annex" + + +class AnnexMember(Enum): + NODE_NAME = "node_name" + + +PLUGIN_ANNEX = "annex" + + # Directories and files for non-python assets, e.g., helm charts, example scenarios, default configs SRC_DIR = files("warnet") RESOURCES_DIR = files("resources") @@ -32,6 +57,7 @@ SCENARIOS_DIR = RESOURCES_DIR.joinpath("scenarios") CHARTS_DIR = RESOURCES_DIR.joinpath("charts") MANIFESTS_DIR = RESOURCES_DIR.joinpath("manifests") +PLUGINS_DIR = RESOURCES_DIR.joinpath("plugins") NETWORK_FILE = "network.yaml" DEFAULTS_FILE = "node-defaults.yaml" NAMESPACES_FILE = "namespaces.yaml" @@ -39,6 +65,7 @@ # Helm charts BITCOIN_CHART_LOCATION = str(CHARTS_DIR.joinpath("bitcoincore")) +LND_CHART_LOCATION = str(CHARTS_DIR.joinpath("lnd")) FORK_OBSERVER_CHART = str(CHARTS_DIR.joinpath("fork-observer")) COMMANDER_CHART = str(CHARTS_DIR.joinpath("commander")) NAMESPACES_CHART_LOCATION = CHARTS_DIR.joinpath("namespaces") @@ -96,6 +123,12 @@ }, } +LOGGING_CRD_COMMANDS = [ + "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", + "helm repo update", + 
"helm upgrade --install prometheus-operator-crds prometheus-community/prometheus-operator-crds", +] + # Helm commands for logging setup # TODO: also lots of hardcode stuff in these helm commands, will need to fix this when moving to helm charts LOGGING_HELM_COMMANDS = [ diff --git a/src/warnet/control.py b/src/warnet/control.py index 83d358a4e..b907f7449 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -6,6 +6,7 @@ import time import zipapp from concurrent.futures import ThreadPoolExecutor, as_completed +from multiprocessing import Pool from pathlib import Path from typing import Optional @@ -111,11 +112,37 @@ def stop_scenario(scenario_name): ) -def stop_all_scenarios(scenarios): - """Stop all active scenarios using Helm""" - with console.status("[bold yellow]Stopping all scenarios...[/bold yellow]"): - for scenario in scenarios: - stop_scenario(scenario) +def _stop_single(scenario: str) -> str: + """ + Stop a single scenario + + Args: + scenario: Name of the scenario to stop + + Returns: + str: Message indicating the scenario has been stopped + """ + stop_scenario(scenario) + return f"Stopped scenario: {scenario}" + + +def stop_all_scenarios(scenarios) -> None: + """ + Stop all active scenarios in parallel using multiprocessing + + Args: + scenarios: List of scenario names to stop + + Returns: + None + """ + + with console.status("[bold yellow]Stopping all scenarios...[/bold yellow]"), Pool() as pool: + results = pool.map(_stop_single, scenarios) + + for result in results: + console.print(f"[bold green]{result}[/bold green]") + console.print("[bold green]All scenarios have been stopped.[/bold green]") @@ -231,18 +258,31 @@ def get_active_network(namespace): "--source_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True), required=False ) @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) +@click.option("--admin", is_flag=True, default=False, show_default=False) @click.option("--namespace", default=None, show_default=True) def run( scenario_file: str, debug: bool, source_dir, additional_args: tuple[str], + admin: bool, namespace: Optional[str], ): """ Run a scenario from a file. 
Pass `-- --help` to get individual scenario help """ + return _run(scenario_file, debug, source_dir, additional_args, admin, namespace) + + +def _run( + scenario_file: str, + debug: bool, + source_dir, + additional_args: tuple[str], + admin: bool, + namespace: Optional[str], +) -> str: namespace = get_default_namespace_or(namespace) scenario_path = Path(scenario_file).resolve() @@ -252,24 +292,7 @@ def run( if additional_args and ("--help" in additional_args or "-h" in additional_args): return subprocess.run([sys.executable, scenario_path, "--help"]) - # Collect tank data for warnet.json name = f"commander-{scenario_name.replace('_', '')}-{int(time.time())}" - tankpods = get_mission("tank") - tanks = [ - { - "tank": tank.metadata.name, - "chain": tank.metadata.labels["chain"], - "rpc_host": tank.status.pod_ip, - "rpc_port": int(tank.metadata.labels["RPCPort"]), - "rpc_user": "user", - "rpc_password": tank.metadata.labels["rpcpassword"], - "init_peers": [], - } - for tank in tankpods - ] - - # Encode tank data for warnet.json - warnet_data = json.dumps(tanks).encode() # Create in-memory buffer to store python archive instead of writing to disk archive_buffer = io.BytesIO() @@ -280,7 +303,13 @@ def filter(path): return False if any( needle in str(path) - for needle in ["__init__.py", "commander.py", "test_framework", scenario_path.name] + for needle in [ + "__init__.py", + "commander.py", + "test_framework", + "ln_framework", + scenario_path.name, + ] ): print(f"Including: {path}") return True @@ -321,6 +350,8 @@ def filter(path): ] # Add additional arguments + if admin: + helm_command.extend(["--set", "admin=true"]) if additional_args: helm_command.extend(["--set", f"args={' '.join(additional_args)}"]) @@ -339,12 +370,11 @@ def filter(path): except subprocess.CalledProcessError as e: print(f"Failed to deploy scenario commander: {scenario_name}") print(f"Error: {e.stderr}") + return None # upload scenario files and network data to the init container wait_for_init(name, namespace=namespace) if write_file_to_container( - name, "init", "/shared/warnet.json", warnet_data, namespace=namespace - ) and write_file_to_container( name, "init", "/shared/archive.pyz", archive_data, namespace=namespace ): print(f"Successfully uploaded scenario data to commander: {scenario_name}") @@ -356,6 +386,8 @@ def filter(path): print("Deleting pod...") delete_pod(name, namespace=namespace) + return name + @click.command() @click.argument("pod_name", type=str, default="") diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index d9b5a45b5..ad4d9936a 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -1,6 +1,8 @@ +import json import subprocess import sys import tempfile +from multiprocessing import Process from pathlib import Path from typing import Optional @@ -15,22 +17,30 @@ FORK_OBSERVER_CHART, HELM_COMMAND, INGRESS_HELM_COMMANDS, + LOGGING_CRD_COMMANDS, LOGGING_HELM_COMMANDS, LOGGING_NAMESPACE, NAMESPACES_CHART_LOCATION, NAMESPACES_FILE, NETWORK_FILE, + PLUGIN_ANNEX, + SCENARIOS_DIR, WARGAMES_NAMESPACE_PREFIX, + AnnexMember, + HookValue, + WarnetContent, ) +from .control import _logs, _run from .k8s import ( get_default_namespace, get_default_namespace_or, get_mission, get_namespaces_by_type, wait_for_ingress_controller, + wait_for_pod, wait_for_pod_ready, ) -from .process import stream_command +from .process import run_command, stream_command HINT = "\nAre you trying to run a scenario? 
See `warnet run --help`" @@ -61,12 +71,7 @@ def deploy(directory, debug, namespace, to_all_users, unknown_args): if unknown_args: raise click.BadParameter(f"Unknown args: {unknown_args}{HINT}") - if to_all_users: - namespaces = get_namespaces_by_type(WARGAMES_NAMESPACE_PREFIX) - for namespace in namespaces: - _deploy(directory, debug, namespace.metadata.name, False) - else: - _deploy(directory, debug, namespace, to_all_users) + _deploy(directory, debug, namespace, to_all_users) def _deploy(directory, debug, namespace, to_all_users): @@ -75,17 +80,55 @@ def _deploy(directory, debug, namespace, to_all_users): if to_all_users: namespaces = get_namespaces_by_type(WARGAMES_NAMESPACE_PREFIX) + processes = [] for namespace in namespaces: - deploy(directory, debug, namespace.metadata.name, False) + p = Process(target=_deploy, args=(directory, debug, namespace.metadata.name, False)) + p.start() + processes.append(p) + for p in processes: + p.join() return if (directory / NETWORK_FILE).exists(): - dl = deploy_logging_stack(directory, debug) - deploy_network(directory, debug, namespace=namespace) - df = deploy_fork_observer(directory, debug) - if dl | df: - deploy_ingress(debug) - deploy_caddy(directory, debug) + run_plugins(directory, HookValue.PRE_DEPLOY, namespace) + + processes = [] + # Deploy logging CRD first to avoid synchronisation issues + deploy_logging_crd(directory, debug) + + logging_process = Process(target=deploy_logging_stack, args=(directory, debug)) + logging_process.start() + processes.append(logging_process) + + run_plugins(directory, HookValue.PRE_NETWORK, namespace) + + network_process = Process(target=deploy_network, args=(directory, debug, namespace)) + network_process.start() + + ingress_process = Process(target=deploy_ingress, args=(directory, debug)) + ingress_process.start() + processes.append(ingress_process) + + caddy_process = Process(target=deploy_caddy, args=(directory, debug)) + caddy_process.start() + processes.append(caddy_process) + + # Wait for the network process to complete + network_process.join() + + run_plugins(directory, HookValue.POST_NETWORK, namespace) + + # Start the fork observer process immediately after network process completes + fork_observer_process = Process(target=deploy_fork_observer, args=(directory, debug)) + fork_observer_process.start() + processes.append(fork_observer_process) + + # Wait for all other processes to complete + for p in processes: + p.join() + + run_plugins(directory, HookValue.POST_DEPLOY, namespace) + elif (directory / NAMESPACES_FILE).exists(): deploy_namespaces(directory) else: @@ -94,6 +137,63 @@ def _deploy(directory, debug, namespace, to_all_users): ) +def run_plugins(directory, hook_value: HookValue, namespace, annex: Optional[dict] = None): + """Run the plugin commands within a given hook value""" + + network_file_path = directory / NETWORK_FILE + + with network_file_path.open() as f: + network_file = yaml.safe_load(f) or {} + if not isinstance(network_file, dict): + raise ValueError(f"Invalid network file structure: {network_file_path}") + + processes = [] + + plugins_section = network_file.get("plugins", {}) + hook_section = plugins_section.get(hook_value.value, {}) + for plugin_name, plugin_content in hook_section.items(): + match (plugin_name, plugin_content): + case (str(), dict()): + try: + entrypoint_path = Path(plugin_content.get("entrypoint")) + except Exception as err: + raise SyntaxError("Each plugin must have an 'entrypoint'") from err + + warnet_content = { + WarnetContent.HOOK_VALUE.value: 
hook_value.value, + WarnetContent.NAMESPACE.value: namespace, + PLUGIN_ANNEX: annex, + } + + cmd = ( + f"{network_file_path.parent / entrypoint_path / Path('plugin.py')} entrypoint " + f"'{json.dumps(plugin_content)}' '{json.dumps(warnet_content)}'" + ) + print( + f"Queuing {hook_value.value} plugin command: {plugin_name} with {plugin_content}" + ) + + process = Process(target=run_command, args=(cmd,)) + processes.append(process) + + case _: + print( + f"The following plugin command does not match known plugin command structures: {plugin_name} {plugin_content}" + ) + sys.exit(1) + + if processes: + print(f"Starting {hook_value.value} plugins") + + for process in processes: + process.start() + + for process in processes: + process.join() + + print(f"Completed {hook_value.value} plugins") + + def check_logging_required(directory: Path): # check if node-defaults has logging or metrics enabled default_file_path = directory / DEFAULTS_FILE @@ -108,7 +208,8 @@ def check_logging_required(directory: Path): network_file_path = directory / NETWORK_FILE with network_file_path.open() as f: network_file = yaml.safe_load(f) - nodes = network_file.get("nodes", []) + + nodes = network_file.get("nodes") or [] for node in nodes: if node.get("collectLogs", False): return True @@ -118,11 +219,30 @@ def check_logging_required(directory: Path): return False +def deploy_logging_crd(directory: Path, debug: bool) -> bool: + """ + This function exists so we can parallelise the rest of the loggin stack + installation + """ + if not check_logging_required(directory): + return False + + click.echo( + "Found collectLogs or metricsExport in network definition, Deploying logging stack CRD" + ) + + for command in LOGGING_CRD_COMMANDS: + if not stream_command(command): + print(f"Failed to run Helm command: {command}") + return False + return True + + def deploy_logging_stack(directory: Path, debug: bool) -> bool: if not check_logging_required(directory): return False - click.echo("Found collectLogs or metricsExport in network definition, Deploying logging stack") + click.echo("Deploying logging stack") for command in LOGGING_HELM_COMMANDS: if not stream_command(command): @@ -144,7 +264,7 @@ def deploy_caddy(directory: Path, debug: bool): if not network_file.get(name, {}).get("enabled", False): return - cmd = f"{HELM_COMMAND} {name} {CADDY_CHART} --namespace {namespace}" + cmd = f"{HELM_COMMAND} {name} {CADDY_CHART} --namespace {namespace} --create-namespace" if debug: cmd += " --debug" @@ -156,7 +276,15 @@ def deploy_caddy(directory: Path, debug: bool): click.echo("\nTo access the warnet dashboard run:\n warnet dashboard") -def deploy_ingress(debug: bool): +def deploy_ingress(directory: Path, debug: bool): + # Deploy ingress if either logging or fork observer is enabled + network_file_path = directory / NETWORK_FILE + with network_file_path.open() as f: + network_file = yaml.safe_load(f) + fo_enabled = network_file.get("fork_observer", {}).get("enabled", False) + logging_enabled = check_logging_required(directory) + if not (fo_enabled or logging_enabled): + return click.echo("Deploying ingress controller") for command in INGRESS_HELM_COMMANDS: @@ -231,41 +359,79 @@ def deploy_fork_observer(directory: Path, debug: bool) -> bool: def deploy_network(directory: Path, debug: bool = False, namespace: Optional[str] = None): network_file_path = directory / NETWORK_FILE - defaults_file_path = directory / DEFAULTS_FILE - namespace = get_default_namespace_or(namespace) with network_file_path.open() as f: network_file = 
yaml.safe_load(f) + needs_ln_init = False for node in network_file["nodes"]: - click.echo(f"Deploying node: {node.get('name')}") - try: - temp_override_file_path = "" - node_name = node.get("name") - node_config_override = {k: v for k, v in node.items() if k != "name"} - - cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} --namespace {namespace} -f {defaults_file_path}" - if debug: - cmd += " --debug" - - if node_config_override: - with tempfile.NamedTemporaryFile( - mode="w", suffix=".yaml", delete=False - ) as temp_file: - yaml.dump(node_config_override, temp_file) - temp_override_file_path = Path(temp_file.name) - cmd = f"{cmd} -f {temp_override_file_path}" - - if not stream_command(cmd): - click.echo(f"Failed to run Helm command: {cmd}") - return - except Exception as e: - click.echo(f"Error: {e}") + if "lnd" in node and "channels" in node["lnd"] and len(node["lnd"]["channels"]) > 0: + needs_ln_init = True + break + + processes = [] + for node in network_file["nodes"]: + p = Process(target=deploy_single_node, args=(node, directory, debug, namespace)) + p.start() + processes.append(p) + + for p in processes: + p.join() + + if needs_ln_init: + name = _run( + scenario_file=SCENARIOS_DIR / "ln_init.py", + debug=False, + source_dir=SCENARIOS_DIR, + additional_args=None, + admin=False, + namespace=namespace, + ) + wait_for_pod(name, namespace=namespace) + _logs(pod_name=name, follow=True, namespace=namespace) + + +def deploy_single_node(node, directory: Path, debug: bool, namespace: str): + defaults_file_path = directory / DEFAULTS_FILE + click.echo(f"Deploying node: {node.get('name')}") + temp_override_file_path = "" + try: + node_name = node.get("name") + node_config_override = {k: v for k, v in node.items() if k != "name"} + + defaults_file_path = directory / DEFAULTS_FILE + cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} --namespace {namespace} -f {defaults_file_path}" + if debug: + cmd += " --debug" + + if node_config_override: + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: + yaml.dump(node_config_override, temp_file) + temp_override_file_path = Path(temp_file.name) + cmd = f"{cmd} -f {temp_override_file_path}" + + run_plugins( + directory, HookValue.PRE_NODE, namespace, annex={AnnexMember.NODE_NAME.value: node_name} + ) + + if not stream_command(cmd): + click.echo(f"Failed to run Helm command: {cmd}") return - finally: - if temp_override_file_path: - Path(temp_override_file_path).unlink() + + run_plugins( + directory, + HookValue.POST_NODE, + namespace, + annex={AnnexMember.NODE_NAME.value: node_name}, + ) + + except Exception as e: + click.echo(f"Error: {e}") + return + finally: + if temp_override_file_path: + Path(temp_override_file_path).unlink() def deploy_namespaces(directory: Path): @@ -284,32 +450,40 @@ def deploy_namespaces(directory: Path): ) return + processes = [] for namespace in namespaces_file["namespaces"]: - click.echo(f"Deploying namespace: {namespace.get('name')}") - try: - temp_override_file_path = "" - namespace_name = namespace.get("name") - namespace_config_override = {k: v for k, v in namespace.items() if k != "name"} - - cmd = f"{HELM_COMMAND} {namespace_name} {NAMESPACES_CHART_LOCATION} -f {defaults_file_path}" - - if namespace_config_override: - with tempfile.NamedTemporaryFile( - mode="w", suffix=".yaml", delete=False - ) as temp_file: - yaml.dump(namespace_config_override, temp_file) - temp_override_file_path = Path(temp_file.name) - cmd = f"{cmd} -f {temp_override_file_path}" - - if not 
stream_command(cmd): - click.echo(f"Failed to run Helm command: {cmd}") - return - except Exception as e: - click.echo(f"Error: {e}") + p = Process(target=deploy_single_namespace, args=(namespace, defaults_file_path)) + p.start() + processes.append(p) + + for p in processes: + p.join() + + +def deploy_single_namespace(namespace, defaults_file_path: Path): + click.echo(f"Deploying namespace: {namespace.get('name')}") + temp_override_file_path = "" + try: + namespace_name = namespace.get("name") + namespace_config_override = {k: v for k, v in namespace.items() if k != "name"} + + cmd = f"{HELM_COMMAND} {namespace_name} {NAMESPACES_CHART_LOCATION} -f {defaults_file_path}" + + if namespace_config_override: + with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file: + yaml.dump(namespace_config_override, temp_file) + temp_override_file_path = Path(temp_file.name) + cmd = f"{cmd} -f {temp_override_file_path}" + + if not stream_command(cmd): + click.echo(f"Failed to run Helm command: {cmd}") return - finally: - if temp_override_file_path: - temp_override_file_path.unlink() + except Exception as e: + click.echo(f"Error: {e}") + return + finally: + if temp_override_file_path: + Path(temp_override_file_path).unlink() def is_windows(): diff --git a/src/warnet/graph.py b/src/warnet/graph.py index 390686486..d06387710 100644 --- a/src/warnet/graph.py +++ b/src/warnet/graph.py @@ -1,3 +1,4 @@ +import json import os import random import sys @@ -8,6 +9,8 @@ import inquirer import yaml +from resources.scenarios.ln_framework.ln import Policy + from .constants import DEFAULT_TAG, SUPPORTED_TAGS @@ -226,3 +229,73 @@ def create(): fg="yellow", ) return False + + +@click.command() +@click.argument("graph_file_path", type=click.Path(exists=True, file_okay=True, dir_okay=False)) +@click.argument("output_path", type=click.Path(exists=False, file_okay=False, dir_okay=True)) +def import_network(graph_file_path: str, output_path: str): + """Create a network from an imported lightning network graph JSON""" + print(_import_network(graph_file_path, output_path)) + + +def _import_network(graph_file_path, output_path): + output_path = Path(output_path) + graph_file_path = Path(graph_file_path).resolve() + with open(graph_file_path) as graph_file: + graph = json.loads(graph_file.read()) + + tanks = {} + pk_to_tank = {} + tank_to_pk = {} + index = 0 + for node in graph["nodes"]: + tank = f"tank-{index:04d}" + pk_to_tank[node["pub_key"]] = tank + tank_to_pk[tank] = node["pub_key"] + tanks[tank] = {"name": tank, "ln": {"lnd": True}, "lnd": {"channels": []}} + index += 1 + print(f"Imported {index} nodes") + + sorted_edges = sorted(graph["edges"], key=lambda x: int(x["channel_id"])) + + # By default we start including channel open txs in block 300 + block = 300 + # Coinbase occupies the 0 position! 
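+    # Each channel id is a (block height, tx index) pair; at most 1000 channel-open txs are scheduled per block before rolling over to the next block.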
+ index = 1 + count = 0 + for edge in sorted_edges: + source = pk_to_tank[edge["node1_pub"]] + channel = { + "id": {"block": block, "index": index}, + "target": pk_to_tank[edge["node2_pub"]] + "-ln", + "capacity": int(edge["capacity"]), + "push_amt": int(edge["capacity"]) // 2, + "source_policy": Policy.from_lnd_describegraph(edge["node1_policy"]).to_dict(), + "target_policy": Policy.from_lnd_describegraph(edge["node2_policy"]).to_dict(), + } + tanks[source]["lnd"]["channels"].append(channel) + index += 1 + if index > 1000: + index = 1 + block += 1 + count += 1 + + print(f"Imported {count} channels") + + network = {"nodes": []} + prev_node_name = list(tanks.keys())[-1] + for name, obj in tanks.items(): + obj["name"] = name + obj["addnode"] = [prev_node_name] + prev_node_name = name + network["nodes"].append(obj) + + output_path.mkdir(parents=True, exist_ok=True) + # This file must exist and must contain at least one line of valid yaml + with open(output_path / "node-defaults.yaml", "w") as f: + f.write(f"imported_from: {graph_file_path}\n") + # Here's the good stuff + with open(output_path / "network.yaml", "w") as f: + f.write(yaml.dump(network, sort_keys=False)) + return f"Network created in {output_path.resolve()}" diff --git a/src/warnet/k8s.py b/src/warnet/k8s.py index 9354eb903..c12e4de1b 100644 --- a/src/warnet/k8s.py +++ b/src/warnet/k8s.py @@ -1,6 +1,7 @@ import json import os import sys +import tarfile import tempfile from pathlib import Path from time import sleep @@ -83,11 +84,19 @@ def get_pod_exit_status(pod_name, namespace: Optional[str] = None): return None -def get_edges(namespace: Optional[str] = None) -> any: +def get_channels(namespace: Optional[str] = None) -> any: namespace = get_default_namespace_or(namespace) sclient = get_static_client() - configmap = sclient.read_namespaced_config_map(name="edges", namespace=namespace) - return json.loads(configmap.data["data"]) + config_maps = sclient.list_namespaced_config_map( + namespace=namespace, label_selector="channels=true" + ) + channels = [] + for cm in config_maps.items: + channel_jsons = json.loads(cm.data["channels"]) + for channel_json in channel_jsons: + channel_json["source"] = cm.data["source"] + channels.append(channel_json) + return channels def create_kubernetes_object( @@ -294,7 +303,7 @@ def wait_for_pod_ready(name, namespace, timeout=300): return False -def wait_for_init(pod_name, timeout=300, namespace: Optional[str] = None): +def wait_for_init(pod_name, timeout=300, namespace: Optional[str] = None, quiet: bool = False): namespace = get_default_namespace_or(namespace) sclient = get_static_client() w = watch.Watch() @@ -307,10 +316,12 @@ def wait_for_init(pod_name, timeout=300, namespace: Optional[str] = None): continue for init_container_status in pod.status.init_container_statuses: if init_container_status.state.running: - print(f"initContainer in pod {pod_name} ({namespace}) is ready") + if not quiet: + print(f"initContainer in pod {pod_name} ({namespace}) is ready") w.stop() return True - print(f"Timeout waiting for initContainer in {pod_name} ({namespace})to be ready.") + if not quiet: + print(f"Timeout waiting for initContainer in {pod_name} ({namespace}) to be ready.") return False @@ -364,7 +375,7 @@ def wait_for_pod(pod_name, timeout_seconds=10, namespace: Optional[str] = None): def write_file_to_container( - pod_name, container_name, dst_path, data, namespace: Optional[str] = None + pod_name, container_name, dst_path, data, namespace: Optional[str] = None, quiet: bool = False ): namespace = 
get_default_namespace_or(namespace) sclient = get_static_client() @@ -396,7 +407,8 @@ def write_file_to_container( stdout=True, tty=False, ) - print(f"Successfully copied data to {pod_name}({container_name}):{dst_path}") + if not quiet: + print(f"Successfully copied data to {pod_name}({container_name}):{dst_path}") return True except Exception as e: print(f"Failed to copy data to {pod_name}({container_name}):{dst_path}:\n{e}") @@ -537,3 +549,50 @@ def write_kubeconfig(kube_config: dict, kubeconfig_path: str) -> None: except Exception as e: os.remove(temp_file.name) raise K8sError(f"Error writing kubeconfig: {kubeconfig_path}") from e + + +def download( + pod_name: str, + source_path: Path, + destination_path: Path = Path("."), + namespace: Optional[str] = None, +) -> Path: + """Download the item from the `source_path` to the `destination_path`""" + + namespace = get_default_namespace_or(namespace) + + v1 = get_static_client() + + target_folder = destination_path / source_path.stem + os.makedirs(target_folder, exist_ok=True) + + command = ["tar", "cf", "-", "-C", str(source_path.parent), str(source_path.name)] + + resp = stream( + v1.connect_get_namespaced_pod_exec, + name=pod_name, + namespace=namespace, + command=command, + stderr=True, + stdin=False, + stdout=True, + tty=False, + _preload_content=False, + ) + + tar_file = target_folder.with_suffix(".tar") + with open(tar_file, "wb") as f: + while resp.is_open(): + resp.update(timeout=1) + if resp.peek_stdout(): + f.write(resp.read_stdout().encode("utf-8")) + if resp.peek_stderr(): + print(resp.read_stderr()) + resp.close() + + with tarfile.open(tar_file, "r") as tar: + tar.extractall(path=destination_path) + + os.remove(tar_file) + + return destination_path diff --git a/src/warnet/ln.py b/src/warnet/ln.py index ade55759e..a1f7c1eb2 100644 --- a/src/warnet/ln.py +++ b/src/warnet/ln.py @@ -1,6 +1,13 @@ +import json +from typing import Optional + import click -from .rpc import rpc_call +from .k8s import ( + get_default_namespace_or, + get_pod, +) +from .process import run_command @click.group(name="ln") @@ -9,31 +16,56 @@ def ln(): @ln.command(context_settings={"ignore_unknown_options": True}) -@click.argument("node", type=int) -@click.argument("command", type=str, required=True, nargs=-1) -@click.option("--network", default="warnet", show_default=True, type=str) -def rpc(node: int, command: tuple, network: str): +@click.argument("pod", type=str) +@click.argument("method", type=str) +@click.argument("params", type=str, nargs=-1) # this will capture all remaining arguments +@click.option("--namespace", default=None, show_default=True) +def rpc(pod: str, method: str, params: str, namespace: Optional[str]): """ - Call lightning cli rpc on in [network] + Call lightning cli rpc on """ - print( - rpc_call( - "tank_lncli", - {"network": network, "node": node, "command": command}, - ) - ) + print(_rpc(pod, method, params, namespace)) -@ln.command(context_settings={"ignore_unknown_options": True}) -@click.argument("node", type=int) -@click.option("--network", default="warnet", show_default=True, type=str) -def pubkey(node: int, network: str): +def _rpc(pod_name: str, method: str, params: str = "", namespace: Optional[str] = None): + pod = get_pod(pod_name) + namespace = get_default_namespace_or(namespace) + chain = pod.metadata.labels["chain"] + cmd = f"kubectl -n {namespace} exec {pod_name} -- lncli --network {chain} {method} {' '.join(map(str, params))}" + return run_command(cmd) + + +@ln.command() +@click.argument("pod", type=str) +def pubkey( + 
pod: str, +): + """ + Get lightning node pub key from <pod> + """ + print(_pubkey(pod)) + + +def _pubkey(pod: str): + info = _rpc(pod, "getinfo") + return json.loads(info)["identity_pubkey"] + + +@ln.command() +@click.argument("pod", type=str) +def host( + pod: str, +): """ - Get lightning node pub key on <node> in [network] + Get lightning node host from <pod> """ - print( - rpc_call( - "tank_ln_pub_key", - {"network": network, "node": node}, - ) - ) + print(_host(pod)) + + +def _host(pod): + info = _rpc(pod, "getinfo") + uris = json.loads(info)["uris"] + if uris: + return uris[0].split("@")[1] + else: + return "" diff --git a/src/warnet/main.py b/src/warnet/main.py index 76893575c..768a82f96 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -5,8 +5,9 @@ from .control import down, logs, run, snapshot, stop from .dashboard import dashboard from .deploy import deploy -from .graph import create, graph +from .graph import create, graph, import_network from .image import image +from .ln import ln from .project import init, new, setup from .status import status from .users import auth @@ -17,6 +18,49 @@ def cli(): pass + +@click.command() +def version() -> None: + """Display the installed version of warnet""" + try: + from warnet._version import __version__ + + # For PyPI releases, this will be the exact tag (e.g. "1.1.11") + # For dev installs, it will be something like "1.1.11.post1.dev17+g27af3a7.d20250309" + # Which is <version>.post<N>.dev<M>+g<commit>.d<date> + # <N> is the number of local commits since the checkout commit + # <M> is the number of commits since the last tag + raw_version = __version__ + + # Format the version string to our desired format + if "+" in raw_version: + version_part, git_date_part = raw_version.split("+", 1) + + # Get just the git commit hash + commit_hash = ( + git_date_part[1:].split(".", 1)[0] + if git_date_part.startswith("g") + else git_date_part.split(".", 1)[0] + ) + + # Remove .dev component (from "no-guess-dev" scheme) + clean_version = version_part + if ".dev" in clean_version: + clean_version = clean_version.split(".dev")[0] + + # Apply dirty status (from "no-guess-dev" scheme) + if ".post" in clean_version: + base = clean_version.split(".post")[0] + version_str = f"{base}-{commit_hash}-dirty" + else: + version_str = f"{clean_version}-{commit_hash}" + else: + version_str = raw_version + + click.echo(f"warnet version {version_str}") + except ImportError: + click.echo("warnet version unknown") + + cli.add_command(admin) cli.add_command(auth) cli.add_command(bitcoin) @@ -24,9 +68,11 @@ def cli(): cli.add_command(down) cli.add_command(dashboard) cli.add_command(graph) +cli.add_command(import_network) cli.add_command(image) cli.add_command(init) cli.add_command(logs) +cli.add_command(ln) cli.add_command(new) cli.add_command(run) cli.add_command(setup) @@ -34,7 +80,7 @@ cli.add_command(status) cli.add_command(stop) cli.add_command(create) - +cli.add_command(version) if __name__ == "__main__": cli() diff --git a/src/warnet/network.py b/src/warnet/network.py index a894cafc9..e6658ae8c 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -7,6 +7,7 @@ from .bitcoin import _rpc from .constants import ( NETWORK_DIR, + PLUGINS_DIR, SCENARIOS_DIR, ) from .k8s import get_mission @@ -48,6 +49,16 @@ def copy_scenario_defaults(directory: Path): ) + +def copy_plugins_defaults(directory: Path): + """Create the project structure for a warnet project's plugins""" + copy_defaults( + directory, + PLUGINS_DIR.name, + PLUGINS_DIR, + ["__pycache__", "__init__"], + ) + + +def
is_connection_manual(peer): # newer nodes specify a "connection_type" return bool(peer.get("connection_type") == "manual" or peer.get("addnode") is True) diff --git a/src/warnet/project.py b/src/warnet/project.py index 67b063fcd..badc5aa83 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -26,7 +26,7 @@ KUBECTL_DOWNLOAD_URL_STUB, ) from .graph import inquirer_create_network -from .network import copy_network_defaults, copy_scenario_defaults +from .network import copy_network_defaults, copy_plugins_defaults, copy_scenario_defaults @click.command() @@ -387,6 +387,7 @@ def create_warnet_project(directory: Path, check_empty: bool = False): try: copy_network_defaults(directory) copy_scenario_defaults(directory) + copy_plugins_defaults(directory) click.echo(f"Copied network example files to {directory}/networks") click.echo(f"Created warnet project structure in {directory}") except Exception as e: @@ -430,6 +431,12 @@ def new_internal(directory: Path, from_init=False): if proj_answers["custom_network"]: click.secho("\nGenerating custom network...", fg="yellow", bold=True) custom_network_path = inquirer_create_network(directory) + else: + click.echo( + f"No custom network specified, see example network files in {project_path}/networks/" + ) + click.echo("Deploy any of these networks by running:") + click.echo(f" warnet deploy {project_path}/networks/") if custom_network_path: click.echo( diff --git a/src/warnet/status.py b/src/warnet/status.py index df62ed2df..c94f014cc 100644 --- a/src/warnet/status.py +++ b/src/warnet/status.py @@ -8,6 +8,7 @@ from rich.text import Text from urllib3.exceptions import MaxRetryError +from .constants import COMMANDER_MISSION, TANK_MISSION from .k8s import get_mission from .network import _connected @@ -86,7 +87,7 @@ def status(): def _get_tank_status(): - tanks = get_mission("tank") + tanks = get_mission(TANK_MISSION) return [ { "name": tank.metadata.name, @@ -98,7 +99,7 @@ def _get_tank_status(): def _get_deployed_scenarios(): - commanders = get_mission("commander") + commanders = get_mission(COMMANDER_MISSION) return [ { "name": c.metadata.name, diff --git a/test/data/ln.graphml b/test/data/ln.graphml deleted file mode 100644 index efd0c359f..000000000 --- a/test/data/ln.graphml +++ /dev/null @@ -1,66 +0,0 @@ - - - - - - - - - - - - - - - - - - - simln - - 27.0 - lnd - lightninglabs/lnd:v0.17.5-beta - true - - - 27.0 - lnd - pinheadmz/circuitbreaker:278737d - true - - - 27.0 - lnd - pinheadmz/circuitbreaker:278737d - --bitcoin.timelockdelta=33 - - - 27.0 - cln - --cltv-delta=33 - - - 27.0 - - - - - - - - - - --local_amt=100000 - --base_fee_msat=2200 --fee_rate_ppm=13 --time_lock_delta=20 - - - --local_amt=100000 --push_amt=50000 - --base_fee_msat=5500 --fee_rate_ppm=3 --time_lock_delta=40 - - - amount=100000 push_msat=50000000 - feebase=5500 feeppm=3 - - - \ No newline at end of file diff --git a/test/data/ln/network.yaml b/test/data/ln/network.yaml new file mode 100644 index 000000000..792861da2 --- /dev/null +++ b/test/data/ln/network.yaml @@ -0,0 +1,54 @@ +nodes: + - name: tank-0000 + addnode: + - tank-0001 + ln: + lnd: true + + - name: tank-0001 + addnode: + - tank-0002 + ln: + lnd: true + + - name: tank-0002 + addnode: + - tank-0000 + ln: + lnd: true + + - name: tank-0003 + addnode: + - tank-0000 + ln: + lnd: true + lnd: + config: | + bitcoin.timelockdelta=33 + channels: + - id: + block: 300 + index: 1 + target: tank-0004-ln + capacity: 100000 + push_amt: 50000 + + - name: tank-0004 + addnode: + - tank-0000 + ln: + lnd: true + lnd: + 
channels: + - id: + block: 300 + index: 2 + target: tank-0005-ln + capacity: 50000 + push_amt: 25000 + + - name: tank-0005 + addnode: + - tank-0000 + ln: + lnd: true diff --git a/test/data/ln/node-defaults.yaml b/test/data/ln/node-defaults.yaml new file mode 100644 index 000000000..884ad1343 --- /dev/null +++ b/test/data/ln/node-defaults.yaml @@ -0,0 +1,8 @@ +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + tag: "27.0" + +lnd: + defaultConfig: | + color=#000000 \ No newline at end of file diff --git a/test/data/signet/node-defaults.yaml b/test/data/signet/node-defaults.yaml index aea980d6a..941f03881 100644 --- a/test/data/signet/node-defaults.yaml +++ b/test/data/signet/node-defaults.yaml @@ -3,7 +3,8 @@ image: pullPolicy: Always tag: "27.0" -chain: signet +global: + chain: signet spec: restartPolicy: Never diff --git a/test/data/wargames/namespaces/armies/namespace-defaults.yaml b/test/data/wargames/namespaces/armies/namespace-defaults.yaml new file mode 100644 index 000000000..b33b260b1 --- /dev/null +++ b/test/data/wargames/namespaces/armies/namespace-defaults.yaml @@ -0,0 +1,5 @@ +users: + - name: warnet-user + roles: + - pod-viewer + - pod-manager diff --git a/test/data/wargames/namespaces/armies/namespaces.yaml b/test/data/wargames/namespaces/armies/namespaces.yaml new file mode 100644 index 000000000..86cde68af --- /dev/null +++ b/test/data/wargames/namespaces/armies/namespaces.yaml @@ -0,0 +1,2 @@ +namespaces: +- name: wargames-red \ No newline at end of file diff --git a/test/data/wargames/networks/armada/network.yaml b/test/data/wargames/networks/armada/network.yaml new file mode 100644 index 000000000..9cb614810 --- /dev/null +++ b/test/data/wargames/networks/armada/network.yaml @@ -0,0 +1,6 @@ +nodes: +- name: armada + image: + tag: '27.0' + addnode: + - miner.default \ No newline at end of file diff --git a/test/data/wargames/networks/armada/node-defaults.yaml b/test/data/wargames/networks/armada/node-defaults.yaml new file mode 100644 index 000000000..bb219cf19 --- /dev/null +++ b/test/data/wargames/networks/armada/node-defaults.yaml @@ -0,0 +1,6 @@ +global: + chain: regtest +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + tag: '27.0' \ No newline at end of file diff --git a/test/data/wargames/networks/battlefield/network.yaml b/test/data/wargames/networks/battlefield/network.yaml new file mode 100644 index 000000000..5cf96504f --- /dev/null +++ b/test/data/wargames/networks/battlefield/network.yaml @@ -0,0 +1,10 @@ +nodes: +- name: miner + image: + tag: '27.0' +- name: target-red + addnode: + - miner + image: + tag: '27.0' + \ No newline at end of file diff --git a/test/data/wargames/networks/battlefield/node-defaults.yaml b/test/data/wargames/networks/battlefield/node-defaults.yaml new file mode 100644 index 000000000..7399f5c34 --- /dev/null +++ b/test/data/wargames/networks/battlefield/node-defaults.yaml @@ -0,0 +1,6 @@ +global: + chain: regtest +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + tag: '27.0' diff --git a/test/ln_basic_test.py b/test/ln_basic_test.py new file mode 100755 index 000000000..fdb479dbd --- /dev/null +++ b/test/ln_basic_test.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 + +import json +import os +from pathlib import Path +from time import sleep + +from test_base import TestBase + +from warnet.process import stream_command + + +class LNBasicTest(TestBase): + def __init__(self): + super().__init__() + self.network_dir = Path(os.path.dirname(__file__)) / "data" / "ln" + 
self.scen_dir = Path(os.path.dirname(__file__)).parent / "resources" / "scenarios" + self.lns = [ + "tank-0000-ln", + "tank-0001-ln", + "tank-0002-ln", + "tank-0003-ln", + "tank-0004-ln", + "tank-0005-ln", + ] + + def run_test(self): + try: + # Wait for all nodes to wake up. ln_init will start automatically + self.setup_network() + + # Send a payment across channels opened automatically by ln_init + self.pay_invoice(sender="tank-0005-ln", recipient="tank-0003-ln") + + # Manually open two more channels between first three nodes + # and send a payment using warnet RPC + self.manual_open_channels() + self.wait_for_gossip_sync(self.lns[:3], 2 + 2) + self.pay_invoice(sender="tank-0000-ln", recipient="tank-0002-ln") + + finally: + self.cleanup() + + def setup_network(self): + self.log.info("Setting up network") + stream_command(f"warnet deploy {self.network_dir}") + + def fund_wallets(self): + outputs = "" + for lnd in self.lns: + addr = json.loads(self.warnet(f"ln rpc {lnd} newaddress p2wkh"))["address"] + outputs += f',"{addr}":10' + # trim first comma + outputs = outputs[1:] + + self.warnet("bitcoin rpc tank-0000 sendmany '' '{" + outputs + "}'") + self.warnet("bitcoin rpc tank-0000 -generate 1") + + def wait_for_two_txs(self): + self.wait_for_predicate( + lambda: json.loads(self.warnet("bitcoin rpc tank-0000 getmempoolinfo"))["size"] == 2 + ) + + def manual_open_channels(self): + # 0 -> 1 -> 2 + pk1 = self.warnet("ln pubkey tank-0001-ln") + pk2 = self.warnet("ln pubkey tank-0002-ln") + + host1 = "" + host2 = "" + + while not host1 or not host2: + if not host1: + host1 = self.warnet("ln host tank-0001-ln") + if not host2: + host2 = self.warnet("ln host tank-0002-ln") + sleep(1) + + print( + self.warnet( + f"ln rpc tank-0000-ln openchannel --node_key {pk1} --local_amt 100000 --connect {host1}" + ) + ) + print( + self.warnet( + f"ln rpc tank-0001-ln openchannel --node_key {pk2} --local_amt 100000 --connect {host2}" + ) + ) + + self.wait_for_two_txs() + + self.warnet("bitcoin rpc tank-0000 -generate 10") + + def wait_for_gossip_sync(self, nodes, expected): + while len(nodes) > 0: + for node in nodes: + chs = json.loads(self.warnet(f"ln rpc {node} describegraph"))["edges"] + if len(chs) >= expected: + nodes.remove(node) + sleep(1) + + def pay_invoice(self, sender: str, recipient: str): + init_balance = int(json.loads(self.warnet(f"ln rpc {recipient} channelbalance"))["balance"]) + inv = json.loads(self.warnet(f"ln rpc {recipient} addinvoice --amt 1000")) + print(inv) + print(self.warnet(f"ln rpc {sender} payinvoice -f {inv['payment_request']}")) + + def wait_for_success(): + return ( + int(json.loads(self.warnet(f"ln rpc {recipient} channelbalance"))["balance"]) + == init_balance + 1000 + ) + + self.wait_for_predicate(wait_for_success) + + def scenario_open_channels(self): + # 2 -> 3 + # connecting all six ln nodes in the graph + scenario_file = self.scen_dir / "test_scenarios" / "ln_init.py" + self.log.info(f"Running scenario from: {scenario_file}") + self.warnet(f"run {scenario_file} --source_dir={self.scen_dir} --debug") + + +if __name__ == "__main__": + test = LNBasicTest() + test.run_test() diff --git a/test/ln_test.py b/test/ln_test.py index 576846b6b..ee27b6256 100755 --- a/test/ln_test.py +++ b/test/ln_test.py @@ -1,126 +1,122 @@ #!/usr/bin/env python3 - +import ast import json import os from pathlib import Path +from typing import Optional from test_base import TestBase -from warnet.services import ServiceType +from warnet.k8s import wait_for_pod +from warnet.process import run_command, 
stream_command class LNTest(TestBase): def __init__(self): super().__init__() - self.graph_file_path = Path(os.path.dirname(__file__)) / "data" / "ln.graphml" + self.graph_file = Path(os.path.dirname(__file__)) / "data" / "LN_10.json" + self.imported_network_dir = self.tmpdir / "imported_network" + self.scen_dir = Path(os.path.dirname(__file__)).parent / "resources" / "scenarios" + self.plugins_dir = Path(os.path.dirname(__file__)).parent / "resources" / "plugins" + self.simln_exec = Path("simln/plugin.py") def run_test(self): - self.start_server() try: + self.import_network() self.setup_network() - self.run_ln_init_scenario() self.test_channel_policies() - self.test_ln_payment_0_to_2() - self.test_ln_payment_2_to_0() - self.test_simln() + self.test_payments() + self.run_simln() finally: self.cleanup() + def import_network(self): + self.log.info("Importing network graph from JSON...") + res = self.warnet(f"import-network {self.graph_file} {self.imported_network_dir}") + self.log.info(f"\n{res}") + def setup_network(self): - self.log.info("Setting up network") - self.log.info(self.warnet(f"network start {self.graph_file_path}")) - self.wait_for_all_tanks_status(target="running") - self.wait_for_all_edges() - - def get_cb_forwards(self, index): - cmd = "wget -q -O - 127.0.0.1:9235/api/forwarding_history" - res = self.wait_for_rpc( - "exec_run", [index, ServiceType.CIRCUITBREAKER.value, cmd, self.network_name] - ) - return json.loads(res) - - def run_ln_init_scenario(self): - self.log.info("Running LN Init scenario") - self.warnet("bitcoin rpc 0 getblockcount") - self.warnet("scenarios run ln_init") - self.wait_for_all_scenarios() - scenario_return_code = self.get_scenario_return_code("ln_init") - if scenario_return_code != 0: - raise Exception("LN Init scenario failed") + self.log.info("Setting up network...") + stream_command(f"warnet deploy {self.imported_network_dir}") def test_channel_policies(self): self.log.info("Ensuring node-level channel policy settings") - node2pub, node2host = json.loads(self.warnet("ln rpc 2 getinfo"))["uris"][0].split("@") - chan_id = json.loads(self.warnet("ln rpc 2 listchannels"))["channels"][0]["chan_id"] - chan = json.loads(self.warnet(f"ln rpc 2 getchaninfo {chan_id}")) - - # node_1 or node_2 is tank 2 with its non-default --bitcoin.timelockdelta=33 - if chan["node1_policy"]["time_lock_delta"] != 33: - assert ( - chan["node2_policy"]["time_lock_delta"] == 33 - ), "Expected time_lock_delta to be 33" - - self.log.info("Ensuring no circuit breaker forwards yet") - assert len(self.get_cb_forwards(1)["forwards"]) == 0, "Expected no circuit breaker forwards" - - def test_ln_payment_0_to_2(self): - self.log.info("Test LN payment from 0 -> 2") - inv = json.loads(self.warnet("ln rpc 2 addinvoice --amt=2000"))["payment_request"] - self.log.info(f"Got invoice from node 2: {inv}") - self.log.info("Paying invoice from node 0...") - self.log.info(self.warnet(f"ln rpc 0 payinvoice -f {inv}")) - - self.wait_for_predicate(self.check_invoice_settled) - - self.log.info("Ensuring channel-level channel policy settings: source") - payment = json.loads(self.warnet("ln rpc 0 listpayments"))["payments"][0] - assert ( - payment["fee_msat"] == "5506" - ), f"Expected fee_msat to be 5506, got {payment['fee_msat']}" - - self.log.info("Ensuring circuit breaker tracked payment") - assert len(self.get_cb_forwards(1)["forwards"]) == 1, "Expected one circuit breaker forward" - - def test_ln_payment_2_to_0(self): - self.log.info("Test LN payment from 2 -> 0") - inv = 
json.loads(self.warnet("ln rpc 0 addinvoice --amt=1000"))["payment_request"] - self.log.info(f"Got invoice from node 0: {inv}") - self.log.info("Paying invoice from node 2...") - self.log.info(self.warnet(f"ln rpc 2 payinvoice -f {inv}")) - - self.wait_for_predicate(lambda: self.check_invoices(0) == 1) - - self.log.info("Ensuring channel-level channel policy settings: target") - payment = json.loads(self.warnet("ln rpc 2 listpayments"))["payments"][0] - assert ( - payment["fee_msat"] == "2213" - ), f"Expected fee_msat to be 2213, got {payment['fee_msat']}" - - def test_simln(self): - self.log.info("Engaging simln") - node2pub, _ = json.loads(self.warnet("ln rpc 2 getinfo"))["uris"][0].split("@") - activity = [ - {"source": "ln-0", "destination": node2pub, "interval_secs": 1, "amount_msat": 2000} - ] - self.warnet( - f"network export --exclude=[1] --activity={json.dumps(activity).replace(' ', '')}" - ) - self.wait_for_predicate(lambda: self.check_invoices(2) > 1) - assert self.check_invoices(0) == 1, "Expected one invoice for node 0" - assert self.check_invoices(1) == 0, "Expected no invoices for node 1" - - def check_invoice_settled(self): - invs = json.loads(self.warnet("ln rpc 2 listinvoices"))["invoices"] - if len(invs) > 0 and invs[0]["state"] == "SETTLED": - self.log.info("Invoice settled") - return True - return False - - def check_invoices(self, index): - invs = json.loads(self.warnet(f"ln rpc {index} listinvoices"))["invoices"] - settled = sum(1 for inv in invs if inv["state"] == "SETTLED") - self.log.debug(f"Node {index} has {settled} settled invoices") - return settled + graphs = [] + for n in range(10): + ln = f"tank-{n:04d}-ln" + res = self.warnet(f"ln rpc {ln} describegraph") + graphs.append(json.loads(res)["edges"]) + + def check_policy(node: int, index: int, field: str, values: tuple): + self.log.info(f"Checking policy: Node={node} ch={index} Expected={field}:{values}") + graph = graphs[node] + assert len(graph) == 13 + ch = graph[index] + a = int(ch["node1_policy"][field]) + b = int(ch["node2_policy"][field]) + assert values == (a, b) or values == ( + b, + a, + ), f"policy check failed:\nActual:\n{ch}\nExpected:\n{field}:{values}" + + # test one property of one channel from each node + check_policy(0, 0, "fee_base_msat", (250, 1000)) + check_policy(1, 1, "time_lock_delta", (40, 100)) + check_policy(2, 2, "fee_rate_milli_msat", (1, 4000)) + check_policy(3, 3, "fee_rate_milli_msat", (499, 4000)) + check_policy(4, 4, "time_lock_delta", (40, 144)) + check_policy(5, 5, "max_htlc_msat", (1980000000, 1500000000)) + check_policy(6, 6, "fee_rate_milli_msat", (550, 71)) + check_policy(7, 7, "min_htlc", (1000, 1)) + check_policy(8, 8, "time_lock_delta", (80, 144)) + check_policy(9, 9, "fee_base_msat", (616, 1000)) + + def test_payments(self): + def get_and_pay(src, tgt): + src = f"tank-{src:04d}-ln" + tgt = f"tank-{tgt:04d}-ln" + invoice = json.loads(self.warnet(f"ln rpc {tgt} addinvoice --amt 230118"))[ + "payment_request" + ] + print(self.warnet(f"ln rpc {src} payinvoice {invoice} --force")) + + get_and_pay(0, 5) + get_and_pay(2, 3) + get_and_pay(1, 9) + get_and_pay(8, 7) + get_and_pay(4, 6) + + def run_simln(self): + self.log.info("Running SimLN...") + activity_cmd = f"{self.plugins_dir}/{self.simln_exec} get-example-activity" + activity = run_command(activity_cmd) + launch_cmd = f"{self.plugins_dir}/{self.simln_exec} launch-activity '{activity}'" + pod = run_command(launch_cmd).strip() + wait_for_pod(pod) + self.log.info("Checking SimLN...") + 
self.wait_for_predicate(self.found_results_remotely) + self.log.info("SimLN was successful.") + + def found_results_remotely(self, pod: Optional[str] = None) -> bool: + if pod is None: + pod = self.get_first_simln_pod() + self.log.info(f"Checking for results file in {pod}") + results_file = run_command( + f"{self.plugins_dir}/{self.simln_exec} sh {pod} ls /working/results" + ).strip() + self.log.info(f"Results file: {results_file}") + results = run_command( + f"{self.plugins_dir}/{self.simln_exec} sh {pod} cat /working/results/{results_file}" + ).strip() + self.log.info(results) + return results.find("Success") > 0 + + def get_first_simln_pod(self): + command = f"{self.plugins_dir}/{self.simln_exec} list-pod-names" + pod_names_literal = run_command(command) + self.log.info(f"{command}: {pod_names_literal}") + pod_names = ast.literal_eval(pod_names_literal) + return pod_names[0] if __name__ == "__main__": diff --git a/test/simln_test.py b/test/simln_test.py new file mode 100755 index 000000000..ac309faf4 --- /dev/null +++ b/test/simln_test.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 +import ast +import os +from functools import partial +from pathlib import Path +from typing import Optional + +import pexpect +from test_base import TestBase + +from warnet.k8s import download, wait_for_pod +from warnet.process import run_command + + +class SimLNTest(TestBase): + def __init__(self): + super().__init__() + self.network_dir = ( + Path(os.path.dirname(__file__)).parent / "resources" / "networks" / "hello" + ) + self.plugins_dir = Path(os.path.dirname(__file__)).parent / "resources" / "plugins" + self.simln_exec = "plugins/simln/plugin.py" + + def run_test(self): + try: + os.chdir(self.tmpdir) + self.init_directory() + self.deploy_with_plugin() + self.copy_results() + self.assert_hello_plugin() + finally: + self.cleanup() + + def init_directory(self): + self.log.info("Initializing SimLN plugin...") + self.sut = pexpect.spawn("warnet init") + self.sut.expect("network", timeout=10) + self.sut.sendline("n") + self.sut.close() + + def deploy_with_plugin(self): + self.log.info("Deploy the ln network with a SimLN plugin") + results = self.warnet(f"deploy {self.network_dir}") + self.log.info(results) + wait_for_pod(self.get_first_simln_pod()) + + def copy_results(self): + pod = self.get_first_simln_pod() + partial_func = partial(self.found_results_remotely, pod) + self.wait_for_predicate(partial_func) + + download(pod, Path("/working/results"), Path(".")) + self.wait_for_predicate(self.found_results_locally) + + def found_results_remotely(self, pod: Optional[str] = None) -> bool: + if pod is None: + pod = self.get_first_simln_pod() + self.log.info(f"Checking for results file in {pod}") + results_file = run_command(f"{self.simln_exec} sh {pod} ls /working/results").strip() + self.log.info(f"Results file: {results_file}") + results = run_command( + f"{self.simln_exec} sh {pod} cat /working/results/{results_file}" + ).strip() + self.log.info(results) + return results.find("Success") > 0 + + def get_first_simln_pod(self): + command = f"{self.simln_exec} list-pod-names" + pod_names_literal = run_command(command) + self.log.info(f"{command}: {pod_names_literal}") + pod_names = ast.literal_eval(pod_names_literal) + return pod_names[0] + + def found_results_locally(self) -> bool: + directory = "results" + self.log.info(f"Searching {directory}") + for root, _dirs, files in os.walk(Path(directory)): + for file_name in files: + file_path = os.path.join(root, file_name) + + with open(file_path) as file: + content = 
file.read() + if "Success" in content: + self.log.info(f"Found downloaded results in directory: {directory}.") + return True + self.log.info(f"Did not find downloaded results in directory: {directory}.") + return False + + def assert_hello_plugin(self): + self.log.info("Waiting for the 'hello' plugin pods.") + wait_for_pod("hello-pre-deploy") + wait_for_pod("hello-post-deploy") + wait_for_pod("hello-pre-network") + wait_for_pod("hello-post-network") + wait_for_pod("tank-0000-post-hello-pod") + wait_for_pod("tank-0000-pre-hello-pod") + wait_for_pod("tank-0001-post-hello-pod") + wait_for_pod("tank-0001-pre-hello-pod") + wait_for_pod("tank-0002-post-hello-pod") + wait_for_pod("tank-0002-pre-hello-pod") + wait_for_pod("tank-0003-post-hello-pod") + wait_for_pod("tank-0003-pre-hello-pod") + wait_for_pod("tank-0004-post-hello-pod") + wait_for_pod("tank-0004-pre-hello-pod") + wait_for_pod("tank-0005-post-hello-pod") + wait_for_pod("tank-0005-pre-hello-pod") + + +if __name__ == "__main__": + test = SimLNTest() + test.run_test() diff --git a/test/test_base.py b/test/test_base.py index 2b024da64..51d5935d6 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -139,13 +139,6 @@ def check_scenarios(): self.wait_for_predicate(check_scenarios) - def get_scenario_return_code(self, scenario_name): - scns = self.rpc("scenarios_list_running") - scns = [scn for scn in scns if scn["cmd"].strip() == scenario_name] - if len(scns) == 0: - raise Exception(f"Scenario {scenario_name} not found in running scenarios") - return scns[0]["return_code"] - def assert_equal(thing1, thing2, *args): if thing1 != thing2 or any(thing1 != arg for arg in args): diff --git a/test/wargames_test.py b/test/wargames_test.py new file mode 100755 index 000000000..05e5c63fd --- /dev/null +++ b/test/wargames_test.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 + +import os +from pathlib import Path + +import pexpect +from test_base import TestBase + +from warnet.k8s import get_kubeconfig_value +from warnet.process import stream_command + + +class WargamesTest(TestBase): + def __init__(self): + super().__init__() + self.wargame_dir = Path(os.path.dirname(__file__)) / "data" / "wargames" + self.scen_src_dir = Path(os.path.dirname(__file__)).parent / "resources" / "scenarios" + self.scen_test_dir = ( + Path(os.path.dirname(__file__)).parent / "resources" / "scenarios" / "test_scenarios" + ) + self.initial_context = get_kubeconfig_value("{.current-context}") + + def run_test(self): + try: + self.setup_battlefield() + self.setup_armies() + self.check_scenario_permissions() + finally: + self.log.info("Restoring initial_context") + stream_command(f"kubectl config use-context {self.initial_context}") + self.cleanup() + + def setup_battlefield(self): + self.log.info("Setting up battlefield") + self.log.info(self.warnet(f"deploy {self.wargame_dir / 'networks' / 'battlefield'}")) + self.wait_for_all_tanks_status(target="running") + self.wait_for_all_edges() + + def setup_armies(self): + self.log.info("Deploying namespaces and armadas") + self.log.info(self.warnet(f"deploy {self.wargame_dir / 'namespaces' / 'armies'}")) + self.log.info( + self.warnet(f"deploy {self.wargame_dir / 'networks' / 'armada'} --to-all-users") + ) + self.wait_for_all_tanks_status(target="running") + self.wait_for_all_edges() + + def check_scenario_permissions(self): + self.log.info("Admin without --admin can not command a node outside of default namespace") + stream_command( + f"warnet run {self.scen_test_dir / 'generate_one_allnodes.py'} --source_dir={self.scen_src_dir} 
--debug" + ) + # Only miner.default and target-red.default were accesible + assert self.warnet("bitcoin rpc miner getblockcount") == "2" + + self.log.info("Admin with --admin can command all nodes in any namespace") + stream_command( + f"warnet run {self.scen_test_dir / 'generate_one_allnodes.py'} --source_dir={self.scen_src_dir} --admin --debug" + ) + # armada.wargames-red, miner.default and target-red.default were accesible + assert self.warnet("bitcoin rpc miner getblockcount") == "5" + + self.log.info("Switch to wargames player context") + self.log.info(self.warnet("admin create-kubeconfigs")) + clicker = pexpect.spawn("warnet auth kubeconfigs/warnet-user-wargames-red-kubeconfig") + while clicker.expect(["Overwrite", "Updated kubeconfig"]) == 0: + print(clicker.before, clicker.after) + clicker.sendline("y") + print(clicker.before, clicker.after) + + self.log.info("Player without --admin can only command the node inside their own namespace") + stream_command( + f"warnet run {self.scen_test_dir / 'generate_one_allnodes.py'} --source_dir={self.scen_src_dir} --debug" + ) + # Only armada.wargames-red was (and is) accesible + assert self.warnet("bitcoin rpc armada getblockcount") == "6" + + self.log.info("Player attempting to use --admin is gonna have a bad time") + stream_command( + f"warnet run {self.scen_test_dir / 'generate_one_allnodes.py'} --source_dir={self.scen_src_dir} --admin --debug" + ) + # Nothing was accesible + assert self.warnet("bitcoin rpc armada getblockcount") == "6" + + self.log.info("Restore admin context") + stream_command(f"kubectl config use-context {self.initial_context}") + # Sanity check + assert self.warnet("bitcoin rpc miner getblockcount") == "6" + + +if __name__ == "__main__": + test = WargamesTest() + test.run_test()