diff --git a/.dockerignore b/.dockerignore index d19d3d622..8daa6b84f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,8 +1,10 @@ .git +.github **/__pycache__ .trunk .venv +.envrc build dist **/*.egg-info diff --git a/.github/workflows/deploy-app.yml b/.github/workflows/deploy-app.yml new file mode 100644 index 000000000..649c78489 --- /dev/null +++ b/.github/workflows/deploy-app.yml @@ -0,0 +1,91 @@ +name: Publish Warnet app bundle image +on: + push: + branches: + - main + tags: + - '*' + pull_request: + branches: + - main +env: + REGISTRY_IMAGE: ${{ secrets.DOCKERHUB_USERNAME }}/warnet-app +jobs: + build: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + platform: + - linux/amd64 + - linux/arm64 + steps: + - name: Prepare + run: | + platform=${{ matrix.platform }} + echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY_IMAGE }} + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push by digest + id: build + uses: docker/build-push-action@v6 + with: + file: resources/images/warnet-bundle/Dockerfile + platforms: ${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} + outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true + - name: Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + - name: Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests-${{ env.PLATFORM_PAIR }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + merge: + runs-on: ubuntu-latest + needs: + - build + steps: + - name: Download digests + uses: 
actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-* + merge-multiple: true + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY_IMAGE }} + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Create manifest list and push + working-directory: /tmp/digests + run: | + docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ + $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) + - name: Inspect image + run: | + docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }} diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml deleted file mode 100644 index ac363ab0b..000000000 --- a/.github/workflows/deploy.yml +++ /dev/null @@ -1,61 +0,0 @@ -name: Publish Commander Docker image - -on: - push: - branches: - - main - paths: - - resources/images/commander/Dockerfile - - resources/scenarios/commander.py - tags-ignore: - - "*" - -jobs: - push_to_registry: - name: Push commander Docker image to Docker Hub - runs-on: ubuntu-latest - permissions: - packages: write - contents: read - attestations: write - id-token: write - steps: - - name: Check out the repo - uses: actions/checkout@v4 - - - name: Log in to Docker Hub - uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: bitcoindevproject/warnet-commander - tags: | - type=ref,event=tag - type=ref,event=pr - type=raw,value=latest,enable={{is_default_branch}} - labels: | - maintainer=bitcoindevproject - org.opencontainers.image.title=warnet-commander - 
org.opencontainers.image.description=Warnet Commander - - - name: Build and push Docker image - id: push - uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 - with: - context: . - file: resources/images/commander/Dockerfile - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - - - name: Generate artifact attestation - uses: actions/attest-build-provenance@v1 - with: - subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME}} - subject-digest: ${{ steps.push.outputs.digest }} - push-to-registry: true diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c717bddd4..acac68266 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -16,59 +16,39 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: eifinger/setup-uv@v1 - - run: uvx ruff check . + - name: Install a specific version of uv + uses: astral-sh/setup-uv@v3 + with: + version: "0.4.4" + enable-cache: true + - run: uvx ruff@0.6.8 check . ruff-format: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Install the latest version of uv - uses: astral-sh/setup-uv@v2 + - name: Install a specific version of uv + uses: astral-sh/setup-uv@v3 with: - version: "latest" + version: "0.4.4" enable-cache: true - - run: uvx ruff format . --check - - build-image: - needs: [ruff, ruff-format] - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Build and export - uses: docker/build-push-action@v5 - with: - file: resources/images/commander/Dockerfile - context: . - tags: bitcoindevproject/warnet-commander:latest - cache-from: type=gha - cache-to: type=gha,mode=max - outputs: type=docker,dest=/tmp/commander.tar - - - name: Upload artifact - uses: actions/upload-artifact@v4 - with: - name: commander - path: /tmp/commander.tar + - run: uvx ruff@0.6.8 format . 
--check test: - needs: [build-image] + needs: [ruff, ruff-format] runs-on: ubuntu-latest strategy: matrix: test: - conf_test.py - dag_connection_test.py + - graph_test.py - logging_test.py - rpc_test.py - services_test.py - signet_test.py - scenarios_test.py + - namespace_admin_test.py steps: - uses: actions/checkout@v4 - uses: azure/setup-helm@v4.2.0 @@ -78,11 +58,8 @@ jobs: with: cpus: max memory: 4000m - - name: Download commander artifact - uses: actions/download-artifact@v4 - with: - name: commander - path: /tmp + - name: Start minikube's loadbalancer tunnel + run: minikube tunnel &> /dev/null & - name: Install the latest version of uv uses: astral-sh/setup-uv@v2 with: @@ -92,12 +69,6 @@ jobs: run: uv python install $PYTHON_VERSION - name: Install project run: uv sync --all-extras --dev - - name: Install commander image - run: | - echo loading commander image into minikube docker - eval $(minikube -p minikube docker-env) - docker load --input /tmp/commander.tar - docker image ls -a - name: Run tests run: | source .venv/bin/activate @@ -122,24 +93,3 @@ jobs: name: kubernetes-logs-${{ matrix.test }} path: ./k8s-logs retention-days: 5 - test-without-mk: - runs-on: ubuntu-latest - strategy: - matrix: - test: - - graph_test.py - steps: - - uses: actions/checkout@v4 - - name: Install the latest version of uv - uses: astral-sh/setup-uv@v2 - with: - version: "latest" - enable-cache: true - - name: Install Python - run: uv python install $PYTHON_VERSION - - name: Install project - run: uv sync --all-extras --dev - - name: Run tests - run: | - source .venv/bin/activate - ./test/${{matrix.test}} diff --git a/.gitignore b/.gitignore index c42f6ba7f..f4b5d0076 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ warnet.egg-info .env dist/ build/ +**/kubeconfigs/ diff --git a/docs/admin.md b/docs/admin.md new file mode 100644 index 000000000..b888de3d8 --- /dev/null +++ b/docs/admin.md @@ -0,0 +1,69 @@ +# Admin + +## Connect to your cluster + +Ensure you are connected 
to your cluster because Warnet will use your current configuration to generate configurations for your users. + +```shell +$ warnet status +``` + +Observe that the output of the command matches your cluster. + +## Create an *admin* directory + +```shell +$ mkdir admin +$ cd admin +$ warnet admin init +``` + +Observe that there are now two folders within the *admin* directory: *namespaces* and *networks* + +## The *namespaces* directory +This directory contains a Helm chart named *two_namespaces_two_users*. + +Modify this chart based on the number of teams and users you have. + +Deploy the *two_namespaces_two_users* chart. + +```shell +$ warnet deploy namespaces/two_namespaces_two_users +``` + +Observe that this creates service accounts and namespaces in the cluster: + +```shell +$ kubectl get ns +$ kubectl get sa -A +``` + +### Creating Warnet invites +A Warnet invite is a Kubernetes config file. + +Create invites for each of your users. + +```shell +$ warnet admin create-kubeconfigs +``` + +Observe the *kubeconfigs* directory. It holds invites for each user. + +### Using Warnet invites +Users can connect to your wargame using their invite. + +```shell +$ warnet auth alice-wargames-red-team-kubeconfig +``` + +### Set up a network for your users +Before letting the users into your cluster, make sure to create a network of tanks for them to view. + + +```shell +$ warnet deploy networks/mynet --to-all-users +``` + +Observe that the *wargames-red-team* namespace now has tanks in it. + +**TODO**: What's the logging approach here? diff --git a/docs/developer-notes.md b/docs/developer-notes.md index 14d1b8d5b..061336d9a 100644 --- a/docs/developer-notes.md +++ b/docs/developer-notes.md @@ -21,15 +21,27 @@ pip install --upgrade pip pip install -e . 
``` -## Lint +## Formatting & linting This project primarily uses the `uv` python packaging tool: https://docs.astral.sh/uv/ along with the sister formatter/linter `ruff` https://docs.astral.sh/ruff/ +Refer to the `uv` documentation for installation methods: https://docs.astral.sh/uv/getting-started/installation/ + With `uv` installed you can add/remove dependencies using `uv add ` or `uv remove . This will update the [`uv.lock`](https://docs.astral.sh/uv/guides/projects/#uvlock) file automatically. +We use ruff version 0.6.8 in this project currently. This can be installed as a stand-alone binary (see documentation), or via `uv` using: + +```bash +# install +$ uv tool install ruff@0.6.8 + +# lint +$ uvx ruff@0.6.8 check . -`uv` can also run tools (like `ruff`) without external installation, simply run `uvx ruff check .` or `uvx ruff format .` to use a uv-managed format/lint on the project. +# format +$ uvx ruff@0.6.8 format . +``` ## Release process @@ -59,4 +71,4 @@ python3 -m build ```bash # Upload to Pypi python3 -m twine upload dist/* -``` \ No newline at end of file +``` diff --git a/docs/warnet.md b/docs/warnet.md index 707dc8b5b..8c00840c8 100644 --- a/docs/warnet.md +++ b/docs/warnet.md @@ -14,12 +14,12 @@ parameters in . 
## API Commands ### `warnet auth` -Authenticate with a warnet cluster using a kube config file +Authenticate with a Warnet cluster using a kubernetes config file options: | name | type | required | default | |-------------|--------|------------|-----------| -| kube_config | String | yes | | +| auth_config | String | yes | | ### `warnet create` Create a new warnet network @@ -33,14 +33,20 @@ Open the Warnet dashboard in default browser Deploy a warnet with topology loaded from \ options: -| name | type | required | default | -|-----------|--------|------------|-----------| -| directory | Path | yes | | -| debug | Bool | | False | +| name | type | required | default | +|--------------|--------|------------|-----------| +| directory | Path | yes | | +| debug | Bool | | False | +| namespace | String | | | +| to_all_users | Bool | | False | ### `warnet down` Bring down a running warnet quickly +options: +| name | type | required | default | +|--------|--------|------------|-----------| +| force | Bool | | False | ### `warnet init` Initialize a warnet project in the current directory @@ -50,10 +56,11 @@ Initialize a warnet project in the current directory Show the logs of a pod options: -| name | type | required | default | -|----------|--------|------------|-----------| -| pod_name | String | | "" | -| follow | Bool | | False | +| name | type | required | default | +|-----------|--------|------------|-----------| +| pod_name | String | | "" | +| follow | Bool | | False | +| namespace | String | | "default" | ### `warnet new` Create a new warnet project in the specified directory @@ -71,7 +78,10 @@ options: | name | type | required | default | |-----------------|--------|------------|-----------| | scenario_file | Path | yes | | +| debug | Bool | | False | +| source_dir | Path | | | | additional_args | String | | | +| namespace | String | | | ### `warnet setup` Setup warnet @@ -102,6 +112,15 @@ options: ## Admin +### `warnet admin create-kubeconfigs` +Create kubeconfig 
files for ServiceAccounts + +options: +| name | type | required | default | +|----------------|--------|------------|---------------| +| kubeconfig_dir | String | | "kubeconfigs" | +| token_duration | Int | | 172800 | + ### `warnet admin init` Initialize a warnet project in the current directory @@ -116,9 +135,10 @@ Namespaces commands Fetch the Bitcoin Core debug log from \ options: -| name | type | required | default | -|--------|--------|------------|-----------| -| tank | String | yes | | +| name | type | required | default | +|-----------|--------|------------|-----------| +| tank | String | yes | | +| namespace | String | | | ### `warnet bitcoin grep-logs` Grep combined bitcoind logs using regex \ @@ -133,6 +153,8 @@ options: ### `warnet bitcoin messages` Fetch messages sent between \ and \ in [chain] + Optionally, include a namespace like so: tank-name.namespace + options: | name | type | required | default | |--------|--------|------------|-----------| @@ -144,11 +166,12 @@ options: Call bitcoin-cli \ [params] on \ options: -| name | type | required | default | -|--------|--------|------------|-----------| -| tank | String | yes | | -| method | String | yes | | -| params | String | | | +| name | type | required | default | +|-----------|--------|------------|-----------| +| tank | String | yes | | +| method | String | yes | | +| params | String | | | +| namespace | String | | | ## Graph diff --git a/pyproject.toml b/pyproject.toml index 44e69dc38..b0f2320b0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "warnet" -version = "1.1.2" +version = "1.1.9" description = "Monitor and analyze the emergent behaviours of bitcoin networks" readme = "README.md" requires-python = ">=3.9" @@ -51,7 +51,7 @@ build-backend = "setuptools.build_meta" include-package-data = true [tool.setuptools.packages.find] -where = ["src", "."] +where = ["src", ".", "resources/scenarios"] include = ["warnet*", "test_framework*", "resources*"] 
[tool.setuptools.package-data] diff --git a/resources/charts/bitcoincore/templates/_helpers.tpl b/resources/charts/bitcoincore/templates/_helpers.tpl index dc3bf9ce6..26258b5de 100644 --- a/resources/charts/bitcoincore/templates/_helpers.tpl +++ b/resources/charts/bitcoincore/templates/_helpers.tpl @@ -58,11 +58,13 @@ Create the name of the service account to use {{/* -Add network section heading in bitcoin.conf after v0.17.0 +Add network section heading in bitcoin.conf +Always add for custom semver, check version for valid semver */}} {{- define "bitcoincore.check_semver" -}} -{{- $version := semverCompare ">=0.17.0" .Values.image.tag -}} -{{- if $version -}} +{{- $custom := contains "-" .Values.image.tag -}} +{{- $newer := semverCompare ">=0.17.0" .Values.image.tag -}} +{{- if or $newer $custom -}} [{{ .Values.chain }}] {{- end -}} {{- end -}} diff --git a/resources/charts/bitcoincore/templates/configmap.yaml b/resources/charts/bitcoincore/templates/configmap.yaml index 10985e49c..36c5ab389 100644 --- a/resources/charts/bitcoincore/templates/configmap.yaml +++ b/resources/charts/bitcoincore/templates/configmap.yaml @@ -11,9 +11,11 @@ data: {{ template "bitcoincore.check_semver" . }} {{- .Values.baseConfig | nindent 4 }} rpcport={{ index .Values .Values.chain "RPCPort" }} + rpcpassword={{ .Values.rpcpassword }} zmqpubrawblock=tcp://0.0.0.0:{{ .Values.ZMQBlockPort }} zmqpubrawtx=tcp://0.0.0.0:{{ .Values.ZMQTxPort }} + {{- .Values.defaultConfig | nindent 4 }} {{- .Values.config | nindent 4 }} - {{- range .Values.connect }} - {{- print "connect=" . | nindent 4}} - {{- end }} \ No newline at end of file + {{- range .Values.addnode }} + {{- print "addnode=" . 
| nindent 4}} + {{- end }} diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index bde1c9ea6..d7076e6e9 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -9,12 +9,13 @@ metadata: {{- end }} chain: {{ .Values.chain }} RPCPort: "{{ index .Values .Values.chain "RPCPort" }}" + rpcpassword: {{ .Values.rpcpassword }} app: {{ include "bitcoincore.fullname" . }} {{- if .Values.collectLogs }} collect_logs: "true" {{- end }} annotations: - init_peers: "{{ .Values.connect | len }}" + init_peers: "{{ .Values.addnode | len }}" spec: restartPolicy: "{{ .Values.restartPolicy }}" {{- with .Values.imagePullSecrets }} @@ -89,7 +90,7 @@ spec: - name: BITCOIN_RPC_USER value: user - name: BITCOIN_RPC_PASSWORD - value: password + value: {{ .Values.rpcpassword }} {{- if .Values.metrics }} - name: METRICS value: {{ .Values.metrics }} diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index 320d59c62..6314ae32c 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -124,16 +124,20 @@ baseConfig: | fallbackfee=0.00001000 listen=1 rpcuser=user - rpcpassword=password + # rpcpassword MUST be set as a chart value rpcallowip=0.0.0.0/0 rpcbind=0.0.0.0 rest=1 # rpcport and zmq endpoints are configured by chain in configmap.yaml +rpcpassword: gn0cchi config: "" -connect: [] +defaultConfig: "" + +addnode: [] + loadSnapshot: enabled: false url: "" diff --git a/resources/charts/caddy/templates/ingress.yaml b/resources/charts/caddy/templates/ingress.yaml new file mode 100644 index 000000000..79c9ca105 --- /dev/null +++ b/resources/charts/caddy/templates/ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: caddy-ingress + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "false" +spec: + ingressClassName: nginx + rules: + - http: + 
paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ include "caddy.fullname" . }} + port: + number: {{ .Values.port }} \ No newline at end of file diff --git a/resources/charts/commander/templates/configmap.yaml b/resources/charts/commander/templates/configmap.yaml deleted file mode 100644 index 9c45ea0d2..000000000 --- a/resources/charts/commander/templates/configmap.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "commander.fullname" . }}-scenario - labels: - {{- include "commander.labels" . | nindent 4 }} -binaryData: - scenario.py: {{ .Values.scenario }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "commander.fullname" . }}-warnet - labels: - {{- include "commander.labels" . | nindent 4 }} -binaryData: - warnet.json: {{ .Values.warnet }} diff --git a/resources/charts/commander/templates/pod.yaml b/resources/charts/commander/templates/pod.yaml index 94c79205f..1a9bb9310 100644 --- a/resources/charts/commander/templates/pod.yaml +++ b/resources/charts/commander/templates/pod.yaml @@ -8,25 +8,30 @@ metadata: mission: commander spec: restartPolicy: {{ .Values.restartPolicy }} + initContainers: + - name: init + image: busybox + command: ["/bin/sh", "-c"] + args: + - | + while [ ! -f /shared/archive.pyz ]; do + echo "Waiting for /shared/archive.pyz to exist..." 
+ sleep 2 + done + volumeMounts: + - name: shared-volume + mountPath: /shared containers: - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} + image: python:3.12-slim + imagePullPolicy: IfNotPresent command: ["/bin/sh", "-c"] args: - | - python3 /scenario.py {{ .Values.args }} + python3 /shared/archive.pyz {{ .Values.args }} volumeMounts: - - name: scenario - mountPath: /scenario.py - subPath: scenario.py - - name: warnet - mountPath: /warnet.json - subPath: warnet.json + - name: shared-volume + mountPath: /shared volumes: - - name: scenario - configMap: - name: {{ include "commander.fullname" . }}-scenario - - name: warnet - configMap: - name: {{ include "commander.fullname" . }}-warnet + - name: shared-volume + emptyDir: {} diff --git a/resources/charts/commander/values.yaml b/resources/charts/commander/values.yaml index fc7e8233d..55ad80f7a 100644 --- a/resources/charts/commander/values.yaml +++ b/resources/charts/commander/values.yaml @@ -5,12 +5,6 @@ namespace: warnet restartPolicy: Never -image: - repository: bitcoindevproject/warnet-commander - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. 
- tag: "latest" - imagePullSecrets: [] nameOverride: "" fullnameOverride: "" @@ -71,8 +65,4 @@ volumeMounts: [] port: -scenario: "" - -warnet: "" - args: "" diff --git a/resources/charts/namespaces/values.yaml b/resources/charts/namespaces/values.yaml index 61f946879..23ef66754 100644 --- a/resources/charts/namespaces/values.yaml +++ b/resources/charts/namespaces/values.yaml @@ -7,34 +7,37 @@ roles: - name: pod-viewer rules: - apiGroups: [""] - resources: ["pods"] + resources: ["pods", "services"] verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] verbs: ["get"] - apiGroups: [""] resources: ["configmaps", "secrets"] - verbs: ["get"] + verbs: ["get", "list"] - apiGroups: [""] - resources: ["persistentvolumeclaims"] + resources: ["persistentvolumeclaims", "namespaces"] verbs: ["get", "list"] - apiGroups: [""] - resources: ["events"] + resources: ["events", "pods/status"] verbs: ["get"] - name: pod-manager rules: - apiGroups: [""] - resources: ["pods"] + resources: ["pods", "services"] verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] - apiGroups: [""] resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] verbs: ["get", "create"] - apiGroups: [""] resources: ["configmaps", "secrets"] - verbs: ["get", "create"] + verbs: ["get", "list", "create", "update"] - apiGroups: [""] - resources: ["persistentvolumeclaims"] + resources: ["persistentvolumeclaims", "namespaces"] verbs: ["get", "list"] - apiGroups: [""] - resources: ["events"] - verbs: ["get"] \ No newline at end of file + resources: ["events", "pods/status"] + verbs: ["get"] diff --git a/resources/graphs/64tanks_txrate.graphml b/resources/graphs/64tanks_txrate.graphml deleted file mode 100644 index 98112d70e..000000000 --- a/resources/graphs/64tanks_txrate.graphml +++ /dev/null @@ -1,1165 +0,0 @@ - - - - 
- - - - - - - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - bitcoindevproject/bitcoin:27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() 
mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() 
mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() 
mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() 
mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - 27.0 - - - - True - txrate=getchaintxstats(10)["txrate"] blocks=getblockcount() mempool_size=getmempoolinfo()["size"] - False - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/resources/graphs/default.graphml b/resources/graphs/default.graphml deleted file mode 100644 index aad5448ad..000000000 --- a/resources/graphs/default.graphml +++ /dev/null @@ -1,90 +0,0 @@ - - - - - - - - - - - - - - - - - - - - 27.0 - 
uacomment=w0,debug=net - true - true - - - 27.0 - uacomment=w1 - true - true - - - bitcoindevproject/bitcoin:26.0 - uacomment=w2,debug=mempool - true - true - - - 27.0 - uacomment=w3 - true - - - 27.0 - uacomment=w4 - true - - - 27.0 - uacomment=w5 - true - - - 27.0 - uacomment=w6 - - - 27.0 - uacomment=w7 - - - 27.0 - uacomment=w8 - - - 27.0 - uacomment=w9 - - - 27.0 - uacomment=w10 - - - 27.0 - uacomment=w11 - - - - - - - - - - - - - - - - diff --git a/resources/graphs/tx_relay/15tps-100nodes-withoutpr.graphml b/resources/graphs/tx_relay/15tps-100nodes-withoutpr.graphml deleted file mode 100644 index 02bfd30b2..000000000 --- a/resources/graphs/tx_relay/15tps-100nodes-withoutpr.graphml +++ /dev/null @@ -1,1688 +0,0 @@ - - - - - - - - - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - True - True - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - True - True - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - 
bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - True - True - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - True - True - - - bitcoindevproject/tx-relay-test:without-pr - 
-blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - True - True - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - True - True - - - 
bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - True - True - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 
-debug=cmpctblock - - - True - True - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - True - True - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - 
bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - True - True - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - bitcoindevproject/tx-relay-test:without-pr - -blockmaxweight=3996000 -debug=cmpctblock - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/resources/images/bitcoin/insecure/Dockerfile b/resources/images/bitcoin/insecure/Dockerfile index e8bcb986c..6f59a4c2e 100644 --- a/resources/images/bitcoin/insecure/Dockerfile +++ b/resources/images/bitcoin/insecure/Dockerfile @@ -110,7 +110,7 @@ RUN --mount=type=cache,target=/ccache \ && ./autogen.sh \ && ./configure \ LDFLAGS=-L`ls -d /opt/db*`/lib/ \ - CPPFLAGS="-g0 -I`ls -d /opt/db*`/include/ --param ggc-min-expand=1 --param ggc-min-heapsize=32768" \ + CPPFLAGS="-I`ls -d /opt/db*`/include/ --param ggc-min-expand=1 --param ggc-min-heapsize=32768" \ --prefix=${BITCOIN_PREFIX} \ ${BUILD_ARGS} \ ${EXTRA_BUILD_ARGS} \ diff --git a/resources/images/bitcoin/insecure/build.md b/resources/images/bitcoin/insecure/build.md index 68502d51b..a824a8316 100644 --- a/resources/images/bitcoin/insecure/build.md +++ b/resources/images/bitcoin/insecure/build.md @@ -87,3 +87,112 @@ docker buildx build \ --tag bitcoindevproject/bitcoin:0.16.1 \ resources/images/bitcoin/insecure ``` + +## unknown p2p message crash + +Will crash when sent an "unknown" P2P message is received from a node using protocol version >= 70016 + +```bash +docker buildx build \ + --platform linux/amd64,linux/armhf \ + --build-context bitcoin-src="." 
\ + --build-arg ALPINE_VERSION="3.20" \ + --build-arg BITCOIN_VERSION="28.1.1" \ + --build-arg EXTRA_PACKAGES="sqlite-dev" \ + --build-arg EXTRA_RUNTIME_PACKAGES="" \ + --build-arg REPO="willcl-ark/bitcoin" \ + --build-arg COMMIT_SHA="df1768325cca49bb867b7919675ae06c964b5ffa" \ + --tag bitcoindevproject/bitcoin:99.1.0-unknown-message \ + resources/images/bitcoin/insecure +``` + +## invalid blocks crash + +Will crash when sent an invalid block + +```bash +docker buildx build \ + --platform linux/amd64,linux/armhf \ + --build-context bitcoin-src="." \ + --build-arg ALPINE_VERSION="3.20" \ + --build-arg BITCOIN_VERSION="28.1.1" \ + --build-arg EXTRA_PACKAGES="sqlite-dev" \ + --build-arg EXTRA_RUNTIME_PACKAGES="" \ + --build-arg REPO="willcl-ark/bitcoin" \ + --build-arg COMMIT_SHA="f72bc595fc762c7afcbd156f4f84bf48f7ff4fdb" \ + --tag bitcoindevproject/bitcoin:99.1.0-invalid-blocks \ + resources/images/bitcoin/insecure +``` + +## too many orphans crash + +Will crash when we have 50 orphans in the orphanage + +```bash +docker buildx build \ + --platform linux/amd64,linux/armhf \ + --build-context bitcoin-src="." \ + --build-arg ALPINE_VERSION="3.20" \ + --build-arg BITCOIN_VERSION="28.1.1" \ + --build-arg EXTRA_PACKAGES="sqlite-dev" \ + --build-arg EXTRA_RUNTIME_PACKAGES="" \ + --build-arg REPO="willcl-ark/bitcoin" \ + --build-arg COMMIT_SHA="38aff9d695f5aa187fc3b75f08228248963372ee" \ + --tag bitcoindevproject/bitcoin:99.1.0-50-orphans \ + resources/images/bitcoin/insecure +``` + +## full mempool crash + +Will crash when we would normally trim the mempool size. +Mempool set to 50MB by default. + +```bash +docker buildx build \ + --platform linux/amd64,linux/armhf \ + --build-context bitcoin-src="." 
\ + --build-arg ALPINE_VERSION="3.20" \ + --build-arg BITCOIN_VERSION="28.1.1" \ + --build-arg EXTRA_PACKAGES="sqlite-dev" \ + --build-arg EXTRA_RUNTIME_PACKAGES="" \ + --build-arg REPO="willcl-ark/bitcoin" \ + --build-arg COMMIT_SHA="d30f8112611c4732ccb01f0a0216eb7ed10e04a7" \ + --tag bitcoindevproject/bitcoin:99.1.0-no-mp-trim\ + resources/images/bitcoin/insecure +``` + +## disabled opcodes crash + +Will crash when processing a disabled opcode + +```bash +docker buildx build \ + --platform linux/amd64,linux/armhf \ + --build-context bitcoin-src="." \ + --build-arg ALPINE_VERSION="3.20" \ + --build-arg BITCOIN_VERSION="28.1.1" \ + --build-arg EXTRA_PACKAGES="sqlite-dev" \ + --build-arg EXTRA_RUNTIME_PACKAGES="" \ + --build-arg REPO="willcl-ark/bitcoin" \ + --build-arg COMMIT_SHA="51e068ed42727eee08af62e09eb5789d8b910f61" \ + --tag bitcoindevproject/bitcoin:99.1.0-disabled-opcodes \ + resources/images/bitcoin/insecure +``` + +## crash when 5k inv messages received + +Will crash when we receive a total of 5k `INV` p2p messages are received from a single peer. + +```bash +docker buildx build \ + --platform linux/amd64,linux/armhf \ + --build-context bitcoin-src="." 
\ + --build-arg ALPINE_VERSION="3.20" \ + --build-arg BITCOIN_VERSION="28.1.1" \ + --build-arg EXTRA_PACKAGES="sqlite-dev" \ + --build-arg EXTRA_RUNTIME_PACKAGES="" \ + --build-arg REPO="willcl-ark/bitcoin" \ + --build-arg COMMIT_SHA="3e1ce7de0d19f791315fa87e0d29504ee0c80fe8" \ + --tag bitcoindevproject/bitcoin:99.1.0-5k-inv \ + resources/images/bitcoin/insecure +``` diff --git a/resources/images/commander/Dockerfile b/resources/images/commander/Dockerfile deleted file mode 100644 index 3a8314c21..000000000 --- a/resources/images/commander/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -# Use an official Python runtime as the base image -FROM python:3.12-slim - -# Python dependencies -#RUN pip install --no-cache-dir prometheus_client - -COPY resources/scenarios/commander.py / -COPY src/test_framework /test_framework - -# -u: force the stdout and stderr streams to be unbuffered -ENTRYPOINT ["python", "-u", "/scenario.py"] diff --git a/resources/images/warnet-bundle/Dockerfile b/resources/images/warnet-bundle/Dockerfile new file mode 100644 index 000000000..e78f7811c --- /dev/null +++ b/resources/images/warnet-bundle/Dockerfile @@ -0,0 +1,21 @@ +FROM python:3.11-alpine + +# Install uv and ktop by copying from their docker images +COPY --from=ghcr.io/astral-sh/uv:0.4.12 /uv /bin/uv +COPY --from=ghcr.io/vladimirvivien/ktop:latest /ko-app/ktop /bin/ktop + +RUN apk add --no-cache bash curl ca-certificates git kubectl helm k9s fish vim mc nano + +# Setup venv and install warnet +ADD . 
/warnet +WORKDIR /warnet +RUN uv sync + +# Setup autocomplete +RUN mkdir -p /root/.config/fish/completions +RUN cat < /root/.config/fish/completions/warnet.fish +_WARNET_COMPLETE=fish_source warnet | source +EOF + +# Start +CMD ["/usr/bin/fish", "-c", "source /warnet/.venv/bin/activate.fish; cd /root; exec fish"] diff --git a/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml b/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml index 91ac2fc67..75cc8e42c 100644 --- a/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml +++ b/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml @@ -3,14 +3,16 @@ users: roles: - pod-viewer - pod-manager -roles: - - name: pod-viewer - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch"] - - name: pod-manager - rules: - - apiGroups: [""] - resources: ["pods", "configmaps"] - verbs: ["get", "list", "watch", "create", "update", "delete"] +# the pod-viewer and pod-manager roles are the default +# roles defined in values.yaml for the namespaces charts +# +# if you need a different set of roles for a particular namespaces +# deployment, you can override values.yaml by providing your own +# role definitions below +# +# roles: +# - name: my-custom-role +# rules: +# - apiGroups: "" +# resources: "" +# verbs: "" diff --git a/resources/namespaces/two_namespaces_two_users/namespaces.yaml b/resources/namespaces/two_namespaces_two_users/namespaces.yaml index 4172657b8..542456ef6 100644 --- a/resources/namespaces/two_namespaces_two_users/namespaces.yaml +++ b/resources/namespaces/two_namespaces_two_users/namespaces.yaml @@ -1,5 +1,5 @@ namespaces: - - name: warnet-red-team + - name: wargames-red-team users: - name: alice roles: @@ -8,42 +8,7 @@ namespaces: roles: - pod-viewer - pod-manager - roles: - - name: pod-viewer - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log", 
"pods/exec", "pods/attach", "pods/portforward"] - verbs: ["get"] - - apiGroups: [""] - resources: ["configmaps", "secrets"] - verbs: ["get"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get"] - - name: pod-manager - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch", "create", "delete", "update"] - - apiGroups: [""] - resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] - verbs: ["get", "create"] - - apiGroups: [""] - resources: ["configmaps", "secrets"] - verbs: ["get", "create"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get"] - - name: warnet-blue-team + - name: wargames-blue-team users: - name: mallory roles: @@ -52,38 +17,3 @@ namespaces: roles: - pod-viewer - pod-manager - roles: - - name: pod-viewer - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] - verbs: ["get"] - - apiGroups: [""] - resources: ["configmaps", "secrets"] - verbs: ["get"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get"] - - name: pod-manager - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch", "create", "delete", "update"] - - apiGroups: [""] - resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] - verbs: ["get", "create"] - - apiGroups: [""] - resources: ["configmaps", "secrets"] - verbs: ["get", "create"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get"] diff --git a/resources/networks/6_node_bitcoin/network.yaml b/resources/networks/6_node_bitcoin/network.yaml index 
21b05875d..f27007931 100644 --- a/resources/networks/6_node_bitcoin/network.yaml +++ b/resources/networks/6_node_bitcoin/network.yaml @@ -1,11 +1,13 @@ nodes: - name: tank-0001 + config: uacomment=tank0001 image: tag: "26.0" - connect: + addnode: - tank-0002 - tank-0003 - name: tank-0002 + config: uacomment=tank0002 resources: limits: cpu: 100m @@ -13,22 +15,23 @@ nodes: requests: cpu: 100m memory: 128Mi - connect: + addnode: - tank-0003 - tank-0004 - name: tank-0003 - connect: + config: uacomment=tank0003 + addnode: - tank-0004 - tank-0005 - name: tank-0004 - connect: + config: uacomment=tank0004 + addnode: - tank-0005 - tank-0006 - name: tank-0005 - connect: + config: uacomment=tank0005 + addnode: - tank-0006 - name: tank-0006 -fork_observer: - enabled: true caddy: enabled: true diff --git a/resources/networks/6_node_bitcoin/node-defaults.yaml b/resources/networks/6_node_bitcoin/node-defaults.yaml index a1454d8a1..3b0dabf61 100644 --- a/resources/networks/6_node_bitcoin/node-defaults.yaml +++ b/resources/networks/6_node_bitcoin/node-defaults.yaml @@ -21,6 +21,6 @@ image: # Overrides the image tag whose default is the chart appVersion. tag: "27.0" -config: | +defaultConfig: | dns=1 debug=rpc diff --git a/resources/networks/node-defaults.yaml b/resources/networks/fork_observer/node-defaults.yaml similarity index 98% rename from resources/networks/node-defaults.yaml rename to resources/networks/fork_observer/node-defaults.yaml index 55fdbbd04..c8b3bb46a 100644 --- a/resources/networks/node-defaults.yaml +++ b/resources/networks/fork_observer/node-defaults.yaml @@ -21,7 +21,7 @@ image: # Overrides the image tag whose default is the chart appVersion. 
tag: "27.0" -config: | +defaultConfig: | dns=1 debug=rpc rpcauth=forkobserver:1418183465eecbd407010cf60811c6a0$d4e5f0647a63429c218da1302d7f19fe627302aeb0a71a74de55346a25d8057c diff --git a/resources/scenarios/commander.py b/resources/scenarios/commander.py index 1ecf0b6c4..1f7d34a80 100644 --- a/resources/scenarios/commander.py +++ b/resources/scenarios/commander.py @@ -8,7 +8,6 @@ import signal import sys import tempfile -from pathlib import Path from typing import Dict from test_framework.authproxy import AuthServiceProxy @@ -21,7 +20,7 @@ from test_framework.test_node import TestNode from test_framework.util import PortSeed, get_rpc_proxy -WARNET_FILE = Path(os.path.dirname(__file__)) / "warnet.json" +WARNET_FILE = "/shared/warnet.json" try: with open(WARNET_FILE) as file: diff --git a/resources/scenarios/ln_init.py b/resources/scenarios/ln_init.py index 59df5e38e..82745a123 100644 --- a/resources/scenarios/ln_init.py +++ b/resources/scenarios/ln_init.py @@ -2,11 +2,7 @@ from time import sleep -# The base class exists inside the commander container -try: - from commander import Commander -except ImportError: - from resources.scenarios.commander import Commander +from commander import Commander class LNInit(Commander): @@ -185,5 +181,9 @@ def funded_lnnodes(): ) -if __name__ == "__main__": +def main(): LNInit().main() + + +if __name__ == "__main__": + main() diff --git a/resources/scenarios/miner_std.py b/resources/scenarios/miner_std.py index 5aa368d40..3fa06c7d3 100755 --- a/resources/scenarios/miner_std.py +++ b/resources/scenarios/miner_std.py @@ -2,11 +2,7 @@ from time import sleep -# The base class exists inside the commander container -try: - from commander import Commander -except ImportError: - from resources.scenarios.commander import Commander +from commander import Commander class Miner: @@ -45,15 +41,21 @@ def add_options(self, parser): action="store_true", help="When true, generate 101 blocks ONCE per miner", ) + parser.add_argument( + "--tank", + 
dest="tank", + type=str, + help="Select one tank by name as the only miner", + ) def run_test(self): self.log.info("Starting miners.") - - max_miners = 1 - if self.options.allnodes: - max_miners = len(self.nodes) - for index in range(max_miners): - self.miners.append(Miner(self.nodes[index], self.options.mature)) + if self.options.tank: + self.miners = [Miner(self.tanks[self.options.tank], self.options.mature)] + else: + max_miners = len(self.nodes) if self.options.allnodes else 1 + for index in range(max_miners): + self.miners.append(Miner(self.nodes[index], self.options.mature)) while True: for miner in self.miners: @@ -72,5 +74,9 @@ def run_test(self): sleep(self.options.interval) -if __name__ == "__main__": +def main(): MinerStd().main() + + +if __name__ == "__main__": + main() diff --git a/resources/scenarios/reconnaissance.py b/resources/scenarios/reconnaissance.py index 3fc2269e4..8c3f683cb 100755 --- a/resources/scenarios/reconnaissance.py +++ b/resources/scenarios/reconnaissance.py @@ -2,12 +2,7 @@ import socket -# The base class exists inside the commander container when deployed, -# but requires a relative path inside the python source code for other functions. -try: - from commander import Commander -except ImportError: - from resources.scenarios.commander import Commander +from commander import Commander # The entire Bitcoin Core test_framework directory is available as a library from test_framework.messages import MSG_TX, CInv, hash256, msg_getdata @@ -85,5 +80,9 @@ def run_test(self): self.log.info(f"Got notfound message from {dstaddr}:{dstport}") -if __name__ == "__main__": +def main(): Reconnaissance().main() + + +if __name__ == "__main__": + main() diff --git a/resources/scenarios/signet_miner.py b/resources/scenarios/signet_miner.py index 0edc635e3..e4375515b 100644 --- a/resources/scenarios/signet_miner.py +++ b/resources/scenarios/signet_miner.py @@ -11,11 +11,7 @@ # we use the authproxy from the test framework. 
### -# The base class exists inside the commander container -try: - from commander import Commander -except ImportError: - from resources.scenarios.commander import Commander +from commander import Commander import json import logging @@ -566,5 +562,8 @@ def get_args(parser): return args -if __name__ == "__main__": +def main(): SignetMinerScenario().main() + +if __name__ == "__main__": + main() diff --git a/resources/graphs/__init__.py b/resources/scenarios/test_framework/__init__.py similarity index 100% rename from resources/graphs/__init__.py rename to resources/scenarios/test_framework/__init__.py diff --git a/src/test_framework/address.py b/resources/scenarios/test_framework/address.py similarity index 100% rename from src/test_framework/address.py rename to resources/scenarios/test_framework/address.py diff --git a/src/test_framework/authproxy.py b/resources/scenarios/test_framework/authproxy.py similarity index 100% rename from src/test_framework/authproxy.py rename to resources/scenarios/test_framework/authproxy.py diff --git a/src/test_framework/bdb.py b/resources/scenarios/test_framework/bdb.py similarity index 100% rename from src/test_framework/bdb.py rename to resources/scenarios/test_framework/bdb.py diff --git a/src/test_framework/bip340_test_vectors.csv b/resources/scenarios/test_framework/bip340_test_vectors.csv similarity index 100% rename from src/test_framework/bip340_test_vectors.csv rename to resources/scenarios/test_framework/bip340_test_vectors.csv diff --git a/src/test_framework/blockfilter.py b/resources/scenarios/test_framework/blockfilter.py similarity index 100% rename from src/test_framework/blockfilter.py rename to resources/scenarios/test_framework/blockfilter.py diff --git a/src/test_framework/blocktools.py b/resources/scenarios/test_framework/blocktools.py similarity index 100% rename from src/test_framework/blocktools.py rename to resources/scenarios/test_framework/blocktools.py diff --git a/src/test_framework/coverage.py 
b/resources/scenarios/test_framework/coverage.py similarity index 100% rename from src/test_framework/coverage.py rename to resources/scenarios/test_framework/coverage.py diff --git a/src/test_framework/descriptors.py b/resources/scenarios/test_framework/descriptors.py similarity index 100% rename from src/test_framework/descriptors.py rename to resources/scenarios/test_framework/descriptors.py diff --git a/src/test_framework/ellswift.py b/resources/scenarios/test_framework/ellswift.py similarity index 100% rename from src/test_framework/ellswift.py rename to resources/scenarios/test_framework/ellswift.py diff --git a/src/test_framework/ellswift_decode_test_vectors.csv b/resources/scenarios/test_framework/ellswift_decode_test_vectors.csv similarity index 100% rename from src/test_framework/ellswift_decode_test_vectors.csv rename to resources/scenarios/test_framework/ellswift_decode_test_vectors.csv diff --git a/src/test_framework/key.py b/resources/scenarios/test_framework/key.py similarity index 100% rename from src/test_framework/key.py rename to resources/scenarios/test_framework/key.py diff --git a/src/test_framework/messages.py b/resources/scenarios/test_framework/messages.py similarity index 100% rename from src/test_framework/messages.py rename to resources/scenarios/test_framework/messages.py diff --git a/src/test_framework/muhash.py b/resources/scenarios/test_framework/muhash.py similarity index 100% rename from src/test_framework/muhash.py rename to resources/scenarios/test_framework/muhash.py diff --git a/src/test_framework/netutil.py b/resources/scenarios/test_framework/netutil.py similarity index 100% rename from src/test_framework/netutil.py rename to resources/scenarios/test_framework/netutil.py diff --git a/src/test_framework/p2p.py b/resources/scenarios/test_framework/p2p.py similarity index 100% rename from src/test_framework/p2p.py rename to resources/scenarios/test_framework/p2p.py diff --git a/src/test_framework/psbt.py 
b/resources/scenarios/test_framework/psbt.py similarity index 100% rename from src/test_framework/psbt.py rename to resources/scenarios/test_framework/psbt.py diff --git a/src/test_framework/ripemd160.py b/resources/scenarios/test_framework/ripemd160.py similarity index 100% rename from src/test_framework/ripemd160.py rename to resources/scenarios/test_framework/ripemd160.py diff --git a/src/test_framework/script.py b/resources/scenarios/test_framework/script.py similarity index 100% rename from src/test_framework/script.py rename to resources/scenarios/test_framework/script.py diff --git a/src/test_framework/script_util.py b/resources/scenarios/test_framework/script_util.py similarity index 100% rename from src/test_framework/script_util.py rename to resources/scenarios/test_framework/script_util.py diff --git a/src/test_framework/secp256k1.py b/resources/scenarios/test_framework/secp256k1.py similarity index 100% rename from src/test_framework/secp256k1.py rename to resources/scenarios/test_framework/secp256k1.py diff --git a/src/test_framework/segwit_addr.py b/resources/scenarios/test_framework/segwit_addr.py similarity index 100% rename from src/test_framework/segwit_addr.py rename to resources/scenarios/test_framework/segwit_addr.py diff --git a/src/test_framework/siphash.py b/resources/scenarios/test_framework/siphash.py similarity index 100% rename from src/test_framework/siphash.py rename to resources/scenarios/test_framework/siphash.py diff --git a/src/test_framework/socks5.py b/resources/scenarios/test_framework/socks5.py similarity index 100% rename from src/test_framework/socks5.py rename to resources/scenarios/test_framework/socks5.py diff --git a/src/test_framework/test_framework.py b/resources/scenarios/test_framework/test_framework.py similarity index 100% rename from src/test_framework/test_framework.py rename to resources/scenarios/test_framework/test_framework.py diff --git a/src/test_framework/test_node.py 
b/resources/scenarios/test_framework/test_node.py similarity index 100% rename from src/test_framework/test_node.py rename to resources/scenarios/test_framework/test_node.py diff --git a/src/test_framework/test_shell.py b/resources/scenarios/test_framework/test_shell.py similarity index 100% rename from src/test_framework/test_shell.py rename to resources/scenarios/test_framework/test_shell.py diff --git a/src/test_framework/util.py b/resources/scenarios/test_framework/util.py similarity index 100% rename from src/test_framework/util.py rename to resources/scenarios/test_framework/util.py diff --git a/src/test_framework/wallet.py b/resources/scenarios/test_framework/wallet.py similarity index 100% rename from src/test_framework/wallet.py rename to resources/scenarios/test_framework/wallet.py diff --git a/src/test_framework/wallet_util.py b/resources/scenarios/test_framework/wallet_util.py similarity index 100% rename from src/test_framework/wallet_util.py rename to resources/scenarios/test_framework/wallet_util.py diff --git a/src/test_framework/xswiftec_inv_test_vectors.csv b/resources/scenarios/test_framework/xswiftec_inv_test_vectors.csv similarity index 100% rename from src/test_framework/xswiftec_inv_test_vectors.csv rename to resources/scenarios/test_framework/xswiftec_inv_test_vectors.csv diff --git a/src/test_framework/__init__.py b/resources/scenarios/test_scenarios/__init__.py similarity index 100% rename from src/test_framework/__init__.py rename to resources/scenarios/test_scenarios/__init__.py diff --git a/test/data/scenario_buggy_failure.py b/resources/scenarios/test_scenarios/buggy_failure.py similarity index 95% rename from test/data/scenario_buggy_failure.py rename to resources/scenarios/test_scenarios/buggy_failure.py index 0867218d0..e982680d5 100644 --- a/test/data/scenario_buggy_failure.py +++ b/resources/scenarios/test_scenarios/buggy_failure.py @@ -20,5 +20,9 @@ def run_test(self): raise Exception("Failed execution!") -if __name__ == 
"__main__": +def main(): Failure().main() + + +if __name__ == "__main__": + main() diff --git a/test/data/scenario_connect_dag.py b/resources/scenarios/test_scenarios/connect_dag.py similarity index 99% rename from test/data/scenario_connect_dag.py rename to resources/scenarios/test_scenarios/connect_dag.py index 95e50ea28..5747291cb 100644 --- a/test/data/scenario_connect_dag.py +++ b/resources/scenarios/test_scenarios/connect_dag.py @@ -117,5 +117,9 @@ def assert_connection(self, connector, connectee_index, connection_type: Connect raise ValueError("ConnectionType must be of type DNS or IP") -if __name__ == "__main__": +def main(): ConnectDag().main() + + +if __name__ == "__main__": + main() diff --git a/test/data/scenario_p2p_interface.py b/resources/scenarios/test_scenarios/p2p_interface.py similarity index 97% rename from test/data/scenario_p2p_interface.py rename to resources/scenarios/test_scenarios/p2p_interface.py index b9d0ff65f..64636267c 100644 --- a/test/data/scenario_p2p_interface.py +++ b/resources/scenarios/test_scenarios/p2p_interface.py @@ -49,8 +49,12 @@ def run_test(self): good_getdata = msg_getdata() good_getdata.inv.append(CInv(t=2, h=best_block)) p2p_block_store.send_and_ping(good_getdata) - p2p_block_store.wait_until(lambda: p2p_block_store.blocks[best_block] == 1) + p2p_block_store.wait_until(lambda: p2p_block_store.blocks[best_block] >= 1) -if __name__ == "__main__": +def main(): GetdataTest().main() + + +if __name__ == "__main__": + main() diff --git a/resources/scenarios/tx_flood.py b/resources/scenarios/tx_flood.py index a4896e958..7a60bccc5 100755 --- a/resources/scenarios/tx_flood.py +++ b/resources/scenarios/tx_flood.py @@ -1,13 +1,10 @@ #!/usr/bin/env python3 + import threading from random import choice, randrange from time import sleep -# The base class exists inside the commander container -try: - from commander import Commander -except ImportError: - from resources.scenarios.commander import Commander +from commander import 
Commander class TXFlood(Commander): @@ -70,5 +67,9 @@ def run_test(self): sleep(30) -if __name__ == "__main__": +def main(): TXFlood().main() + + +if __name__ == "__main__": + main() diff --git a/resources/scripts/apidocs.py b/resources/scripts/apidocs.py index eb64fce64..cca6fdce7 100755 --- a/resources/scripts/apidocs.py +++ b/resources/scripts/apidocs.py @@ -29,7 +29,7 @@ def print_cmd(cmd, super=""): format_default_value(p["default"], p["type"]["param_type"]), ] for p in cmd["params"] - if p["name"] != "help" + if p["name"] != "help" and p["name"] != "unknown_args" ] doc += tabulate(data, headers=headers, tablefmt="github") doc += "\n\n" diff --git a/resources/scripts/setup_minikube.sh b/resources/scripts/setup_minikube.sh deleted file mode 100755 index f835eed7c..000000000 --- a/resources/scripts/setup_minikube.sh +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail -set +x -set +v - -if [ -z "${WAR_RPC+x}" ]; then - echo "WAR_RPC is unset. Please provide a path to warnet RPC images." - exit 1 -fi - -ERROR_CODE=0 - -# Colors and styles -RESET='\033[0m' -BOLD='\033[1m' - -# Use colors if we can and have the color space -if command -v tput &> /dev/null; then - ncolors=$(tput colors) - if [ -n "$ncolors" ] && [ "$ncolors" -ge 8 ]; then - RESET=$(tput sgr0) - BOLD=$(tput bold) - fi -fi - -print_message() { - local color="$1" - local message="$2" - local format="${3:-}" - echo -e "${format}${color}${message}${RESET}" -} - -print_partial_message() { - local pre_message="$1" - local formatted_part="$2" - local post_message="$3" - local format="${4:-}" # Default to empty string if not provided - local color="${5:-$RESET}" - - echo -e "${color}${pre_message}${format}${formatted_part}${RESET}${color}${post_message}${RESET}" -} - -docker_path=$(command -v docker || true) -if [ -n "$docker_path" ]; then - print_partial_message " ⭐️ Found " "docker" ": $docker_path" "$BOLD" -else - print_partial_message " 💥 Could not find " "docker" ". 
Please follow this link to install Docker Engine..." "$BOLD" - print_message "" " https://docs.docker.com/engine/install/" "$BOLD" - ERROR_CODE=127 -fi - -current_user=$(whoami) -current_context=$(docker context show) -if id -nG "$current_user" | grep -qw "docker"; then - print_partial_message " ⭐️ Found " "$current_user" " in the docker group" "$BOLD" -elif [ "$current_context" == "rootless" ]; then - print_message " " "⭐️ Running Docker as rootless" "$BOLD" -elif [[ "$(uname)" == "Darwin" ]]; then - print_message " " "⭐️ Running Docker on Darwin" "$BOLD" -else - print_partial_message " 💥 Could not find " "$current_user" " in the docker group. Please add it like this..." "$BOLD" - print_message "" " sudo usermod -aG docker $current_user && newgrp docker" "$BOLD" - ERROR_CODE=1 -fi - -minikube_path=$(command -v minikube || true) -if [ -n "$minikube_path" ]; then - print_partial_message " ⭐️ Found " "minikube" ": $minikube_path " "$BOLD" -else - print_partial_message " 💥 Could not find " "minikube" ". Please follow this link to install it..." "$BOLD" - print_message "" " https://minikube.sigs.k8s.io/docs/start/" "$BOLD" - ERROR_CODE=127 -fi - -kubectl_path=$(command -v kubectl || true) -if [ -n "$kubectl_path" ]; then - print_partial_message " ⭐️ Found " "kubectl" ": $kubectl_path " "$BOLD" -else - print_partial_message " 💥 Could not find " "kubectl" ". Please follow this link to install it..." "$BOLD" - print_message "" " https://kubernetes.io/docs/tasks/tools/" "$BOLD" - ERROR_CODE=127 -fi - -helm_path=$(command -v helm || true) -if [ -n "$helm_path" ]; then - print_partial_message " ⭐️ Found " "helm" ": $helm_path" "$BOLD" -else - print_partial_message " 💥 Could not find " "helm" ". Please follow this link to install it..." "$BOLD" - print_message "" " https://helm.sh/docs/intro/install/" "$BOLD" - ERROR_CODE=127 -fi - -if [ $ERROR_CODE -ne 0 ]; then - print_message "" "There were errors in the setup process. Please fix them and try again." 
"$BOLD" - exit $ERROR_CODE -fi - -# Check minikube status -minikube delete - -# Prepare minikube start command -MINIKUBE_CMD="minikube start --driver=docker --container-runtime=containerd --mount --mount-string=\"$PWD:/mnt/src\"" - -# Check for WAR_CPU and add to command if set -if [ -n "${WAR_CPU:-}" ]; then - MINIKUBE_CMD="$MINIKUBE_CMD --cpus=$WAR_CPU" -fi - -# Check for WAR_MEM and add to command if set -if [ -n "${WAR_MEM:-}" ]; then - MINIKUBE_CMD="$MINIKUBE_CMD --memory=${WAR_MEM}m" -fi - -# Start minikube with the constructed command -eval "$MINIKUBE_CMD" - -echo -print_message "" "Next, run the following command to deploy warnet" "" -print_message "" " warnet cluster deploy" "$BOLD" -print_partial_message " After that, run " "warnet network start" " to start the network." "$BOLD" - diff --git a/resources/scripts/setup_user_contexts.sh b/resources/scripts/setup_user_contexts.sh deleted file mode 100755 index 5a4b631b2..000000000 --- a/resources/scripts/setup_user_contexts.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env bash - -# Function to check if a command exists -command_exists() { - command -v "$1" >/dev/null 2>&1 -} - -# Function to display usage information -usage() { - echo "Usage: $0 [kubeconfig_directory] [token_duration]" - echo " namespace: The Kubernetes namespace" - echo " kubeconfig_directory: Directory to store kubeconfig files (default: kubeconfigs)" - echo " token_duration: Duration of the token in seconds (default: 600 seconds / 10 minutes)" - exit 1 -} - -# Check for required commands -if ! command_exists kubectl; then - echo "kubectl is not installed. Please install it and try again." 
- exit 1 -fi - -# Check if namespace argument is provided -if [ $# -eq 0 ]; then - usage -fi - -NAMESPACE=$1 -KUBECONFIG_DIR=${2:-"kubeconfigs"} -TOKEN_DURATION=${3:-600} - -CLUSTER_NAME=$(kubectl config view --minify -o jsonpath='{.clusters[0].name}') -CLUSTER_SERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}') -CLUSTER_CA=$(kubectl config view --minify --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}') - -# Create the directory to store the kubeconfig files -mkdir -p "$KUBECONFIG_DIR" - -# Get all ServiceAccounts in the namespace -SERVICE_ACCOUNTS=$(kubectl get serviceaccounts -n $NAMESPACE -o jsonpath='{.items[*].metadata.name}') - -for SA in $SERVICE_ACCOUNTS; do - echo "Processing ServiceAccount: $SA" - - # Create a token for the ServiceAccount with specified duration - TOKEN=$(kubectl create token $SA -n $NAMESPACE --duration="${TOKEN_DURATION}s") - - if [ -z "$TOKEN" ]; then - echo "Failed to create token for ServiceAccount $SA. Skipping..." - continue - fi - - # Create a kubeconfig file for the user - KUBECONFIG_FILE="$KUBECONFIG_DIR/${SA}-${NAMESPACE}-kubeconfig" - - cat << EOF > "$KUBECONFIG_FILE" -apiVersion: v1 -kind: Config -clusters: -- name: ${CLUSTER_NAME} - cluster: - server: ${CLUSTER_SERVER} - certificate-authority-data: ${CLUSTER_CA} -users: -- name: ${SA} - user: - token: ${TOKEN} -contexts: -- name: ${SA}-${NAMESPACE} - context: - cluster: ${CLUSTER_NAME} - namespace: ${NAMESPACE} - user: ${SA} -current-context: ${SA}-${NAMESPACE} -EOF - - echo "Created kubeconfig file for $SA: $KUBECONFIG_FILE" - echo "Token duration: ${TOKEN_DURATION} seconds" - echo "To use this config, run: kubectl --kubeconfig=$KUBECONFIG_FILE get pods" - echo "---" -done - -echo "All kubeconfig files have been created in the '$KUBECONFIG_DIR' directory." -echo "Distribute these files to the respective users." 
-echo "Users can then use them with kubectl by specifying the --kubeconfig flag or by setting the KUBECONFIG environment variable." -echo "Note: The tokens will expire after ${TOKEN_DURATION} seconds." diff --git a/ruff.toml b/ruff.toml index 1e17fe2d6..1c20c0bc2 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,5 +1,6 @@ +required-version = "==0.6.8" extend-exclude = [ - "resources/images/commander/src/test_framework", + "resources/scenarios/test_framework", "resources/images/exporter/authproxy.py", "resources/scenarios/signet_miner.py", "src/test_framework/*", diff --git a/src/warnet/admin.py b/src/warnet/admin.py index f194e16bd..233a220e9 100644 --- a/src/warnet/admin.py +++ b/src/warnet/admin.py @@ -1,12 +1,22 @@ import os +import sys from pathlib import Path import click +import yaml from rich import print as richprint -from .constants import NETWORK_DIR +from .constants import KUBECONFIG, NETWORK_DIR, WARGAMES_NAMESPACE_PREFIX +from .k8s import ( + K8sError, + get_cluster_of_current_context, + get_namespaces_by_type, + get_service_accounts_in_namespace, + open_kubeconfig, +) from .namespaces import copy_namespaces_defaults, namespaces from .network import copy_network_defaults +from .process import run_command @click.group(name="admin", hidden=True) @@ -33,3 +43,94 @@ def init(): f"[green]Copied network and namespace example files to {Path(current_dir) / NETWORK_DIR.name}[/green]" ) richprint(f"[green]Created warnet project structure in {current_dir}[/green]") + + +@admin.command() +@click.option( + "--kubeconfig-dir", + default="kubeconfigs", + help="Directory to store kubeconfig files (default: kubeconfigs)", +) +@click.option( + "--token-duration", + default=172800, + type=int, + help="Duration of the token in seconds (default: 48 hours)", +) +def create_kubeconfigs(kubeconfig_dir, token_duration): + """Create kubeconfig files for ServiceAccounts""" + kubeconfig_dir = os.path.expanduser(kubeconfig_dir) + + try: + kubeconfig_data = open_kubeconfig(KUBECONFIG) + 
except K8sError as e: + click.secho(e, fg="yellow") + click.secho(f"Could not open auth_config: {KUBECONFIG}", fg="red") + sys.exit(1) + + cluster = get_cluster_of_current_context(kubeconfig_data) + + os.makedirs(kubeconfig_dir, exist_ok=True) + + # Get all namespaces that start with prefix + # This assumes when deploying multiple namespaces for the purpose of team games, all namespaces start with a prefix, + # e.g., tabconf-wargames-*. Currently, this is a bit brittle, but we can improve on this in the future + # by automatically applying a TEAM_PREFIX when creating the get_warnet_namespaces + # TODO: choose a prefix convention and have it managed by the helm charts instead of requiring the + # admin user to pipe through the correct string in multiple places. Another would be to use + # labels instead of namespace naming conventions + warnet_namespaces = get_namespaces_by_type(WARGAMES_NAMESPACE_PREFIX) + + for v1namespace in warnet_namespaces: + namespace = v1namespace.metadata.name + click.echo(f"Processing namespace: {namespace}") + service_accounts = get_service_accounts_in_namespace(namespace) + + for sa in service_accounts: + # Create a token for the ServiceAccount with specified duration + command = f"kubectl create token {sa} -n {namespace} --duration={token_duration}s" + try: + token = run_command(command) + except Exception as e: + click.echo( + f"Failed to create token for ServiceAccount {sa} in namespace {namespace}. Error: {str(e)}. Skipping..." 
+ ) + continue + + # Create a kubeconfig file for the user + kubeconfig_file = os.path.join(kubeconfig_dir, f"{sa}-{namespace}-kubeconfig") + + # TODO: move yaml out of python code to resources/manifests/ + # + # might not be worth it since we are just reading the yaml to then create a bunch of values and its not + # actually used to deploy anything into the cluster + # Then benefit would be making this code a bit cleaner and easy to follow, fwiw + kubeconfig_dict = { + "apiVersion": "v1", + "kind": "Config", + "clusters": [cluster], + "users": [{"name": sa, "user": {"token": token}}], + "contexts": [ + { + "name": f"{sa}-{namespace}", + "context": {"cluster": cluster["name"], "namespace": namespace, "user": sa}, + } + ], + "current-context": f"{sa}-{namespace}", + } + + # Write to a YAML file + with open(kubeconfig_file, "w") as f: + yaml.dump(kubeconfig_dict, f, default_flow_style=False) + + click.echo(f" Created kubeconfig file for {sa}: {kubeconfig_file}") + + click.echo("---") + click.echo( + f"All kubeconfig files have been created in the '{kubeconfig_dir}' directory with a duration of {token_duration} seconds." + ) + click.echo("Distribute these files to the respective users.") + click.echo( + "Users can then use by running `warnet auth ` or with kubectl by specifying the --kubeconfig flag or by setting the KUBECONFIG environment variable." 
+ ) + click.echo(f"Note: The tokens will expire after {token_duration} seconds.") diff --git a/src/warnet/bitcoin.py b/src/warnet/bitcoin.py index a27da3bc7..9d0c54f50 100644 --- a/src/warnet/bitcoin.py +++ b/src/warnet/bitcoin.py @@ -3,14 +3,15 @@ import sys from datetime import datetime from io import BytesIO +from typing import Optional import click -from urllib3.exceptions import MaxRetryError - from test_framework.messages import ser_uint256 from test_framework.p2p import MESSAGEMAP +from urllib3.exceptions import MaxRetryError -from .k8s import get_default_namespace, get_mission +from .constants import BITCOINCORE_CONTAINER +from .k8s import get_default_namespace_or, get_mission, pod_log from .process import run_command @@ -23,36 +24,39 @@ def bitcoin(): @click.argument("tank", type=str) @click.argument("method", type=str) @click.argument("params", type=str, nargs=-1) # this will capture all remaining arguments -def rpc(tank: str, method: str, params: str): +@click.option("--namespace", default=None, show_default=True) +def rpc(tank: str, method: str, params: str, namespace: Optional[str]): """ Call bitcoin-cli [params] on """ try: - result = _rpc(tank, method, params) + result = _rpc(tank, method, params, namespace) except Exception as e: print(f"{e}") sys.exit(1) print(result) -def _rpc(tank: str, method: str, params: str): +def _rpc(tank: str, method: str, params: str, namespace: Optional[str] = None): # bitcoin-cli should be able to read bitcoin.conf inside the container # so no extra args like port, chain, username or password are needed - namespace = get_default_namespace() + namespace = get_default_namespace_or(namespace) if params: - cmd = f"kubectl -n {namespace} exec {tank} -- bitcoin-cli {method} {' '.join(map(str, params))}" + cmd = f"kubectl -n {namespace} exec {tank} --container {BITCOINCORE_CONTAINER} -- bitcoin-cli {method} {' '.join(map(str, params))}" else: - cmd = f"kubectl -n {namespace} exec {tank} -- bitcoin-cli {method}" + cmd = 
f"kubectl -n {namespace} exec {tank} --container {BITCOINCORE_CONTAINER} -- bitcoin-cli {method}" return run_command(cmd) @bitcoin.command() @click.argument("tank", type=str, required=True) -def debug_log(tank: str): +@click.option("--namespace", default=None, show_default=True) +def debug_log(tank: str, namespace: Optional[str]): """ Fetch the Bitcoin Core debug log from """ - cmd = f"kubectl logs {tank}" + namespace = get_default_namespace_or(namespace) + cmd = f"kubectl logs {tank} --namespace {namespace}" try: print(run_command(cmd)) except Exception as e: @@ -75,35 +79,32 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): sys.exit(1) matching_logs = [] + longest_namespace_len = 0 for tank in tanks: - pod_name = tank.metadata.name - # Get container names for this pod - containers = tank.spec.containers - if not containers: - continue + if len(tank.metadata.namespace) > longest_namespace_len: + longest_namespace_len = len(tank.metadata.namespace) - # Use the first container name - container_name = containers[0].name - if not container_name: - continue - - # Get logs from the specific container - command = f"kubectl logs {pod_name} -c {container_name} --timestamps" - logs = run_command(command) + pod_name = tank.metadata.name + logs = pod_log(pod_name, BITCOINCORE_CONTAINER) if logs is not False: - # Process logs - for log_entry in logs.splitlines(): - if re.search(pattern, log_entry): - matching_logs.append((log_entry, pod_name)) + try: + for line in logs: + log_entry = line.decode("utf-8").rstrip() + if re.search(pattern, log_entry): + matching_logs.append((log_entry, tank.metadata.namespace, pod_name)) + except Exception as e: + print(e) + except KeyboardInterrupt: + print("Interrupted streaming log!") # Sort logs if needed if not no_sort: matching_logs.sort(key=lambda x: x[0]) # Print matching logs - for log_entry, pod_name in matching_logs: + for log_entry, namespace, pod_name in matching_logs: try: # Split the log entry into 
Kubernetes timestamp, Bitcoin timestamp, and the rest of the log k8s_timestamp, rest = log_entry.split(" ", 1) @@ -111,9 +112,13 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): # Format the output based on the show_k8s_timestamps option if show_k8s_timestamps: - print(f"{pod_name}: {k8s_timestamp} {bitcoin_timestamp} {log_message}") + print( + f"{pod_name} {namespace:<{longest_namespace_len}} {k8s_timestamp} {bitcoin_timestamp} {log_message}" + ) else: - print(f"{pod_name}: {bitcoin_timestamp} {log_message}") + print( + f"{pod_name} {namespace:<{longest_namespace_len}} {bitcoin_timestamp} {log_message}" + ) except ValueError: # If we can't parse the timestamps, just print the original log entry print(f"{pod_name}: {log_entry}") @@ -128,13 +133,41 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): def messages(tank_a: str, tank_b: str, chain: str): """ Fetch messages sent between and in [chain] + + Optionally, include a namespace like so: tank-name.namespace """ + + def parse_name_and_namespace(tank: str) -> tuple[str, Optional[str]]: + tank_split = tank.split(".") + try: + namespace = tank_split[1] + except IndexError: + namespace = None + return tank_split[0], namespace + + tank_a_split = tank_a.split(".") + tank_b_split = tank_b.split(".") + if len(tank_a_split) > 2 or len(tank_b_split) > 2: + click.secho("Accepted formats: tank-name OR tank-name.namespace") + click.secho(f"Foramts found: {tank_a} {tank_b}") + sys.exit(1) + + tank_a, namespace_a = parse_name_and_namespace(tank_a) + tank_b, namespace_b = parse_name_and_namespace(tank_b) + try: + namespace_a = get_default_namespace_or(namespace_a) + namespace_b = get_default_namespace_or(namespace_b) + # Get the messages - messages = get_messages(tank_a, tank_b, chain) + messages = get_messages( + tank_a, tank_b, chain, namespace_a=namespace_a, namespace_b=namespace_b + ) if not messages: - print(f"No messages found between {tank_a} and {tank_b}") + print( + f"No 
messages found between {tank_a} ({namespace_a}) and {tank_b} ({namespace_b})" + ) return # Process and print messages @@ -159,7 +192,7 @@ def messages(tank_a: str, tank_b: str, chain: str): print(f"Error fetching messages between nodes {tank_a} and {tank_b}: {e}") -def get_messages(tank_a: str, tank_b: str, chain: str): +def get_messages(tank_a: str, tank_b: str, chain: str, namespace_a: str, namespace_b: str): """ Fetch messages from the message capture files """ @@ -167,15 +200,17 @@ def get_messages(tank_a: str, tank_b: str, chain: str): base_dir = f"/root/.bitcoin/{subdir}message_capture" # Get the IP of node_b - cmd = f"kubectl get pod {tank_b} -o jsonpath='{{.status.podIP}}'" + cmd = f"kubectl get pod {tank_b} -o jsonpath='{{.status.podIP}}' --namespace {namespace_b}" tank_b_ip = run_command(cmd).strip() # Get the service IP of node_b - cmd = f"kubectl get service {tank_b} -o jsonpath='{{.spec.clusterIP}}'" + cmd = ( + f"kubectl get service {tank_b} -o jsonpath='{{.spec.clusterIP}}' --namespace {namespace_b}" + ) tank_b_service_ip = run_command(cmd).strip() # List directories in the message capture folder - cmd = f"kubectl exec {tank_a} -- ls {base_dir}" + cmd = f"kubectl exec {tank_a} --namespace {namespace_a} -- ls {base_dir}" dirs = run_command(cmd).splitlines() @@ -186,7 +221,7 @@ def get_messages(tank_a: str, tank_b: str, chain: str): for file, outbound in [["msgs_recv.dat", False], ["msgs_sent.dat", True]]: file_path = f"{base_dir}/{dir_name}/{file}" # Fetch the file contents from the container - cmd = f"kubectl exec {tank_a} -- cat {file_path}" + cmd = f"kubectl exec {tank_a} --namespace {namespace_a} -- cat {file_path}" import subprocess blob = subprocess.run( diff --git a/src/warnet/constants.py b/src/warnet/constants.py index 4ce1ef2a5..46f33a3fe 100644 --- a/src/warnet/constants.py +++ b/src/warnet/constants.py @@ -11,9 +11,18 @@ tag for index, tag in enumerate(reversed(SUPPORTED_TAGS)) for _ in range(index + 1) ] -DEFAULT_NAMESPACE = "warnet" 
+DEFAULT_NAMESPACE = "default" LOGGING_NAMESPACE = "warnet-logging" -HELM_COMMAND = "helm upgrade --install --create-namespace" +INGRESS_NAMESPACE = "ingress" +WARGAMES_NAMESPACE_PREFIX = "wargames-" +KUBE_INTERNAL_NAMESPACES = ["kube-node-lease", "kube-public", "kube-system", "kubernetes-dashboard"] +HELM_COMMAND = "helm upgrade --install" + +TANK_MISSION = "tank" +COMMANDER_MISSION = "commander" + +BITCOINCORE_CONTAINER = "bitcoincore" +COMMANDER_CONTAINER = "commander" # Directories and files for non-python assets, e.g., helm charts, example scenarios, default configs SRC_DIR = files("warnet") @@ -35,6 +44,7 @@ NAMESPACES_CHART_LOCATION = CHARTS_DIR.joinpath("namespaces") FORK_OBSERVER_CHART = str(files("resources.charts").joinpath("fork-observer")) CADDY_CHART = str(files("resources.charts").joinpath("caddy")) +CADDY_INGRESS_NAME = "caddy-ingress" DEFAULT_NETWORK = Path("6_node_bitcoin") DEFAULT_NAMESPACES = Path("two_namespaces_two_users") @@ -93,8 +103,94 @@ "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", "helm repo update", f"helm upgrade --install --namespace warnet-logging --create-namespace --values {MANIFESTS_DIR}/loki_values.yaml loki grafana/loki --version 5.47.2", - "helm upgrade --install --namespace warnet-logging promtail grafana/promtail", - "helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --set grafana.enabled=false", - f"helm upgrade --install grafana-dashboards {CHARTS_DIR}/grafana-dashboards --namespace warnet-logging", - f"helm upgrade --install --namespace warnet-logging loki-grafana grafana/grafana --values {MANIFESTS_DIR}/grafana_values.yaml", + "helm upgrade --install --namespace warnet-logging promtail grafana/promtail --create-namespace", + "helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --create-namespace --set 
grafana.enabled=false", + f"helm upgrade --install grafana-dashboards {CHARTS_DIR}/grafana-dashboards --namespace warnet-logging --create-namespace", + f"helm upgrade --install --namespace warnet-logging --create-namespace loki-grafana grafana/grafana --values {MANIFESTS_DIR}/grafana_values.yaml", +] + + +INGRESS_HELM_COMMANDS = [ + "helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx", + "helm repo update", + f"helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx --namespace {INGRESS_NAMESPACE} --create-namespace", +] + +# Helm binary +HELM_DOWNLOAD_URL_STUB = "https://get.helm.sh/" +HELM_BINARY_NAME = "helm" +HELM_BLESSED_VERSION = "v3.16.1" +HELM_BLESSED_NAME_AND_CHECKSUMS = [ + { + "name": "helm-v3.16.1-darwin-amd64.tar.gz", + "checksum": "1b194824e36da3e3889920960a93868b541c7888c905a06757e88666cfb562c9", + }, + { + "name": "helm-v3.16.1-darwin-arm64.tar.gz", + "checksum": "405a3b13f0e194180f7b84010dfe86689d7703e80612729882ad71e2a4ef3504", + }, + { + "name": "helm-v3.16.1-linux-amd64.tar.gz", + "checksum": "e57e826410269d72be3113333dbfaac0d8dfdd1b0cc4e9cb08bdf97722731ca9", + }, + { + "name": "helm-v3.16.1-linux-arm.tar.gz", + "checksum": "a15a8ddfc373628b13cd2a987206756004091a1f6a91c3b9ee8de6f0b1e2ce90", + }, + { + "name": "helm-v3.16.1-linux-arm64.tar.gz", + "checksum": "780b5b86f0db5546769b3e9f0204713bbdd2f6696dfdaac122fbe7f2f31541d2", + }, + { + "name": "helm-v3.16.1-linux-386.tar.gz", + "checksum": "92d7a47a90734b50528ffffc99cd1b2d4b9fc0f4291bac92c87ef03406a5a7b2", + }, + { + "name": "helm-v3.16.1-linux-ppc64le.tar.gz", + "checksum": "9f0178957c94516eff9a3897778edb93d78fab1f76751bd282883f584ea81c23", + }, + { + "name": "helm-v3.16.1-linux-s390x.tar.gz", + "checksum": "357f8b441cc535240f1b0ba30a42b44571d4c303dab004c9e013697b97160360", + }, + { + "name": "helm-v3.16.1-linux-riscv64.tar.gz", + "checksum": "9a2cab45b7d9282e9be7b42f86d8034dcaa2e81ab338642884843676c2f6929f", + }, + { + "name": "helm-v3.16.1-windows-amd64.zip", + 
"checksum": "89952ea1bace0a9498053606296ea03cf743c48294969dfc731e7f78d1dc809a", + }, + { + "name": "helm-v3.16.1-windows-arm64.zip", + "checksum": "fc370a291ed926da5e77acf42006de48e7fd5ff94d20c3f6aa10c04fea66e53c", + }, +] + + +# Kubectl binary +KUBECTL_BINARY_NAME = "kubectl" +KUBECTL_BLESSED_VERSION = "v1.31.1" +KUBECTL_DOWNLOAD_URL_STUB = f"https://dl.k8s.io/release/{KUBECTL_BLESSED_VERSION}/bin" +KUBECTL_BLESSED_NAME_AND_CHECKSUMS = [ + { + "system": "linux", + "arch": "amd64", + "checksum": "57b514a7facce4ee62c93b8dc21fda8cf62ef3fed22e44ffc9d167eab843b2ae", + }, + { + "system": "linux", + "arch": "arm64", + "checksum": "3af2451191e27ecd4ac46bb7f945f76b71e934d54604ca3ffc7fe6f5dd123edb", + }, + { + "system": "darwin", + "arch": "amd64", + "checksum": "4b86d3fb8dee8dd61f341572f1ba13c1030d493f4dc1b4831476f61f3cbb77d0", + }, + { + "system": "darwin", + "arch": "arm64", + "checksum": "08909b92e62004f4f1222dfd39214085383ea368bdd15c762939469c23484634", + }, ] diff --git a/src/warnet/control.py b/src/warnet/control.py index 929b2b187..83d358a4e 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -1,27 +1,44 @@ -import base64 +import io import json import os import subprocess import sys import time +import zipapp from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path +from typing import Optional import click import inquirer from inquirer.themes import GreenPassion +from kubernetes.client.models import V1Pod from rich import print from rich.console import Console from rich.prompt import Confirm, Prompt from rich.table import Table -from .constants import COMMANDER_CHART, LOGGING_NAMESPACE -from .deploy import _port_stop_internal +from .constants import ( + BITCOINCORE_CONTAINER, + COMMANDER_CHART, + COMMANDER_CONTAINER, + COMMANDER_MISSION, + TANK_MISSION, +) from .k8s import ( + can_delete_pods, + delete_pod, get_default_namespace, + get_default_namespace_or, get_mission, + get_namespaces, + get_pod, get_pods, + pod_log, 
snapshot_bitcoin_datadir, + wait_for_init, + wait_for_pod, + write_file_to_container, ) from .process import run_command, stream_command @@ -102,12 +119,15 @@ def stop_all_scenarios(scenarios): console.print("[bold green]All scenarios have been stopped.[/bold green]") +@click.option( + "--force", + is_flag=True, + default=False, + help="Skip confirmations", +) @click.command() -def down(): +def down(force): """Bring down a running warnet quickly""" - console.print("[bold yellow]Bringing down the warnet...[/bold yellow]") - - namespaces = [get_default_namespace(), LOGGING_NAMESPACE] def uninstall_release(namespace, release_name): cmd = f"helm uninstall {release_name} --namespace {namespace} --wait=false" @@ -119,29 +139,67 @@ def delete_pod(pod_name, namespace): subprocess.Popen(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) return f"Initiated deletion of pod: {pod_name} in namespace {namespace}" + if not can_delete_pods(): + click.secho("You do not have permission to bring down the network.", fg="red") + return + + namespaces = get_namespaces() + release_list: list[dict[str, str]] = [] + for v1namespace in namespaces: + namespace = v1namespace.metadata.name + command = f"helm list --namespace {namespace} -o json" + result = run_command(command) + if result: + releases = json.loads(result) + for release in releases: + release_list.append({"namespace": namespace, "name": release["name"]}) + + if not force: + affected_namespaces = set([entry["namespace"] for entry in release_list]) + namespace_listing = "\n ".join(affected_namespaces) + confirmed = "confirmed" + click.secho("Preparing to bring down the running Warnet...", fg="yellow") + click.secho("The listed namespaces will be affected:", fg="yellow") + click.secho(f" {namespace_listing}", fg="blue") + + proj_answers = inquirer.prompt( + [ + inquirer.Confirm( + confirmed, + message=click.style( + "Do you want to bring down the running Warnet?", fg="yellow", bold=False + ), + default=False, + 
), + ] + ) + if not proj_answers: + click.secho("Operation cancelled by user.", fg="yellow") + sys.exit(0) + if proj_answers[confirmed]: + click.secho("Bringing down the warnet...", fg="yellow") + else: + click.secho("Operation cancelled by user", fg="yellow") + sys.exit(0) + with ThreadPoolExecutor(max_workers=10) as executor: futures = [] # Uninstall Helm releases - for namespace in namespaces: - command = f"helm list --namespace {namespace} -o json" - result = run_command(command) - if result: - releases = json.loads(result) - for release in releases: - futures.append(executor.submit(uninstall_release, namespace, release["name"])) + for release in release_list: + futures.append( + executor.submit(uninstall_release, release["namespace"], release["name"]) + ) # Delete remaining pods pods = get_pods() - for pod in pods.items: + for pod in pods: futures.append(executor.submit(delete_pod, pod.metadata.name, pod.metadata.namespace)) # Wait for all tasks to complete and print results for future in as_completed(futures): console.print(f"[yellow]{future.result()}[/yellow]") - # Shutdown any port forwarding - _port_stop_internal("caddy", namespaces[1]) console.print("[bold yellow]Teardown process initiated for all components.[/bold yellow]") console.print("[bold yellow]Note: Some processes may continue in the background.[/bold yellow]") console.print("[bold green]Warnet teardown process completed.[/bold green]") @@ -163,20 +221,39 @@ def get_active_network(namespace): @click.command(context_settings={"ignore_unknown_options": True}) @click.argument("scenario_file", type=click.Path(exists=True, file_okay=True, dir_okay=False)) +@click.option( + "--debug", + is_flag=True, + default=False, + help="Stream scenario output and delete container when stopped", +) +@click.option( + "--source_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True), required=False +) @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) -def run(scenario_file: str, 
additional_args: tuple[str]): +@click.option("--namespace", default=None, show_default=True) +def run( + scenario_file: str, + debug: bool, + source_dir, + additional_args: tuple[str], + namespace: Optional[str], +): """ Run a scenario from a file. Pass `-- --help` to get individual scenario help """ + namespace = get_default_namespace_or(namespace) + scenario_path = Path(scenario_file).resolve() + scenario_dir = scenario_path.parent if not source_dir else Path(source_dir).resolve() scenario_name = scenario_path.stem - with open(scenario_path, "rb") as file: - scenario_data = base64.b64encode(file.read()).decode() + if additional_args and ("--help" in additional_args or "-h" in additional_args): + return subprocess.run([sys.executable, scenario_path, "--help"]) + # Collect tank data for warnet.json name = f"commander-{scenario_name.replace('_', '')}-{int(time.time())}" - namespace = get_default_namespace() tankpods = get_mission("tank") tanks = [ { @@ -185,15 +262,52 @@ def run(scenario_file: str, additional_args: tuple[str]): "rpc_host": tank.status.pod_ip, "rpc_port": int(tank.metadata.labels["RPCPort"]), "rpc_user": "user", - "rpc_password": "password", + "rpc_password": tank.metadata.labels["rpcpassword"], "init_peers": [], } for tank in tankpods ] - # Encode warnet data - warnet_data = base64.b64encode(json.dumps(tanks).encode()).decode() + # Encode tank data for warnet.json + warnet_data = json.dumps(tanks).encode() + + # Create in-memory buffer to store python archive instead of writing to disk + archive_buffer = io.BytesIO() + + # No need to copy the entire scenarios/ directory into the archive + def filter(path): + if any(needle in str(path) for needle in [".pyc", ".csv", ".DS_Store"]): + return False + if any( + needle in str(path) + for needle in ["__init__.py", "commander.py", "test_framework", scenario_path.name] + ): + print(f"Including: {path}") + return True + return False + + # In case the scenario file is not in the root of the archive directory, 
+ # we need to specify its relative path as a submodule + # First get the path of the file relative to the source directory + relative_path = scenario_path.relative_to(scenario_dir) + # Remove the '.py' extension + relative_name = relative_path.with_suffix("") + # Replace path separators with dots and pray the user included __init__.py + module_name = ".".join(relative_name.parts) + # Compile python archive + zipapp.create_archive( + source=scenario_dir, + target=archive_buffer, + main=f"{module_name}:main", + compressed=True, + filter=filter, + ) + + # Encode the binary data as Base64 + archive_buffer.seek(0) + archive_data = archive_buffer.read() + # Start the commander pod with python and init containers try: # Construct Helm command helm_command = [ @@ -204,17 +318,11 @@ def run(scenario_file: str, additional_args: tuple[str]): namespace, "--set", f"fullnameOverride={name}", - "--set", - f"scenario={scenario_data}", - "--set", - f"warnet={warnet_data}", ] # Add additional arguments if additional_args: helm_command.extend(["--set", f"args={' '.join(additional_args)}"]) - if "--help" in additional_args or "-h" in additional_args: - return subprocess.run([sys.executable, scenario_path, "--help"]) helm_command.extend([name, COMMANDER_CHART]) @@ -222,62 +330,107 @@ def run(scenario_file: str, additional_args: tuple[str]): result = subprocess.run(helm_command, check=True, capture_output=True, text=True) if result.returncode == 0: - print(f"Successfully started scenario: {scenario_name}") + print(f"Successfully deployed scenario commander: {scenario_name}") print(f"Commander pod name: {name}") else: - print(f"Failed to start scenario: {scenario_name}") + print(f"Failed to deploy scenario commander: {scenario_name}") print(f"Error: {result.stderr}") except subprocess.CalledProcessError as e: - print(f"Failed to start scenario: {scenario_name}") + print(f"Failed to deploy scenario commander: {scenario_name}") print(f"Error: {e.stderr}") + # upload scenario files and 
network data to the init container + wait_for_init(name, namespace=namespace) + if write_file_to_container( + name, "init", "/shared/warnet.json", warnet_data, namespace=namespace + ) and write_file_to_container( + name, "init", "/shared/archive.pyz", archive_data, namespace=namespace + ): + print(f"Successfully uploaded scenario data to commander: {scenario_name}") + + if debug: + print("Waiting for commander pod to start...") + wait_for_pod(name, namespace=namespace) + _logs(pod_name=name, follow=True, namespace=namespace) + print("Deleting pod...") + delete_pod(name, namespace=namespace) + @click.command() @click.argument("pod_name", type=str, default="") @click.option("--follow", "-f", is_flag=True, default=False, help="Follow logs") -def logs(pod_name: str, follow: bool): +@click.option("--namespace", type=str, default="default", show_default=True) +def logs(pod_name: str, follow: bool, namespace: str): """Show the logs of a pod""" - follow_flag = "--follow" if follow else "" - namespace = get_default_namespace() + return _logs(pod_name, follow, namespace) - if pod_name: + +def _logs(pod_name: str, follow: bool, namespace: Optional[str] = None): + namespace = get_default_namespace_or(namespace) + + def format_pods(pods: list[V1Pod]) -> list[str]: + sorted_pods = sorted(pods, key=lambda pod: pod.metadata.creation_timestamp, reverse=True) + return [f"{pod.metadata.name}: {pod.metadata.namespace}" for pod in sorted_pods] + + if pod_name == "": try: - command = f"kubectl logs pod/{pod_name} -n {namespace} {follow_flag}" - stream_command(command) - return + pod_list = [] + formatted_commanders = format_pods(get_mission(COMMANDER_MISSION)) + formatted_tanks = format_pods(get_mission(TANK_MISSION)) + pod_list.extend(formatted_commanders) + pod_list.extend(formatted_tanks) + except Exception as e: - print(f"Could not find the pod {pod_name}: {e}") + print(f"Could not fetch any pods in namespace ({namespace}): {e}") + return + + if not pod_list: + print(f"Could not 
fetch any pods in namespace ({namespace})") + return + + q = [ + inquirer.List( + name="pod", + message="Please choose a pod", + choices=pod_list, + ) + ] + selected = inquirer.prompt(q, theme=GreenPassion()) + if selected: + pod_name, namespace = selected["pod"].split(": ") + else: + return # cancelled by user try: - pods = run_command(f"kubectl get pods -n {namespace} -o json") - pods = json.loads(pods) - pod_list = [item["metadata"]["name"] for item in pods["items"]] + pod = get_pod(pod_name, namespace=namespace) + eligible_container_names = [BITCOINCORE_CONTAINER, COMMANDER_CONTAINER] + available_container_names = [container.name for container in pod.spec.containers] + container_name = next( + ( + container_name + for container_name in available_container_names + if container_name in eligible_container_names + ), + None, + ) + if not container_name: + print("Could not determine primary container.") + return except Exception as e: - print(f"Could not fetch any pods in namespace {namespace}: {e}") + print(f"Error getting pods. Could not determine primary container: {e}") return - if not pod_list: - print(f"Could not fetch any pods in namespace {namespace}") - return - - q = [ - inquirer.List( - name="pod", - message="Please choose a pod", - choices=pod_list, + try: + stream = pod_log( + pod_name, container_name=container_name, namespace=namespace, follow=follow ) - ] - selected = inquirer.prompt(q, theme=GreenPassion()) - if selected: - pod_name = selected["pod"] - try: - command = f"kubectl logs pod/{pod_name} -n {namespace} {follow_flag}" - stream_command(command) - except Exception as e: - print(f"Please consider waiting for the pod to become available. 
Encountered: {e}") - else: - pass # cancelled by user + for line in stream: + click.echo(line.decode("utf-8").rstrip()) + except Exception as e: + print(e) + except KeyboardInterrupt: + print("Interrupted streaming log!") @click.command() diff --git a/src/warnet/dashboard.py b/src/warnet/dashboard.py index 28ac0cba6..67efe286b 100644 --- a/src/warnet/dashboard.py +++ b/src/warnet/dashboard.py @@ -1,11 +1,27 @@ import click +from .k8s import get_ingress_ip_or_host, wait_for_ingress_controller + @click.command() def dashboard(): """Open the Warnet dashboard in default browser""" import webbrowser - url = "http://localhost:2019" + wait_for_ingress_controller() + ip = get_ingress_ip_or_host() + + if not ip: + click.echo("Error: Could not get the IP address of the dashboard") + click.echo( + "If you are running Minikube please run 'minikube tunnel' in a separate terminal" + ) + click.echo( + "If you are running in the cloud, you may need to wait a short while while the load balancer is provisioned" + ) + return + + url = f"http://{ip}" + webbrowser.open(url) - click.echo("warnet dashboard opened in default browser") + click.echo("Warnet dashboard opened in default browser") diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index 0bcbd2c0f..d9b5a45b5 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -1,8 +1,8 @@ -import os import subprocess import sys import tempfile from pathlib import Path +from typing import Optional import click import yaml @@ -14,43 +14,77 @@ DEFAULTS_NAMESPACE_FILE, FORK_OBSERVER_CHART, HELM_COMMAND, + INGRESS_HELM_COMMANDS, LOGGING_HELM_COMMANDS, LOGGING_NAMESPACE, NAMESPACES_CHART_LOCATION, NAMESPACES_FILE, NETWORK_FILE, + WARGAMES_NAMESPACE_PREFIX, +) +from .k8s import ( + get_default_namespace, + get_default_namespace_or, + get_mission, + get_namespaces_by_type, + wait_for_ingress_controller, + wait_for_pod_ready, ) -from .k8s import get_default_namespace, wait_for_caddy_ready from .process import stream_command +HINT = 
"\nAre you trying to run a scenario? See `warnet run --help`" + def validate_directory(ctx, param, value): directory = Path(value) if not directory.is_dir(): - raise click.BadParameter(f"'{value}' is not a valid directory.") + raise click.BadParameter(f"'{value}' is not a valid directory.{HINT}") if not (directory / NETWORK_FILE).exists() and not (directory / NAMESPACES_FILE).exists(): raise click.BadParameter( - f"'{value}' does not contain a valid network.yaml or namespaces.yaml file." + f"'{value}' does not contain a valid network.yaml or namespaces.yaml file.{HINT}" ) return directory -@click.command() +@click.command(context_settings={"ignore_unknown_options": True}) @click.argument( "directory", - type=click.Path(exists=True, file_okay=False, dir_okay=True), + type=click.Path(exists=True), callback=validate_directory, ) @click.option("--debug", is_flag=True) -def deploy(directory, debug): +@click.option("--namespace", type=str, help="Specify a namespace in which to deploy the network") +@click.option("--to-all-users", is_flag=True, help="Deploy network to all user namespaces") +@click.argument("unknown_args", nargs=-1) +def deploy(directory, debug, namespace, to_all_users, unknown_args): + """Deploy a warnet with topology loaded from """ + if unknown_args: + raise click.BadParameter(f"Unknown args: {unknown_args}{HINT}") + + if to_all_users: + namespaces = get_namespaces_by_type(WARGAMES_NAMESPACE_PREFIX) + for namespace in namespaces: + _deploy(directory, debug, namespace.metadata.name, False) + else: + _deploy(directory, debug, namespace, to_all_users) + + +def _deploy(directory, debug, namespace, to_all_users): """Deploy a warnet with topology loaded from """ directory = Path(directory) + if to_all_users: + namespaces = get_namespaces_by_type(WARGAMES_NAMESPACE_PREFIX) + for namespace in namespaces: + deploy(directory, debug, namespace.metadata.name, False) + return + if (directory / NETWORK_FILE).exists(): dl = deploy_logging_stack(directory, debug) - 
deploy_network(directory, debug) + deploy_network(directory, debug, namespace=namespace) df = deploy_fork_observer(directory, debug) if dl | df: + deploy_ingress(debug) deploy_caddy(directory, debug) elif (directory / NAMESPACES_FILE).exists(): deploy_namespaces(directory) @@ -118,8 +152,21 @@ def deploy_caddy(directory: Path, debug: bool): click.echo(f"Failed to run Helm command: {cmd}") return - wait_for_caddy_ready(name, namespace) - _port_start_internal(name, namespace) + wait_for_pod_ready(name, namespace) + click.echo("\nTo access the warnet dashboard run:\n warnet dashboard") + + +def deploy_ingress(debug: bool): + click.echo("Deploying ingress controller") + + for command in INGRESS_HELM_COMMANDS: + if not stream_command(command): + print(f"Failed to run Helm command: {command}") + return False + + wait_for_ingress_controller() + + return True def deploy_fork_observer(directory: Path, debug: bool) -> bool: @@ -133,7 +180,7 @@ def deploy_fork_observer(directory: Path, debug: bool) -> bool: default_namespace = get_default_namespace() namespace = LOGGING_NAMESPACE - cmd = f"{HELM_COMMAND} 'fork-observer' {FORK_OBSERVER_CHART} --namespace {namespace}" + cmd = f"{HELM_COMMAND} 'fork-observer' {FORK_OBSERVER_CHART} --namespace {namespace} --create-namespace" if debug: cmd += " --debug" @@ -141,15 +188,22 @@ def deploy_fork_observer(directory: Path, debug: bool) -> bool: override_string = "" # Add an entry for each node in the graph - for i, node in enumerate(network_file["nodes"]): - node_name = node.get("name") + for i, tank in enumerate(get_mission("tank")): + node_name = tank.metadata.name + for container in tank.spec.containers: + if container.name == "bitcoincore": + for port in container.ports: + if port.name == "rpc": + rpcport = port.container_port + if port.name == "p2p": + p2pport = port.container_port node_config = f""" [[networks.nodes]] id = {i} name = "{node_name}" -description = "A node. Just A node." 
+description = "{node_name}.{default_namespace}.svc:{int(p2pport)}" rpc_host = "{node_name}.{default_namespace}.svc" -rpc_port = 18443 +rpc_port = {int(rpcport)} rpc_user = "forkobserver" rpc_password = "tabconf2024" """ @@ -175,15 +229,15 @@ def deploy_fork_observer(directory: Path, debug: bool) -> bool: return True -def deploy_network(directory: Path, debug: bool = False): +def deploy_network(directory: Path, debug: bool = False, namespace: Optional[str] = None): network_file_path = directory / NETWORK_FILE defaults_file_path = directory / DEFAULTS_FILE + namespace = get_default_namespace_or(namespace) + with network_file_path.open() as f: network_file = yaml.safe_load(f) - namespace = get_default_namespace() - for node in network_file["nodes"]: click.echo(f"Deploying node: {node.get('name')}") try: @@ -223,16 +277,17 @@ def deploy_namespaces(directory: Path): names = [n.get("name") for n in namespaces_file["namespaces"]] for n in names: - if not n.startswith("warnet-"): - click.echo( - f"Failed to create namespace: {n}. Namespaces must start with a 'warnet-' prefix." + if not n.startswith(WARGAMES_NAMESPACE_PREFIX): + click.secho( + f"Failed to create namespace: {n}. 
Namespaces must start with a '{WARGAMES_NAMESPACE_PREFIX}' prefix.", + fg="red", ) return for namespace in namespaces_file["namespaces"]: click.echo(f"Deploying namespace: {namespace.get('name')}") try: - temp_override_file_path = Path() + temp_override_file_path = "" namespace_name = namespace.get("name") namespace_config_override = {k: v for k, v in namespace.items() if k != "name"} @@ -253,7 +308,7 @@ def deploy_namespaces(directory: Path): click.echo(f"Error: {e}") return finally: - if temp_override_file_path.exists(): + if temp_override_file_path: temp_override_file_path.unlink() @@ -279,19 +334,3 @@ def run_detached_process(command): subprocess.Popen(command, shell=True, stdin=None, stdout=None, stderr=None, close_fds=True) print(f"Started detached process: {command}") - - -def _port_start_internal(name, namespace): - click.echo("Starting port-forwarding to warnet dashboard") - command = f"kubectl port-forward -n {namespace} service/{name} 2019:80" - run_detached_process(command) - click.echo("Port forwarding on port 2019 started in the background.") - click.echo("\nTo access the warnet dashboard visit localhost:2019 or run:\n warnet dashboard") - - -def _port_stop_internal(name, namespace): - if is_windows(): - os.system("taskkill /F /IM kubectl.exe") - else: - os.system(f"pkill -f 'kubectl port-forward -n {namespace} service/{name} 2019:80'") - click.echo("Port forwarding stopped.") diff --git a/src/warnet/graph.py b/src/warnet/graph.py index 6e5b3fd6b..390686486 100644 --- a/src/warnet/graph.py +++ b/src/warnet/graph.py @@ -38,11 +38,11 @@ def custom_graph( connections = set() for i in range(num_nodes): - node = {"name": f"tank-{i:04d}", "connect": [], "image": {"tag": version}} + node = {"name": f"tank-{i:04d}", "addnode": [], "image": {"tag": version}} # Add round-robin connection next_node = (i + 1) % num_nodes - node["connect"].append(f"tank-{next_node:04d}") + node["addnode"].append(f"tank-{next_node:04d}") connections.add((i, next_node)) # Add random 
connections @@ -55,7 +55,7 @@ def custom_graph( random_node = random.choice(available_nodes) # Avoid circular loops of A -> B -> A if (random_node, i) not in connections: - node["connect"].append(f"tank-{random_node:04d}") + node["addnode"].append(f"tank-{random_node:04d}") connections.add((i, random_node)) available_nodes.remove(random_node) @@ -74,7 +74,9 @@ def custom_graph( yaml.dump(network_yaml_data, f, default_flow_style=False) # Generate node-defaults.yaml - default_yaml_path = files("resources.networks").joinpath("node-defaults.yaml") + default_yaml_path = ( + files("resources.networks").joinpath("fork_observer").joinpath("node-defaults.yaml") + ) with open(str(default_yaml_path)) as f: defaults_yaml_content = yaml.safe_load(f) diff --git a/src/warnet/k8s.py b/src/warnet/k8s.py index cacf8f65a..9354eb903 100644 --- a/src/warnet/k8s.py +++ b/src/warnet/k8s.py @@ -3,18 +3,33 @@ import sys import tempfile from pathlib import Path +from time import sleep +from typing import Optional import yaml from kubernetes import client, config, watch -from kubernetes.client.models import CoreV1Event, V1PodList +from kubernetes.client import CoreV1Api +from kubernetes.client.models import V1Namespace, V1Pod, V1PodList +from kubernetes.client.rest import ApiException from kubernetes.dynamic import DynamicClient from kubernetes.stream import stream -from .constants import DEFAULT_NAMESPACE, KUBECONFIG +from .constants import ( + CADDY_INGRESS_NAME, + DEFAULT_NAMESPACE, + INGRESS_NAMESPACE, + KUBE_INTERNAL_NAMESPACES, + KUBECONFIG, + LOGGING_NAMESPACE, +) from .process import run_command, stream_command -def get_static_client() -> CoreV1Event: +class K8sError(Exception): + pass + + +def get_static_client() -> CoreV1Api: config.load_kube_config(config_file=KUBECONFIG) return client.CoreV1Api() @@ -24,28 +39,41 @@ def get_dynamic_client() -> DynamicClient: return DynamicClient(client.ApiClient()) -def get_pods() -> V1PodList: +def get_pods() -> list[V1Pod]: sclient = 
get_static_client() - try: - pod_list: V1PodList = sclient.list_namespaced_pod(get_default_namespace()) - except Exception as e: - raise e - return pod_list + pods: list[V1Pod] = [] + namespaces = get_namespaces() + for ns in namespaces: + namespace = ns.metadata.name + try: + pod_list: V1PodList = sclient.list_namespaced_pod(namespace) + for pod in pod_list.items: + pods.append(pod) + except Exception as e: + raise e + return pods + + +def get_pod(name: str, namespace: Optional[str] = None) -> V1Pod: + namespace = get_default_namespace_or(namespace) + sclient = get_static_client() + return sclient.read_namespaced_pod(name=name, namespace=namespace) -def get_mission(mission: str) -> list[V1PodList]: +def get_mission(mission: str) -> list[V1Pod]: pods = get_pods() - crew = [] - for pod in pods.items: + crew: list[V1Pod] = [] + for pod in pods: if "mission" in pod.metadata.labels and pod.metadata.labels["mission"] == mission: crew.append(pod) return crew -def get_pod_exit_status(pod_name): +def get_pod_exit_status(pod_name, namespace: Optional[str] = None): + namespace = get_default_namespace_or(namespace) try: sclient = get_static_client() - pod = sclient.read_namespaced_pod(name=pod_name, namespace=get_default_namespace()) + pod = sclient.read_namespaced_pod(name=pod_name, namespace=namespace) for container_status in pod.status.container_statuses: if container_status.state.terminated: return container_status.state.terminated.exit_code @@ -55,9 +83,10 @@ def get_pod_exit_status(pod_name): return None -def get_edges() -> any: +def get_edges(namespace: Optional[str] = None) -> any: + namespace = get_default_namespace_or(namespace) sclient = get_static_client() - configmap = sclient.read_namespaced_config_map(name="edges", namespace="warnet") + configmap = sclient.read_namespaced_config_map(name="edges", namespace=namespace) return json.loads(configmap.data["data"]) @@ -109,8 +138,9 @@ def delete_namespace(namespace: str) -> bool: return run_command(command) -def 
delete_pod(pod_name: str) -> bool: - command = f"kubectl delete pod {pod_name}" +def delete_pod(pod_name: str, namespace: Optional[str] = None) -> bool: + namespace = get_default_namespace_or(namespace) + command = f"kubectl -n {namespace} delete pod {pod_name}" return stream_command(command) @@ -129,10 +159,18 @@ def get_default_namespace() -> str: return kubectl_namespace if kubectl_namespace else DEFAULT_NAMESPACE +def get_default_namespace_or(namespace: Optional[str]) -> str: + return namespace if namespace else get_default_namespace() + + def snapshot_bitcoin_datadir( - pod_name: str, chain: str, local_path: str = "./", filters: list[str] = None + pod_name: str, + chain: str, + local_path: str = "./", + filters: list[str] = None, + namespace: Optional[str] = None, ) -> None: - namespace = get_default_namespace() + namespace = get_default_namespace_or(namespace) sclient = get_static_client() try: @@ -239,7 +277,7 @@ def snapshot_bitcoin_datadir( print(f"An error occurred: {str(e)}") -def wait_for_caddy_ready(name, namespace, timeout=300): +def wait_for_pod_ready(name, namespace, timeout=300): sclient = get_static_client() w = watch.Watch() for event in w.stream( @@ -250,8 +288,252 @@ def wait_for_caddy_ready(name, namespace, timeout=300): conditions = pod.status.conditions or [] ready_condition = next((c for c in conditions if c.type == "Ready"), None) if ready_condition and ready_condition.status == "True": - print(f"Caddy pod {name} is ready.") w.stop() return True - print(f"Timeout waiting for Caddy pod {name} to be ready.") + print(f"Timeout waiting for pod {name} to be ready.") + return False + + +def wait_for_init(pod_name, timeout=300, namespace: Optional[str] = None): + namespace = get_default_namespace_or(namespace) + sclient = get_static_client() + w = watch.Watch() + for event in w.stream( + sclient.list_namespaced_pod, namespace=namespace, timeout_seconds=timeout + ): + pod = event["object"] + if pod.metadata.name == pod_name: + if not 
pod.status.init_container_statuses: + continue + for init_container_status in pod.status.init_container_statuses: + if init_container_status.state.running: + print(f"initContainer in pod {pod_name} ({namespace}) is ready") + w.stop() + return True + print(f"Timeout waiting for initContainer in {pod_name} ({namespace}) to be ready.") return False + + +def wait_for_ingress_controller(timeout=300): + # get name of ingress controller pod + sclient = get_static_client() + pods = sclient.list_namespaced_pod(namespace=INGRESS_NAMESPACE) + for pod in pods.items: + if "ingress-nginx-controller" in pod.metadata.name: + return wait_for_pod_ready(pod.metadata.name, INGRESS_NAMESPACE, timeout) + + +def get_ingress_ip_or_host(): + config.load_kube_config() + networking_v1 = client.NetworkingV1Api() + try: + ingress = networking_v1.read_namespaced_ingress(CADDY_INGRESS_NAME, LOGGING_NAMESPACE) + if ingress.status.load_balancer.ingress[0].hostname: + return ingress.status.load_balancer.ingress[0].hostname + return ingress.status.load_balancer.ingress[0].ip + except Exception as e: + print(f"Error getting ingress IP: {e}") + return None + + +def pod_log(pod_name, container_name=None, follow=False, namespace: Optional[str] = None): + namespace = get_default_namespace_or(namespace) + sclient = get_static_client() + + try: + return sclient.read_namespaced_pod_log( + name=pod_name, + namespace=namespace, + container=container_name, + follow=follow, + _preload_content=False, + ) + except ApiException as e: + raise Exception(json.loads(e.body.decode("utf-8"))["message"]) from None + + +def wait_for_pod(pod_name, timeout_seconds=10, namespace: Optional[str] = None): + namespace = get_default_namespace_or(namespace) + sclient = get_static_client() + while timeout_seconds > 0: + pod = sclient.read_namespaced_pod_status(name=pod_name, namespace=namespace) + if pod.status.phase != "Pending": + return + sleep(1) + timeout_seconds -= 1 + + +def write_file_to_container( + pod_name, 
container_name, dst_path, data, namespace: Optional[str] = None +): + namespace = get_default_namespace_or(namespace) + sclient = get_static_client() + exec_command = ["sh", "-c", f"cat > {dst_path}.tmp && sync"] + try: + res = stream( + sclient.connect_get_namespaced_pod_exec, + pod_name, + namespace, + command=exec_command, + container=container_name, + stdin=True, + stderr=True, + stdout=True, + tty=False, + _preload_content=False, + ) + res.write_stdin(data) + res.close() + rename_command = ["sh", "-c", f"mv {dst_path}.tmp {dst_path}"] + stream( + sclient.connect_get_namespaced_pod_exec, + pod_name, + namespace, + command=rename_command, + container=container_name, + stdin=False, + stderr=True, + stdout=True, + tty=False, + ) + print(f"Successfully copied data to {pod_name}({container_name}):{dst_path}") + return True + except Exception as e: + print(f"Failed to copy data to {pod_name}({container_name}):{dst_path}:\n{e}") + + +def get_kubeconfig_value(jsonpath): + command = f"kubectl config view --minify --raw -o jsonpath={jsonpath}" + return run_command(command) + + +def get_cluster_of_current_context(kubeconfig_data: dict) -> dict: + # Get the current context name + current_context_name = kubeconfig_data.get("current-context") + + if not current_context_name: + raise K8sError("No current context found in kubeconfig.") + + # Find the context entry for the current context + context_entry = next( + ( + context + for context in kubeconfig_data.get("contexts", []) + if context["name"] == current_context_name + ), + None, + ) + + if not context_entry: + raise K8sError(f"Context '{current_context_name}' not found in kubeconfig.") + + # Get the cluster name from the context entry + cluster_name = context_entry.get("context", {}).get("cluster") + + if not cluster_name: + raise K8sError(f"Cluster not specified in context '{current_context_name}'.") + + # Find the cluster entry associated with the cluster name + cluster_entry = next( + ( + cluster + for cluster in 
kubeconfig_data.get("clusters", []) + if cluster["name"] == cluster_name + ), + None, + ) + + if not cluster_entry: + raise K8sError(f"Cluster '{cluster_name}' not found in kubeconfig.") + + return cluster_entry + + +def get_namespaces() -> list[V1Namespace]: + sclient = get_static_client() + try: + return [ + ns + for ns in sclient.list_namespace().items + if ns.metadata.name not in KUBE_INTERNAL_NAMESPACES + ] + + except ApiException as e: + if e.status == 403: + ns = sclient.read_namespace(name=get_default_namespace()) + return [ns] + else: + return [] + + +def get_namespaces_by_type(namespace_type: str) -> list[V1Namespace]: + """ + Get all namespaces beginning with `namespace_type`. Returns an empty list if no namespaces with the specified prefix are found. + """ + namespaces = get_namespaces() + return [ns for ns in namespaces if ns.metadata.name.startswith(namespace_type)] + + +def get_service_accounts_in_namespace(namespace): + """ + Get all service accounts in a namespace. Returns an empty list if no service accounts are found in the specified namespace. 
+ """ + command = f"kubectl get serviceaccounts -n {namespace} -o jsonpath={{.items[*].metadata.name}}" + # skip the default service account created by k8s + service_accounts = run_command(command).split() + return [sa for sa in service_accounts if sa != "default"] + + +def can_delete_pods(namespace: Optional[str] = None) -> bool: + namespace = get_default_namespace_or(namespace) + + get_static_client() + auth_api = client.AuthorizationV1Api() + + # Define the SelfSubjectAccessReview request for deleting pods + access_review = client.V1SelfSubjectAccessReview( + spec=client.V1SelfSubjectAccessReviewSpec( + resource_attributes=client.V1ResourceAttributes( + namespace=namespace, + verb="delete", # Action: 'delete' + resource="pods", # Resource: 'pods' + ) + ) + ) + + try: + # Perform the SelfSubjectAccessReview check + review_response = auth_api.create_self_subject_access_review(body=access_review) + + # Check the result and return + if review_response.status.allowed: + print(f"Service account can delete pods in namespace '{namespace}'.") + return True + else: + print(f"Service account CANNOT delete pods in namespace '{namespace}'.") + return False + + except ApiException as e: + print(f"An error occurred: {e}") + return False + + +def open_kubeconfig(kubeconfig_path: str) -> dict: + try: + with open(kubeconfig_path) as file: + return yaml.safe_load(file) + except FileNotFoundError as e: + raise K8sError(f"Kubeconfig file {kubeconfig_path} not found.") from e + except yaml.YAMLError as e: + raise K8sError(f"Error parsing kubeconfig: {e}") from e + + +def write_kubeconfig(kube_config: dict, kubeconfig_path: str) -> None: + dir_name = os.path.dirname(kubeconfig_path) + try: + with tempfile.NamedTemporaryFile("w", dir=dir_name, delete=False) as temp_file: + yaml.safe_dump(kube_config, temp_file) + os.replace(temp_file.name, kubeconfig_path) + except Exception as e: + os.remove(temp_file.name) + raise K8sError(f"Error writing kubeconfig: {kubeconfig_path}") from e diff 
--git a/src/warnet/namespaces.py b/src/warnet/namespaces.py index 45bcb7af5..12357525b 100644 --- a/src/warnet/namespaces.py +++ b/src/warnet/namespaces.py @@ -8,6 +8,7 @@ DEFAULTS_NAMESPACE_FILE, NAMESPACES_DIR, NAMESPACES_FILE, + WARGAMES_NAMESPACE_PREFIX, ) from .process import run_command, stream_command @@ -32,16 +33,15 @@ def namespaces(): """Namespaces commands""" -@click.argument( - "namespaces_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path) -) @namespaces.command() def list(): - """List all namespaces with 'warnet-' prefix""" + """List all namespaces with 'wargames-' prefix""" cmd = "kubectl get namespaces -o jsonpath='{.items[*].metadata.name}'" res = run_command(cmd) all_namespaces = res.split() - warnet_namespaces = [ns for ns in all_namespaces if ns.startswith("warnet-")] + warnet_namespaces = [ + ns for ns in all_namespaces if ns.startswith(f"{WARGAMES_NAMESPACE_PREFIX}") + ] if warnet_namespaces: print("Warnet namespaces:") @@ -55,14 +55,16 @@ def list(): @click.option("--all", "destroy_all", is_flag=True, help="Destroy all warnet- prefixed namespaces") @click.argument("namespace", required=False) def destroy(destroy_all: bool, namespace: str): - """Destroy a specific namespace or all warnet- prefixed namespaces""" + """Destroy a specific namespace or all 'wargames-' prefixed namespaces""" if destroy_all: cmd = "kubectl get namespaces -o jsonpath='{.items[*].metadata.name}'" res = run_command(cmd) # Get the list of namespaces all_namespaces = res.split() - warnet_namespaces = [ns for ns in all_namespaces if ns.startswith("warnet-")] + warnet_namespaces = [ + ns for ns in all_namespaces if ns.startswith(f"{WARGAMES_NAMESPACE_PREFIX}") + ] if not warnet_namespaces: print("No warnet namespaces found to destroy.") @@ -75,8 +77,8 @@ def destroy(destroy_all: bool, namespace: str): else: print(f"Destroyed namespace: {ns}") elif namespace: - if not namespace.startswith("warnet-"): - print("Error: Can only destroy namespaces 
with 'warnet-' prefix") + if not namespace.startswith(f"{WARGAMES_NAMESPACE_PREFIX}"): + print(f"Error: Can only destroy namespaces with '{WARGAMES_NAMESPACE_PREFIX}' prefix") return destroy_cmd = f"kubectl delete namespace {namespace}" diff --git a/src/warnet/network.py b/src/warnet/network.py index 18a064210..a894cafc9 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -18,17 +18,12 @@ def copy_defaults(directory: Path, target_subdir: str, source_path: Path, exclud target_dir.mkdir(parents=True, exist_ok=True) print(f"Creating directory: {target_dir}") - def should_copy(item: Path) -> bool: - return item.name not in exclude_list - - for item in source_path.iterdir(): - if should_copy(item): - if item.is_file(): - shutil.copy2(item, target_dir) - print(f"Copied file: {item.name}") - elif item.is_dir(): - shutil.copytree(item, target_dir / item.name, dirs_exist_ok=True) - print(f"Copied directory: {item.name}") + shutil.copytree( + src=source_path, + dst=target_dir, + dirs_exist_ok=True, + ignore=shutil.ignore_patterns(*exclude_list), + ) print(f"Finished copying files to {target_dir}") @@ -39,7 +34,7 @@ def copy_network_defaults(directory: Path): directory, NETWORK_DIR.name, NETWORK_DIR, - ["node-defaults.yaml", "__pycache__", "__init__.py"], + ["__pycache__", "__init__.py"], ) @@ -49,7 +44,7 @@ def copy_scenario_defaults(directory: Path): directory, SCENARIOS_DIR.name, SCENARIOS_DIR, - ["__init__.py", "__pycache__", "commander.py"], + ["__pycache__", "test_scenarios"], ) @@ -63,7 +58,9 @@ def _connected(end="\n"): for tank in tanks: # Get actual try: - peerinfo = json.loads(_rpc(tank.metadata.name, "getpeerinfo", "")) + peerinfo = json.loads( + _rpc(tank.metadata.name, "getpeerinfo", "", namespace=tank.metadata.namespace) + ) actual = 0 for peer in peerinfo: if is_connection_manual(peer): diff --git a/src/warnet/process.py b/src/warnet/process.py index 6161774b1..626124b71 100644 --- a/src/warnet/process.py +++ b/src/warnet/process.py @@ -18,12 
+18,14 @@ def stream_command(command: str) -> bool: universal_newlines=True, ) + message = "" for line in iter(process.stdout.readline, ""): + message += line print(line, end="") process.stdout.close() return_code = process.wait() if return_code != 0: - raise Exception(process.stderr) + raise Exception(message) return True diff --git a/src/warnet/project.py b/src/warnet/project.py index 0ac431015..67b063fcd 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -1,15 +1,30 @@ +import hashlib import os import platform +import shutil import subprocess import sys +import tarfile +import tempfile from dataclasses import dataclass from enum import Enum, auto from pathlib import Path -from typing import Callable +from typing import Callable, Optional import click import inquirer - +import requests + +from .constants import ( + HELM_BINARY_NAME, + HELM_BLESSED_NAME_AND_CHECKSUMS, + HELM_BLESSED_VERSION, + HELM_DOWNLOAD_URL_STUB, + KUBECTL_BINARY_NAME, + KUBECTL_BLESSED_NAME_AND_CHECKSUMS, + KUBECTL_BLESSED_VERSION, + KUBECTL_DOWNLOAD_URL_STUB, +) from .graph import inquirer_create_network from .network import copy_network_defaults, copy_scenario_defaults @@ -136,7 +151,23 @@ def is_docker_desktop_running() -> tuple[bool, str]: except FileNotFoundError as err: return False, str(err) - def is_kubectl_installed() -> tuple[bool, str]: + def is_docker_desktop_kube_running() -> tuple[bool, str]: + try: + cluster_info = subprocess.run( + ["kubectl", "cluster-info", "--request-timeout=1"], + capture_output=True, + text=True, + ) + if cluster_info.returncode == 0: + indented_output = cluster_info.stdout.strip().replace("\n", "\n\t") + return True, f"\n\t{indented_output}" + else: + return False, "" + except Exception: + print() + return False, "Please enable kubernetes in Docker Desktop" + + def is_kubectl_installed_and_offer_if_not() -> tuple[bool, str]: try: version_result = subprocess.run( ["kubectl", "version", "--client"], @@ -152,10 +183,32 @@ def 
is_kubectl_installed() -> tuple[bool, str]: return True, location_result.stdout.strip() else: return False, "" - except FileNotFoundError as err: - return False, str(err) - - def is_helm_installed() -> tuple[bool, str]: + except FileNotFoundError: + print() + kubectl_answer = inquirer.prompt( + [ + inquirer.Confirm( + "install_kubectl", + message=click.style( + "Would you like Warnet to install Kubectl into your virtual environment?", + fg="blue", + bold=True, + ), + default=True, + ), + ] + ) + if kubectl_answer is None: + msg = "Setup cancelled by user." + click.secho(msg, fg="yellow") + return False, msg + if kubectl_answer["install_kubectl"]: + click.secho(" Installing Kubectl...", fg="yellow", bold=True) + install_kubectl_rootlessly_to_venv() + return is_kubectl_installed_and_offer_if_not() + return False, "Please install Kubectl." + + def is_helm_installed_and_offer_if_not() -> tuple[bool, str]: try: version_result = subprocess.run(["helm", "version"], capture_output=True, text=True) location_result = subprocess.run( @@ -167,8 +220,31 @@ def is_helm_installed() -> tuple[bool, str]: return version_result.returncode == 0, location_result.stdout.strip() else: return False, "" - except FileNotFoundError as err: - return False, str(err) + + except FileNotFoundError: + print() + helm_answer = inquirer.prompt( + [ + inquirer.Confirm( + "install_helm", + message=click.style( + "Would you like Warnet to install Helm into your virtual environment?", + fg="blue", + bold=True, + ), + default=True, + ), + ] + ) + if helm_answer is None: + msg = "Setup cancelled by user." + click.secho(msg, fg="yellow") + return False, msg + if helm_answer["install_helm"]: + click.secho(" Installing Helm...", fg="yellow", bold=True) + install_helm_rootlessly_to_venv() + return is_helm_installed_and_offer_if_not() + return False, "Please install Helm." 
def check_installation(tool_info: ToolInfo) -> ToolStatus: has_good_version, location = tool_info.is_installed_func() @@ -204,6 +280,12 @@ def check_installation(tool_info: ToolInfo) -> ToolStatus: install_instruction="Please make sure docker is running", install_url="https://docs.docker.com/engine/install/", ) + docker_desktop_kube_running = ToolInfo( + tool_name="Kubernetes Running in Docker Desktop", + is_installed_func=is_docker_desktop_kube_running, + install_instruction="Please enable the local kubernetes cluster in Docker Desktop", + install_url="https://docs.docker.com/desktop/kubernetes/", + ) minikube_running_info = ToolInfo( tool_name="Running Minikube", is_installed_func=is_minikube_running, @@ -212,14 +294,14 @@ def check_installation(tool_info: ToolInfo) -> ToolStatus: ) kubectl_info = ToolInfo( tool_name="Kubectl", - is_installed_func=is_kubectl_installed, + is_installed_func=is_kubectl_installed_and_offer_if_not, install_instruction="Install kubectl.", install_url="https://kubernetes.io/docs/tasks/tools/install-kubectl/", ) helm_info = ToolInfo( tool_name="Helm", - is_installed_func=is_helm_installed, - install_instruction="Install Helm from Helm's official site.", + is_installed_func=is_helm_installed_and_offer_if_not, + install_instruction="Install Helm from Helm's official site, or rootlessly install Helm using Warnet's downloader when prompted.", install_url="https://helm.sh/docs/intro/install/", ) minikube_info = ToolInfo( @@ -259,10 +341,13 @@ def check_installation(tool_info: ToolInfo) -> ToolStatus: check_results: list[ToolStatus] = [] if answers: + check_results.append(check_installation(kubectl_info)) + check_results.append(check_installation(helm_info)) if answers["platform"] == "Docker Desktop": check_results.append(check_installation(docker_info)) check_results.append(check_installation(docker_desktop_info)) check_results.append(check_installation(docker_running_info)) + 
check_results.append(check_installation(docker_desktop_kube_running)) elif answers["platform"] == "Minikube": check_results.append(check_installation(docker_info)) check_results.append(check_installation(docker_running_info)) @@ -270,8 +355,6 @@ def check_installation(tool_info: ToolInfo) -> ToolStatus: if is_platform_darwin(): check_results.append(check_installation(minikube_version_info)) check_results.append(check_installation(minikube_running_info)) - check_results.append(check_installation(kubectl_info)) - check_results.append(check_installation(helm_info)) else: click.secho("Please re-run setup.", fg="yellow") sys.exit(1) @@ -361,3 +444,202 @@ def init(): """Initialize a warnet project in the current directory""" current_dir = Path.cwd() new_internal(directory=current_dir, from_init=True) + + +def get_os_name_for_helm() -> Optional[str]: + """Return a short operating system name suitable for downloading a helm binary.""" + uname_sys = platform.system().lower() + if "linux" in uname_sys: + return "linux" + elif uname_sys == "darwin": + return "darwin" + elif "win" in uname_sys: + return "windows" + return None + + +def is_in_virtualenv() -> bool: + """Check if the user is in a virtual environment.""" + return hasattr(sys, "real_prefix") or ( + hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix + ) + + +def download_file(url, destination): + click.secho(f" Downloading {url}", fg="blue") + response = requests.get(url, stream=True) + if response.status_code == 200: + with open(destination, "wb") as f: + for chunk in response.iter_content(1024): + f.write(chunk) + else: + raise Exception(f"Failed to download {url} (status code {response.status_code})") + + +def query_arch_from_uname(arch: str) -> Optional[str]: + if arch.startswith("armv5"): + return "armv5" + elif arch.startswith("armv6"): + return "armv6" + elif arch.startswith("armv7"): + return "arm" + elif arch == "aarch64" or arch == "arm64": + return "arm64" + elif arch == "x86": + return "386" + 
elif arch == "x86_64": + return "amd64" + elif arch == "i686" or arch == "i386": + return "386" + else: + return None + + +def write_blessed_kubectl_checksum(system: str, arch: str, dest_path: str): + checksum = next( + ( + b["checksum"] + for b in KUBECTL_BLESSED_NAME_AND_CHECKSUMS + if b["system"] == system and b["arch"] == arch + ), + None, + ) + if checksum: + with open(dest_path, "w") as f: + f.write(checksum) + else: + click.secho("Could not find a matching kubectl binary and checksum", fg="red") + + +def write_blessed_helm_checksum(helm_filename: str, dest_path: str): + checksum = next( + (b["checksum"] for b in HELM_BLESSED_NAME_AND_CHECKSUMS if b["name"] == helm_filename), None + ) + if checksum: + with open(dest_path, "w") as f: + f.write(checksum) + else: + click.secho("Could not find a matching helm binary and checksum", fg="red") + + +def verify_checksum(file_path, checksum_path): + click.secho(" Verifying checksum...", fg="blue") + sha256_hash = hashlib.sha256() + with open(file_path, "rb") as f: + for byte_block in iter(lambda: f.read(4096), b""): + sha256_hash.update(byte_block) + + with open(checksum_path) as f: + expected_checksum = f.read().strip() + + if sha256_hash.hexdigest() != expected_checksum: + raise Exception("Checksum verification failed!") + click.secho(" Checksum verified.", fg="blue") + + +def install_to_venv(bin_path, binary_name): + venv_bin_dir = os.path.join(sys.prefix, "bin") + dst_path = os.path.join(venv_bin_dir, binary_name) + shutil.move(bin_path, dst_path) + os.chmod(dst_path, 0o755) + click.secho(f" {binary_name} installed into {dst_path}", fg="blue") + + +def install_helm_rootlessly_to_venv(): + if not is_in_virtualenv(): + click.secho( + "Error: You are not in a virtual environment. 
Please activate a virtual environment and try again.", + fg="yellow", + ) + sys.exit(1) + + version = HELM_BLESSED_VERSION + + os_name = get_os_name_for_helm() + if os_name is None: + click.secho( + "Error: Could not determine the operating system of this computer.", fg="yellow" + ) + sys.exit(1) + + uname_arch = os.uname().machine + arch = query_arch_from_uname(uname_arch) + if not arch: + click.secho(f"No Helm binary candidate for arch: {uname_arch}", fg="red") + sys.exit(1) + + helm_filename = f"{HELM_BINARY_NAME}-{version}-{os_name}-{arch}.tar.gz" + helm_url = f"{HELM_DOWNLOAD_URL_STUB}{helm_filename}" + + try: + with tempfile.TemporaryDirectory() as temp_dir: + helm_archive_path = os.path.join(temp_dir, helm_filename) + checksum_path = os.path.join(temp_dir, f"{helm_filename}.sha256") + + download_file(helm_url, helm_archive_path) + write_blessed_helm_checksum(helm_filename, checksum_path) + verify_checksum(helm_archive_path, checksum_path) + + # Extract Helm and install it in the virtual environment's bin folder + with tarfile.open(helm_archive_path, "r:gz") as tar: + tar.extractall(path=temp_dir) + helm_bin_path = os.path.join(temp_dir, os_name + "-" + arch, HELM_BINARY_NAME) + install_to_venv(helm_bin_path, HELM_BINARY_NAME) + + click.secho( + f" {HELM_BINARY_NAME} {version} installed successfully to your virtual environment!\n", + fg="blue", + ) + + except Exception as e: + click.secho(f"Error: {e}\nCould not install helm.", fg="yellow") + sys.exit(1) + + +def install_kubectl_rootlessly_to_venv(): + if not is_in_virtualenv(): + click.secho( + "Error: You are not in a virtual environment. 
Please activate a virtual environment and try again.", + fg="yellow", + ) + sys.exit(1) + + os_name = get_os_name_for_helm() + if os_name is None: + click.secho( + "Error: Could not determine the operating system of this computer.", fg="yellow" + ) + sys.exit(1) + + uname_arch = os.uname().machine + arch = query_arch_from_uname(uname_arch) + if arch not in ["arm64", "amd64"]: + click.secho(f"No Kubectl binary candidate for arch: {uname_arch}", fg="red") + sys.exit(1) + + uname_sys = os.uname().sysname.lower() + if uname_sys not in ["linux", "darwin"]: + click.secho(f"The following system is not supported: {uname_sys}", fg="red") + sys.exit(1) + + kubectl_url = f"{KUBECTL_DOWNLOAD_URL_STUB}/{uname_sys}/{arch}/{KUBECTL_BINARY_NAME}" + + try: + with tempfile.TemporaryDirectory() as temp_dir: + binary_path = os.path.join(temp_dir, KUBECTL_BINARY_NAME) + checksum_path = os.path.join(temp_dir, f"{KUBECTL_BINARY_NAME}.sha256") + + download_file(kubectl_url, binary_path) + write_blessed_kubectl_checksum(uname_sys, arch, checksum_path) + verify_checksum(binary_path, checksum_path) + + install_to_venv(binary_path, KUBECTL_BINARY_NAME) + + click.secho( + f" {KUBECTL_BINARY_NAME} {KUBECTL_BLESSED_VERSION} installed successfully to your virtual environment!\n", + fg="blue", + ) + + except Exception as e: + click.secho(f"Error: {e}\nCould not install kubectl.", fg="yellow") + sys.exit(1) diff --git a/src/warnet/status.py b/src/warnet/status.py index ebbd245d4..df62ed2df 100644 --- a/src/warnet/status.py +++ b/src/warnet/status.py @@ -45,10 +45,11 @@ def status(): table.add_column("Component", style="cyan") table.add_column("Name", style="green") table.add_column("Status", style="yellow") + table.add_column("Namespace", style="green") # Add tanks to the table for tank in tanks: - table.add_row("Tank", tank["name"], tank["status"]) + table.add_row("Tank", tank["name"], tank["status"], tank["namespace"]) # Add a separator if there are both tanks and scenarios if tanks and scenarios: 
@@ -58,7 +59,7 @@ def status(): active = 0 if scenarios: for scenario in scenarios: - table.add_row("Scenario", scenario["name"], scenario["status"]) + table.add_row("Scenario", scenario["name"], scenario["status"], scenario["namespace"]) if scenario["status"] == "running" or scenario["status"] == "pending": active += 1 else: @@ -86,9 +87,23 @@ def status(): def _get_tank_status(): tanks = get_mission("tank") - return [{"name": tank.metadata.name, "status": tank.status.phase.lower()} for tank in tanks] + return [ + { + "name": tank.metadata.name, + "status": tank.status.phase.lower(), + "namespace": tank.metadata.namespace, + } + for tank in tanks + ] def _get_deployed_scenarios(): commanders = get_mission("commander") - return [{"name": c.metadata.name, "status": c.status.phase.lower()} for c in commanders] + return [ + { + "name": c.metadata.name, + "status": c.status.phase.lower(), + "namespace": c.metadata.namespace, + } + for c in commanders + ] diff --git a/src/warnet/users.py b/src/warnet/users.py index c85e53585..52a2c1080 100644 --- a/src/warnet/users.py +++ b/src/warnet/users.py @@ -1,70 +1,124 @@ +import difflib +import json import os -import subprocess import sys import click -import yaml + +from warnet.constants import KUBECONFIG +from warnet.k8s import K8sError, open_kubeconfig, write_kubeconfig @click.command() -@click.argument("kube_config", type=str) -def auth(kube_config: str) -> None: - """ - Authenticate with a warnet cluster using a kube config file - """ +@click.argument("auth_config", type=str) +def auth(auth_config): + """Authenticate with a Warnet cluster using a kubernetes config file""" try: - current_kubeconfig = os.environ.get("KUBECONFIG", os.path.expanduser("~/.kube/config")) - combined_kubeconfig = ( - f"{current_kubeconfig}:{kube_config}" if current_kubeconfig else kube_config - ) - os.environ["KUBECONFIG"] = combined_kubeconfig - with open(kube_config) as file: - content = yaml.safe_load(file) - user = content["users"][0] - 
user_name = user["name"] - user_token = user["user"]["token"] - current_context = content["current-context"] - flatten_cmd = "kubectl config view --flatten" - result_flatten = subprocess.run( - flatten_cmd, shell=True, check=True, capture_output=True, text=True - ) - except subprocess.CalledProcessError as e: - click.secho("Error occurred while executing kubectl config view --flatten:", fg="red") - click.secho(e.stderr, fg="red") + auth_config = open_kubeconfig(auth_config) + except K8sError as e: + click.secho(e, fg="yellow") + click.secho(f"Could not open auth_config: {auth_config}", fg="red") sys.exit(1) - if result_flatten.returncode == 0: - with open(current_kubeconfig, "w") as file: - file.write(result_flatten.stdout) - click.secho(f"Authorization file written to: {current_kubeconfig}", fg="green") - else: - click.secho("Could not create authorization file", fg="red") - click.secho(result_flatten.stderr, fg="red") - sys.exit(result_flatten.returncode) + is_first_config = False + if not os.path.exists(KUBECONFIG): + os.makedirs(os.path.dirname(KUBECONFIG), exist_ok=True) + try: + write_kubeconfig(auth_config, KUBECONFIG) + is_first_config = True + except K8sError as e: + click.secho(e, fg="yellow") + click.secho(f"Could not write KUBECONFIG: {KUBECONFIG}", fg="red") + sys.exit(1) try: - update_cmd = f"kubectl config set-credentials {user_name} --token {user_token}" - result_update = subprocess.run( - update_cmd, shell=True, check=True, capture_output=True, text=True - ) - if result_update.returncode != 0: - click.secho("Could not update authorization file", fg="red") - click.secho(result_flatten.stderr, fg="red") - sys.exit(result_flatten.returncode) - except subprocess.CalledProcessError as e: - click.secho("Error occurred while executing kubectl config view --flatten:", fg="red") - click.secho(e.stderr, fg="red") + base_config = open_kubeconfig(KUBECONFIG) + except K8sError as e: + click.secho(e, fg="yellow") + click.secho(f"Could not open KUBECONFIG: 
{KUBECONFIG}", fg="red") sys.exit(1) - with open(current_kubeconfig) as file: - contents = yaml.safe_load(file) + if not is_first_config: + for category in ["clusters", "users", "contexts"]: + if category in auth_config: + merge_entries(category, base_config, auth_config) - with open(current_kubeconfig, "w") as file: - contents["current-context"] = current_context - yaml.safe_dump(contents, file) + new_current_context = auth_config.get("current-context") + base_config["current-context"] = new_current_context - with open(current_kubeconfig) as file: - contents = yaml.safe_load(file) + # Check if the new current context has an explicit namespace + context_entry = next( + (ctx for ctx in base_config["contexts"] if ctx["name"] == new_current_context), None + ) + if context_entry and "namespace" not in context_entry["context"]: click.secho( - f"\nwarnet's current context is now set to: {contents['current-context']}", fg="green" + f"Warning: The context '{new_current_context}' does not have an explicit namespace.", + fg="yellow", ) + + try: + write_kubeconfig(base_config, KUBECONFIG) + click.secho(f"Updated kubeconfig with authorization data: {KUBECONFIG}", fg="green") + except K8sError as e: + click.secho(e, fg="yellow") + click.secho(f"Could not write KUBECONFIG: {KUBECONFIG}", fg="red") + sys.exit(1) + + try: + base_config = open_kubeconfig(KUBECONFIG) + click.secho( + f"Warnet's current context is now set to: {base_config['current-context']}", fg="green" + ) + except K8sError as e: + click.secho(f"Error reading from {KUBECONFIG}: {e}", fg="red") + sys.exit(1) + + +def merge_entries(category, base_config, auth_config): + name = "name" + base_list = base_config.setdefault(category, []) + auth_list = auth_config[category] + base_entry_names = {entry[name] for entry in base_list} # Extract existing names + for auth_entry in auth_list: + if auth_entry[name] in base_entry_names: + existing_entry = next( + base_entry for base_entry in base_list if base_entry[name] == 
auth_entry[name] + ) + if existing_entry != auth_entry: + # Show diff between existing and new entry + existing_entry_str = json.dumps(existing_entry, indent=2, sort_keys=True) + auth_entry_str = json.dumps(auth_entry, indent=2, sort_keys=True) + diff = difflib.unified_diff( + existing_entry_str.splitlines(), + auth_entry_str.splitlines(), + fromfile="Existing Entry", + tofile="New Entry", + lineterm="", + ) + click.echo("Differences between existing and new entry:\n") + click.echo("\n".join(diff)) + + if click.confirm( + f"The '{category}' section key '{auth_entry[name]}' already exists and differs. Overwrite?", + default=False, + ): + # Find and replace the existing entry + base_list[:] = [ + base_entry if base_entry[name] != auth_entry[name] else auth_entry + for base_entry in base_list + ] + click.secho( + f"Overwrote '{category}' section key '{auth_entry[name]}'", fg="yellow" + ) + else: + click.secho( + f"Skipped '{category}' section key '{auth_entry[name]}'", fg="yellow" + ) + else: + click.secho( + f"Entry for '{category}' section key '{auth_entry[name]}' is identical. 
No changes made.", + fg="blue", + ) + else: + base_list.append(auth_entry) + click.secho(f"Added new '{category}' section key '{auth_entry[name]}'", fg="green") diff --git a/test/conf_test.py b/test/conf_test.py index bc717a732..fc3c8f4b9 100755 --- a/test/conf_test.py +++ b/test/conf_test.py @@ -7,18 +7,22 @@ from test_base import TestBase +from warnet.control import stop_scenario from warnet.k8s import get_mission +from warnet.status import _get_deployed_scenarios as scenarios_deployed class ConfTest(TestBase): def __init__(self): super().__init__() self.network_dir = Path(os.path.dirname(__file__)) / "data" / "bitcoin_conf" + self.scen_dir = Path(os.path.dirname(__file__)).parent / "resources" / "scenarios" def run_test(self): try: self.setup_network() self.check_uacomment() + self.check_single_miner() finally: self.cleanup() @@ -52,6 +56,22 @@ def get_uacomment(): self.wait_for_predicate(get_uacomment) + def check_single_miner(self): + scenario_file = self.scen_dir / "miner_std.py" + self.log.info(f"Running scenario from: {scenario_file}") + # Mine from a tank that is not first or last and + # is one of the only few in the network that even + # has rpc createwallet method! 
+ self.warnet(f"run {scenario_file} --tank=tank-0026 --interval=1") + self.wait_for_predicate( + lambda: int(self.warnet("bitcoin rpc tank-0026 getblockcount")) >= 10 + ) + running = scenarios_deployed() + assert len(running) == 1, f"Expected one running scenario, got {len(running)}" + assert running[0]["status"] == "running", "Scenario should be running" + stop_scenario(running[0]["name"]) + self.wait_for_all_scenarios() + if __name__ == "__main__": test = ConfTest() diff --git a/test/dag_connection_test.py b/test/dag_connection_test.py index 258052fc4..dee38356a 100755 --- a/test/dag_connection_test.py +++ b/test/dag_connection_test.py @@ -10,6 +10,7 @@ class DAGConnectionTest(TestBase): def __init__(self): super().__init__() self.network_dir = Path(os.path.dirname(__file__)) / "data" / "ten_semi_unconnected" + self.scen_dir = Path(os.path.dirname(__file__)).parent / "resources" / "scenarios" def run_test(self): try: @@ -25,8 +26,9 @@ def setup_network(self): self.wait_for_all_edges() def run_connect_dag_scenario(self): - self.log.info("Running connect_dag scenario") - self.warnet("run test/data/scenario_connect_dag.py") + scenario_file = self.scen_dir / "test_scenarios" / "connect_dag.py" + self.log.info(f"Running scenario from: {scenario_file}") + self.warnet(f"run {scenario_file} --source_dir={self.scen_dir}") self.wait_for_all_scenarios() diff --git a/test/data/12_node_ring/network.yaml b/test/data/12_node_ring/network.yaml index 8ae4e752d..62110f3d7 100644 --- a/test/data/12_node_ring/network.yaml +++ b/test/data/12_node_ring/network.yaml @@ -1,61 +1,61 @@ nodes: - name: tank-0000 - connect: + addnode: - tank-0001 config: | debug=rpc debug=validation - name: tank-0001 - connect: + addnode: - tank-0002 config: | debug=net debug=validation - name: tank-0002 - connect: + addnode: - tank-0003 config: | debug=validation - name: tank-0003 - connect: + addnode: - tank-0004 config: | debug=validation - name: tank-0004 - connect: + addnode: - tank-0005 - name: 
tank-0005 - connect: + addnode: - tank-0006 config: | debug=validation - name: tank-0006 - connect: + addnode: - tank-0007 - name: tank-0007 config: | debug=validation - connect: + addnode: - tank-0008 config: | debug=validation - name: tank-0008 - connect: + addnode: - tank-0009 config: | debug=validation - name: tank-0009 - connect: + addnode: - tank-0010 config: | debug=validation - name: tank-0010 - connect: + addnode: - tank-0011 config: | debug=validation - name: tank-0011 - connect: + addnode: - tank-0000 config: | debug=validation \ No newline at end of file diff --git a/test/data/admin/namespaces/two_namespaces_two_users/namespace-defaults.yaml b/test/data/admin/namespaces/two_namespaces_two_users/namespace-defaults.yaml new file mode 100644 index 000000000..75cc8e42c --- /dev/null +++ b/test/data/admin/namespaces/two_namespaces_two_users/namespace-defaults.yaml @@ -0,0 +1,18 @@ +users: + - name: warnet-user + roles: + - pod-viewer + - pod-manager +# the pod-viewer and pod-manager roles are the default +# roles defined in values.yaml for the namespaces charts +# +# if you need a different set of roles for a particular namespaces +# deployment, you can override values.yaml by providing your own +# role definitions below +# +# roles: +# - name: my-custom-role +# rules: +# - apiGroups: "" +# resources: "" +# verbs: "" diff --git a/test/data/admin/namespaces/two_namespaces_two_users/namespaces.yaml b/test/data/admin/namespaces/two_namespaces_two_users/namespaces.yaml new file mode 100644 index 000000000..413d3bcb7 --- /dev/null +++ b/test/data/admin/namespaces/two_namespaces_two_users/namespaces.yaml @@ -0,0 +1,19 @@ +namespaces: + - name: wargames-red-team-warnettest + users: + - name: alice-warnettest + roles: + - pod-viewer + - name: bob-warnettest + roles: + - pod-viewer + - pod-manager + - name: wargames-blue-team-warnettest + users: + - name: mallory-warnettest + roles: + - pod-viewer + - name: carol-warnettest + roles: + - pod-viewer + - pod-manager 
diff --git a/test/data/bitcoin_conf/network.yaml b/test/data/bitcoin_conf/network.yaml index f1d1a8124..06ec79290 100644 --- a/test/data/bitcoin_conf/network.yaml +++ b/test/data/bitcoin_conf/network.yaml @@ -2,63 +2,63 @@ nodes: - name: tank-0016 image: tag: "0.16.1" - connect: + addnode: - tank-0017 config: uacomment=tank-0016 - name: tank-0017 image: tag: "0.17.0" - connect: + addnode: - tank-0019 config: uacomment=tank-0017 - name: tank-0019 image: tag: "0.19.2" - connect: + addnode: - tank-0020 config: uacomment=tank-0019 - name: tank-0020 image: tag: "0.20.0" - connect: + addnode: - tank-0021 config: uacomment=tank-0020 - name: tank-0021 image: tag: "0.21.1" - connect: + addnode: - tank-0024 config: uacomment=tank-0021 - name: tank-0024 image: tag: "24.2" - connect: + addnode: - tank-0025 config: uacomment=tank-0024 - name: tank-0025 image: tag: "25.1" - connect: + addnode: - tank-0026 config: uacomment=tank-0025 - name: tank-0026 image: tag: "26.0" - connect: + addnode: - tank-0027 config: uacomment=tank-0026 - name: tank-0027 image: tag: "27.0" - connect: + addnode: - tank-0016 config: uacomment=tank-0027 \ No newline at end of file diff --git a/test/data/logging/network.yaml b/test/data/logging/network.yaml index a06a5ea24..fb79c030e 100644 --- a/test/data/logging/network.yaml +++ b/test/data/logging/network.yaml @@ -1,15 +1,15 @@ nodes: - name: tank-0000 - connect: + addnode: - tank-0002 metricsExport: true - name: tank-0001 - connect: + addnode: - tank-0002 metricsExport: true metrics: txrate=getchaintxstats(10)["txrate"] - name: tank-0002 - connect: + addnode: - tank-0000 caddy: enabled: true \ No newline at end of file diff --git a/test/data/services/network.yaml b/test/data/services/network.yaml index d523fbf97..c9dde9931 100644 --- a/test/data/services/network.yaml +++ b/test/data/services/network.yaml @@ -6,7 +6,7 @@ nodes: rpcauth=forkobserver:1418183465eecbd407010cf60811c6a0$d4e5f0647a63429c218da1302d7f19fe627302aeb0a71a74de55346a25d8057c 
rpcwhitelist=forkobserver:getchaintips,getblockheader,getblockhash,getblock,getnetworkinfo rpcwhitelistdefault=0 - connect: + addnode: - paul - name: paul config: | @@ -22,7 +22,7 @@ nodes: rpcauth=forkobserver:1418183465eecbd407010cf60811c6a0$d4e5f0647a63429c218da1302d7f19fe627302aeb0a71a74de55346a25d8057c rpcwhitelist=forkobserver:getchaintips,getblockheader,getblockhash,getblock,getnetworkinfo rpcwhitelistdefault=0 - connect: + addnode: - ringo - name: ringo config: | diff --git a/test/data/signet/network.yaml b/test/data/signet/network.yaml index eb422fddf..3551db4df 100644 --- a/test/data/signet/network.yaml +++ b/test/data/signet/network.yaml @@ -3,50 +3,50 @@ nodes: - name: tank-1 image: tag: "0.16.1" - connect: + addnode: - miner - name: tank-2 image: tag: "0.17.0" - connect: + addnode: - miner - name: tank-3 image: tag: "0.19.2" - connect: + addnode: - miner - name: tank-4 image: tag: "0.20.0" - connect: + addnode: - miner - name: tank-5 image: tag: "0.21.1" - connect: + addnode: - miner - name: tank-6 image: tag: "24.2" - connect: + addnode: - miner - name: tank-7 image: tag: "25.1" - connect: + addnode: - miner - name: tank-8 image: tag: "26.0" - connect: + addnode: - miner - name: tank-9 image: tag: "27.0" - connect: + addnode: - miner - name: tank-10 image: tag: "0.16.1" - connect: + addnode: - miner diff --git a/test/data/signet/node-defaults.yaml b/test/data/signet/node-defaults.yaml index 43523c669..ae51990ed 100644 --- a/test/data/signet/node-defaults.yaml +++ b/test/data/signet/node-defaults.yaml @@ -8,7 +8,7 @@ chain: signet spec: restartPolicy: Always -config: | +defaultConfig: | debug=rpc debug=net signetchallenge=0014d33b6e11ca95c4edccd8e986434358d79e919730 \ No newline at end of file diff --git a/test/data/ten_semi_unconnected/network.yaml b/test/data/ten_semi_unconnected/network.yaml index 5071de9c4..f058ac70f 100644 --- a/test/data/ten_semi_unconnected/network.yaml +++ b/test/data/ten_semi_unconnected/network.yaml @@ -22,7 +22,7 @@ nodes: 
config: | debug=validation - name: tank-0008 - connect: + addnode: - tank-0009 config: | debug=validation diff --git a/test/graph_test.py b/test/graph_test.py index 482c555ab..3d0ad5848 100755 --- a/test/graph_test.py +++ b/test/graph_test.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 +import json import os -import shutil import pexpect from test_base import TestBase @@ -15,34 +15,65 @@ def __init__(self): def run_test(self): try: + # cwd out of the git repo for remainder of script + os.chdir(self.tmpdir) self.directory_not_exist() os.mkdir(NETWORKS_DIR) self.directory_exists() - + self.run_created_network() finally: - shutil.rmtree(NETWORKS_DIR) if os.path.exists(NETWORKS_DIR) else None + self.cleanup() def directory_not_exist(self): - self.sut = pexpect.spawn("warnet create") - self.sut.expect("init", timeout=50) + try: + self.log.info("testing warnet create, dir doesn't exist") + self.sut = pexpect.spawn("warnet create") + self.sut.expect("init", timeout=10) + except Exception as e: + print(f"\nReceived prompt text:\n {self.sut.before.decode('utf-8')}\n") + raise e def directory_exists(self): - self.sut = pexpect.spawn("warnet create") - self.sut.expect("name", timeout=10) - self.sut.sendline("ANewNetwork") - self.sut.expect("many", timeout=10) - self.sut.sendline("") - self.sut.expect("connections", timeout=10) - self.sut.sendline("") - self.sut.expect("version", timeout=10) - self.sut.sendline("") - self.sut.expect("enable fork-observer", timeout=10) - self.sut.sendline("") - self.sut.expect("seconds", timeout=10) - self.sut.sendline("") - self.sut.expect("enable grafana", timeout=10) - self.sut.sendline("") - self.sut.expect("successfully", timeout=50) + try: + self.log.info("testing warnet create, dir does exist") + self.sut = pexpect.spawn("warnet create") + self.sut.expect("name", timeout=10) + self.sut.sendline("ANewNetwork") + self.sut.expect("many", timeout=10) + self.sut.sendline("") + self.sut.expect("connections", timeout=10) + self.sut.sendline("") + 
self.sut.expect("version", timeout=10) + self.sut.sendline("") + self.sut.expect("enable fork-observer", timeout=10) + self.sut.sendline("") + self.sut.expect("seconds", timeout=10) + self.sut.sendline("") + self.sut.expect("enable grafana", timeout=10) + self.sut.sendline("") + self.sut.expect("successfully", timeout=50) + except Exception as e: + print(f"\nReceived prompt text:\n {self.sut.before.decode('utf-8')}\n") + raise e + + def run_created_network(self): + self.log.info("adding custom config to one tank") + with open("networks/ANewNetwork/network.yaml") as f: + s = f.read() + s = s.replace(" name: tank-0000\n", " name: tank-0000\n config: debug=mempool\n") + with open("networks/ANewNetwork/network.yaml", "w") as f: + f.write(s) + + self.log.info("deploying new network") + self.warnet("deploy networks/ANewNetwork") + self.wait_for_all_tanks_status(target="running") + debugs = json.loads(self.warnet("bitcoin rpc tank-0000 logging")) + # set in defaultConfig + assert debugs["rpc"] + # set in config just for this tank + assert debugs["mempool"] + # santy check + assert not debugs["zmq"] if __name__ == "__main__": diff --git a/test/logging_test.py b/test/logging_test.py index 64abc0846..bfec6c25c 100755 --- a/test/logging_test.py +++ b/test/logging_test.py @@ -7,7 +7,7 @@ import requests from test_base import TestBase -GRAFANA_URL = "http://localhost:2019/grafana/" +from warnet.k8s import get_ingress_ip_or_host class LoggingTest(TestBase): @@ -29,13 +29,17 @@ def setup_network(self): self.log.info(self.warnet(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running", timeout=10 * 60) self.wait_for_all_edges() + self.wait_for_predicate(lambda: get_ingress_ip_or_host()) + ingress_ip = get_ingress_ip_or_host() + self.grafana_url = f"http://{ingress_ip}/grafana" + self.log.info(f"Grafana URL: {self.grafana_url}") def wait_for_endpoint_ready(self): self.log.info("Waiting for Grafana to be ready to receive API calls...") def check_endpoint(): 
try: - response = requests.get(f"{GRAFANA_URL}login") + response = requests.get(f"{self.grafana_url}/login") return response.status_code == 200 except requests.RequestException: return False @@ -50,7 +54,7 @@ def make_grafana_api_request(self, ds_uid, start, metric): "from": f"{start}", "to": "now", } - reply = requests.post(f"{GRAFANA_URL}api/ds/query", json=data) + reply = requests.post(f"{self.grafana_url}/api/ds/query", json=data) if reply.status_code != 200: self.log.error(f"Grafana API request failed with status code {reply.status_code}") self.log.error(f"Response content: {reply.text}") @@ -67,7 +71,7 @@ def test_prometheus_and_grafana(self): self.warnet(f"run {miner_file} --allnodes --interval=5 --mature") self.warnet(f"run {tx_flood_file} --interval=1") - prometheus_ds = requests.get(f"{GRAFANA_URL}api/datasources/name/Prometheus") + prometheus_ds = requests.get(f"{self.grafana_url}/api/datasources/name/Prometheus") assert prometheus_ds.status_code == 200 prometheus_uid = prometheus_ds.json()["uid"] self.log.info(f"Got Prometheus data source uid from Grafana: {prometheus_uid}") @@ -92,7 +96,7 @@ def get_five_values_for_metric(metric): self.wait_for_predicate(lambda: get_five_values_for_metric("txrate")) # Verify default dashboard exists - dbs = requests.get(f"{GRAFANA_URL}api/search").json() + dbs = requests.get(f"{self.grafana_url}/api/search").json() assert dbs[0]["title"] == "Default Warnet Dashboard" diff --git a/test/namespace_admin_test.py b/test/namespace_admin_test.py new file mode 100755 index 000000000..5dd4cdfef --- /dev/null +++ b/test/namespace_admin_test.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 + +import os +from pathlib import Path +from typing import Callable, Optional + +from scenarios_test import ScenariosTest +from test_base import TestBase + +from warnet.constants import KUBECONFIG, WARGAMES_NAMESPACE_PREFIX +from warnet.k8s import ( + K8sError, + get_kubeconfig_value, + get_static_client, + open_kubeconfig, + write_kubeconfig, +) 
+from warnet.process import run_command + + +class NamespaceAdminTest(ScenariosTest, TestBase): + def __init__(self): + super().__init__() + + self.namespace_dir = ( + Path(os.path.dirname(__file__)) + / "data" + / "admin" + / "namespaces" + / "two_namespaces_two_users" + ) + + self.initial_context = None + self.current_context = None + self.bob_user = "bob-warnettest" + self.bob_auth_file = "bob-warnettest-wargames-red-team-warnettest-kubeconfig" + self.bob_context = "bob-warnettest-wargames-red-team-warnettest" + + self.blue_namespace = "wargames-blue-team-warnettest" + self.red_namespace = "wargames-red-team-warnettest" + self.blue_users = ["carol-warnettest", "default", "mallory-warnettest"] + self.red_users = ["alice-warnettest", self.bob_user, "default"] + + def run_test(self): + try: + os.chdir(self.tmpdir) + self.log.info(f"Running test in: {self.tmpdir}") + self.establish_initial_context() + self.setup_namespaces() + self.setup_service_accounts() + self.setup_network() + self.authenticate_and_become_bob() + self.bob_runs_scenario_tests() + finally: + self.return_to_initial_context() + try: + self.cleanup_kubeconfig() + except K8sError as e: + self.log.info(f"KUBECONFIG cleanup error: {e}") + self.cleanup() + + def establish_initial_context(self): + self.initial_context = get_kubeconfig_value("{.current-context}") + self.log.info(f"Initial context: {self.initial_context}") + self.current_context = self.initial_context + self.log.info(f"Current context: {self.current_context}") + + def setup_namespaces(self): + self.log.info("Setting up the namespaces") + self.log.info(self.warnet(f"deploy {self.namespace_dir}")) + self.wait_for_predicate(self.two_namespaces_are_validated) + self.log.info("Namespace setup complete") + + def setup_service_accounts(self): + self.log.info("Creating service accounts...") + self.log.info(self.warnet("admin create-kubeconfigs")) + self.wait_for_predicate(self.service_accounts_are_validated) + self.log.info("Service accounts have 
been set up and validated") + + def setup_network(self): + if self.current_context == self.bob_context: + self.log.info(f"Allowing {self.current_context} to update the network...") + assert self.this_is_the_current_context(self.bob_context) + self.warnet(f"deploy {self.network_dir}") + else: + self.log.info("Deploy networks to team namespaces") + assert self.this_is_the_current_context(self.initial_context) + self.log.info(self.warnet(f"deploy {self.network_dir} --to-all-users")) + self.wait_for_all_tanks_status() + self.log.info("Waiting for all edges") + self.wait_for_all_edges() + + def authenticate_and_become_bob(self): + self.log.info("Authenticating and becoming bob...") + self.log.info(f"Current context: {self.current_context}") + assert self.initial_context == self.current_context + assert get_kubeconfig_value("{.current-context}") == self.initial_context + self.warnet(f"auth kubeconfigs/{self.bob_auth_file}") + self.current_context = self.bob_context + assert get_kubeconfig_value("{.current-context}") == self.current_context + self.log.info(f"Current context: {self.current_context}") + + def service_accounts_are_validated(self) -> bool: + self.log.info("Checking service accounts") + sclient = get_static_client() + namespaces = sclient.list_namespace().items + + filtered_namespaces = [ + ns.metadata.name + for ns in namespaces + if ns.metadata.name.startswith(WARGAMES_NAMESPACE_PREFIX) + ] + assert len(filtered_namespaces) != 0 + + maybe_service_accounts = {} + + for namespace in filtered_namespaces: + service_accounts = sclient.list_namespaced_service_account(namespace=namespace).items + for sa in service_accounts: + maybe_service_accounts.setdefault(namespace, []).append(sa.metadata.name) + + expected = { + self.blue_namespace: self.blue_users, + self.red_namespace: self.red_users, + } + + return maybe_service_accounts == expected + + def get_namespaces(self) -> Optional[list[str]]: + self.log.info("Querying the namespaces...") + resp = self.warnet("admin 
namespaces list") + if resp == "No warnet namespaces found.": + return None + namespaces = [] + for line in resp.splitlines(): + if line.startswith("- "): + namespaces.append(line.lstrip("- ")) + self.log.info(f"Namespaces: {namespaces}") + return namespaces + + def two_namespaces_are_validated(self) -> bool: + maybe_namespaces = self.get_namespaces() + if maybe_namespaces is None: + return False + if self.blue_namespace not in maybe_namespaces: + return False + return self.red_namespace in maybe_namespaces + + def return_to_initial_context(self): + cmd = f"kubectl config use-context {self.initial_context}" + self.log.info(run_command(cmd)) + self.wait_for_predicate(self.this_is_the_current_context(self.initial_context)) + + def this_is_the_current_context(self, context: str) -> Callable[[], bool]: + cmd = "kubectl config current-context" + current_context = run_command(cmd).strip() + self.log.info(f"Current context: {current_context} {context == current_context}") + return lambda: current_context == context + + def cleanup_kubeconfig(self): + try: + kubeconfig_data = open_kubeconfig(KUBECONFIG) + except K8sError as e: + raise K8sError(f"Could not open KUBECONFIG: {KUBECONFIG}") from e + + kubeconfig_data = remove_user(kubeconfig_data, self.bob_user) + kubeconfig_data = remove_context(kubeconfig_data, self.bob_context) + + try: + write_kubeconfig(kubeconfig_data, KUBECONFIG) + except Exception as e: + raise K8sError(f"Could not write to KUBECONFIG: {KUBECONFIG}") from e + + def bob_runs_scenario_tests(self): + assert self.this_is_the_current_context(self.bob_context) + super().run_test() + assert self.this_is_the_current_context(self.bob_context) + + +def remove_user(kubeconfig_data: dict, username: str) -> dict: + kubeconfig_data["users"] = [ + user for user in kubeconfig_data["users"] if user["name"] != username + ] + return kubeconfig_data + + +def remove_context(kubeconfig_data: dict, context_name: str) -> dict: + kubeconfig_data["contexts"] = [ + context for 
context in kubeconfig_data["contexts"] if context["name"] != context_name + ] + return kubeconfig_data + + +if __name__ == "__main__": + test = NamespaceAdminTest() + test.run_test() diff --git a/test/scenarios_test.py b/test/scenarios_test.py index 867d5107f..dfb8d6b2d 100755 --- a/test/scenarios_test.py +++ b/test/scenarios_test.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import os +import re from pathlib import Path from test_base import TestBase @@ -14,12 +15,14 @@ class ScenariosTest(TestBase): def __init__(self): super().__init__() self.network_dir = Path(os.path.dirname(__file__)) / "data" / "12_node_ring" + self.scen_dir = Path(os.path.dirname(__file__)).parent / "resources" / "scenarios" def run_test(self): try: self.setup_network() self.run_and_check_miner_scenario_from_file() self.run_and_check_scenario_from_file() + self.run_and_check_scenario_from_file_debug() self.check_regtest_recon() self.check_active_count() finally: @@ -71,7 +74,7 @@ def check_blocks(self, target_blocks, start: int = 0): return count >= start + target_blocks def run_and_check_miner_scenario_from_file(self): - scenario_file = "resources/scenarios/miner_std.py" + scenario_file = self.scen_dir / "miner_std.py" self.log.info(f"Running scenario from file: {scenario_file}") self.warnet(f"run {scenario_file} --allnodes --interval=1") start = int(self.warnet("bitcoin rpc tank-0000 getblockcount")) @@ -81,22 +84,28 @@ def run_and_check_miner_scenario_from_file(self): assert "Active Scenarios: 1" in table self.stop_scenario() + def run_and_check_scenario_from_file_debug(self): + scenario_file = self.scen_dir / "test_scenarios" / "p2p_interface.py" + self.log.info(f"Running scenario from: {scenario_file}") + output = self.warnet(f"run {scenario_file} --source_dir={self.scen_dir} --debug") + self.check_for_pod_deletion_message(output) + def run_and_check_scenario_from_file(self): - scenario_file = "test/data/scenario_p2p_interface.py" + scenario_file = self.scen_dir / "test_scenarios" / 
"p2p_interface.py" self.log.info(f"Running scenario from: {scenario_file}") - self.warnet(f"run {scenario_file}") + self.warnet(f"run {scenario_file} --source_dir={self.scen_dir}") self.wait_for_predicate(self.check_scenario_clean_exit) def check_regtest_recon(self): - scenario_file = "resources/scenarios/reconnaissance.py" + scenario_file = self.scen_dir / "reconnaissance.py" self.log.info(f"Running scenario from file: {scenario_file}") self.warnet(f"run {scenario_file}") self.wait_for_predicate(self.check_scenario_clean_exit) def check_active_count(self): - scenario_file = "test/data/scenario_buggy_failure.py" + scenario_file = self.scen_dir / "test_scenarios" / "buggy_failure.py" self.log.info(f"Running scenario from: {scenario_file}") - self.warnet(f"run {scenario_file}") + self.warnet(f"run {scenario_file} --source_dir={self.scen_dir}") def two_pass_one_fail(): deployed = scenarios_deployed() @@ -108,6 +117,12 @@ def two_pass_one_fail(): table = self.warnet("status") assert "Active Scenarios: 0" in table + def check_for_pod_deletion_message(self, input): + message = "Deleting pod..." 
+ self.log.info(f"Checking for message: '{message}'") + assert re.search(re.escape(message), input, flags=re.MULTILINE) + self.log.info(f"Found message: '{message}'") + if __name__ == "__main__": test = ScenariosTest() diff --git a/test/services_test.py b/test/services_test.py index a80717db9..e74fc85e1 100755 --- a/test/services_test.py +++ b/test/services_test.py @@ -1,11 +1,15 @@ #!/usr/bin/env python3 +import json import os from pathlib import Path +from time import sleep import requests from test_base import TestBase +from warnet.k8s import get_ingress_ip_or_host, wait_for_ingress_controller + class ServicesTest(TestBase): def __init__(self): @@ -29,14 +33,27 @@ def check_fork_observer(self): self.log.info("Creating chain split") self.warnet("bitcoin rpc john createwallet miner") self.warnet("bitcoin rpc john -generate 1") - # Port will be auto-forwarded by `warnet deploy`, routed through the enabled Caddy pod + + self.log.info("Waiting for ingress controller") + wait_for_ingress_controller() + + self.log.info("Waiting for ingress host") + ingress_ip = None + attempts = 100 + while not ingress_ip: + ingress_ip = get_ingress_ip_or_host() + attempts -= 1 + if attempts < 0: + raise Exception("Never got ingress host") + sleep(1) + # network id is 0xDEADBE in decimal + fo_data_uri = f"http://{ingress_ip}/fork-observer/api/14593470/data.json" def call_fo_api(): - fo_root = "http://localhost:2019/fork-observer" + # if on minikube remember to run `minikube tunnel` for this test to run try: - fo_res = requests.get(f"{fo_root}/api/networks.json") - network_id = fo_res.json()["networks"][0]["id"] - fo_data = requests.get(f"{fo_root}/api/{network_id}/data.json") + self.log.info(f"Getting: {fo_data_uri}") + fo_data = requests.get(fo_data_uri) # fork observed! 
return len(fo_data.json()["header_infos"]) == 2 except Exception as e: @@ -47,6 +64,19 @@ def call_fo_api(): self.wait_for_predicate(call_fo_api) self.log.info("Fork observed!") + self.log.info("Checking node description...") + fo_data = requests.get(fo_data_uri) + nodes = fo_data.json()["nodes"] + assert len(nodes) == 4 + assert nodes[1]["name"] == "john" + assert nodes[1]["description"] == "john.default.svc:18444" + + self.log.info("Checking reachable address is provided...") + self.warnet("bitcoin rpc george addnode john.default.svc:18444 onetry") + self.wait_for_predicate( + lambda: len(json.loads(self.warnet("bitcoin rpc george getpeerinfo"))) > 1 + ) + if __name__ == "__main__": test = ServicesTest() diff --git a/test/test_base.py b/test/test_base.py index 1a2a4c983..2b024da64 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -39,12 +39,13 @@ def setup_logging(self): logging.config.dictConfig(logging_config) self.log = logging.getLogger("test") self.log.info("Logging started") + self.log.info(f"Testdir: {self.tmpdir}") def cleanup(self, signum=None, frame=None): try: self.log.info("Stopping network") if self.network: - self.warnet("down") + self.warnet("down --force") self.wait_for_all_tanks_status(target="stopped", timeout=60, interval=1) except Exception as e: self.log.error(f"Error bringing network down: {e}") @@ -130,7 +131,7 @@ def check_scenarios(): if len(scns) == 0: return True for s in scns: - exit_status = get_pod_exit_status(s["name"]) + exit_status = get_pod_exit_status(s["name"], s["namespace"]) self.log.debug(f"Scenario {s['name']} exited with code {exit_status}") if exit_status != 0: return False