diff --git a/.dockerignore b/.dockerignore index e41ff7a8b..9f4bf0390 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,9 +2,16 @@ **/*.pyc **/__pycache__ +**/rootfs/ +**/*.sqlite3 +# **/*.squashfs **/*.bin **/*.ext4 **/*.zip **/*.pyz +**/*.rdb +**/*.key **/data.tgz /pydantic/ +**/target +/packaging/sevctl/target diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..dd84ea782 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,38 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Desktop (please complete the following information):** + - OS: [e.g. iOS] + - Browser [e.g. chrome, safari] + - Version [e.g. 22] + +**Smartphone (please complete the following information):** + - Device: [e.g. iPhone6] + - OS: [e.g. iOS8.1] + - Browser [e.g. stock browser, safari] + - Version [e.g. 22] + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..bbcbbe7d6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 
+ +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..ff965a1de --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,34 @@ +Explain what problem this PR is resolving + +Related ClickUp, GitHub or Jira tickets: ALEPH-XXX + +## Self proofreading checklist + +- [ ] The new code is clear, easy to read and well commented. +- [ ] New code does not duplicate the functions of builtin or popular libraries. +- [ ] An LLM was used to review the new code and look for simplifications. +- [ ] New classes and functions contain docstrings explaining what they provide. +- [ ] All new code is covered by relevant tests. +- [ ] Documentation has been updated regarding these changes. +- [ ] Dependency updates in the project.toml have been mirrored in the Debian package build script `packaging/Makefile` + +## Changes + +Explain the changes that were made. The idea is not to list exhaustively all the changes made (GitHub already provides a full diff), but to help the reviewers better understand: +- which specific file changes go together, e.g: when creating a table in the front-end, there usually is a config file that goes with it +- the reasoning behind some changes, e.g: deleted files because they are now redundant +- the behaviour to expect, e.g: tooltip has purple background color because the client likes it so, changed a key in the API response to be consistent with other endpoints + +## How to test + +Explain how to test your PR. +If a specific config is required explain it here (account, data entry, ...) 
+ +## Print screen / video + +Upload here screenshots or videos showing the changes if relevant. + +## Notes + +Things that the reviewers should know: known bugs that are out of the scope of the PR, other trade-offs that were made. +If the PR depends on a PR in another repo, or merges into another PR (i.e. main), it should also be mentioned here diff --git a/.github/scripts/extract_droplet_ipv4.py b/.github/scripts/extract_droplet_ipv4.py new file mode 100755 index 000000000..891058072 --- /dev/null +++ b/.github/scripts/extract_droplet_ipv4.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python3 +""" +Extract the IP address of a DigitalOcean Droplet +from the JSON returned by `doctl compute droplet get $name --output json` +""" + +import json +import sys + +droplet_info = json.load(sys.stdin) +print(droplet_info[0]["networks"]["v4"][0]["ip_address"]) diff --git a/.github/workflows/build-deb-package.yml b/.github/workflows/build-deb-package.yml new file mode 100644 index 000000000..624117f55 --- /dev/null +++ b/.github/workflows/build-deb-package.yml @@ -0,0 +1,92 @@ +--- +name: "Build Packages" +on: push + + +jobs: + build_deb: + name: "Build ${{ matrix.os }} Package" + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + os: ["debian-12", "ubuntu-22.04", "ubuntu-24.04"] + include: + - os: "debian-12" + make_target: "all-podman-debian-12" + artifact_name: "aleph-vm.debian-12.deb" + - os: "ubuntu-22.04" + make_target: "all-podman-ubuntu-2204" + artifact_name: "aleph-vm.ubuntu-22.04.deb" + - os: "ubuntu-24.04" + make_target: "all-podman-ubuntu-2404" + artifact_name: "aleph-vm.ubuntu-24.04.deb" + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + submodules: true + # Fetch the whole history for all tags and branches (required for aleph.__version__) + fetch-depth: 0 + + - name: Initialize git submodules + run: git submodule init + + - run: | + cd packaging && make ${{ matrix.make_target }} && cd .. 
+ ls packaging/target + + - name: Ensure that the relevant files are present in the package + run: | + dpkg --contents packaging/target/${{ matrix.artifact_name }} | grep /opt/kubo/ipfs + dpkg --contents packaging/target/${{ matrix.artifact_name }} | grep /opt/firecracker/firecracker + dpkg --contents packaging/target/${{ matrix.artifact_name }} | grep /opt/firecracker/jailer + dpkg --contents packaging/target/${{ matrix.artifact_name }} | grep /opt/firecracker/vmlinux.bin + dpkg --contents packaging/target/${{ matrix.artifact_name }} | grep /opt/sevctl + + - uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.artifact_name }} + path: packaging/target/${{ matrix.artifact_name }} + + build_rootfs: + name: "Build runtime aleph-${{ matrix.os }}-python" + runs-on: ubuntu-latest + strategy: + matrix: + os: ["debian-12"] + include: + - os: "debian-12" + artifact_name: "aleph-debian-12-python.squashfs" + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Workaround github issue https://github.com/actions/runner-images/issues/7192 + run: sudo echo RESET grub-efi/install_devices | sudo debconf-communicate grub-pc + + - run: | + sudo apt update + sudo apt install -y debootstrap + cd runtimes/aleph-${{ matrix.os }}-python && sudo ./create_disk_image.sh && cd ../.. 
+ + - uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.artifact_name }} + path: runtimes/aleph-${{ matrix.os }}-python/rootfs.squashfs + + build_example_venv_volume: + name: "Build example squashfs volume using Docker" + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - run: | + docker build -t aleph-vm-build-squashfs -f examples/volumes/Dockerfile examples/volumes + docker run --rm -v "$(pwd)":/mnt aleph-vm-build-squashfs + + - uses: actions/upload-artifact@v4 + with: + name: example-volume-venv.squashfs + path: volume-venv.squashfs diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 000000000..fda8e50e3 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,74 @@ +--- +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. 
+# +name: "CodeQL" + + +on: + push: + branches: [main] + pull_request: + # The branches below must be a subset of the branches above + branches: [main] + schedule: + - cron: '15 16 * * 0' + + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: ['python'] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] + # Learn more: + # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + + # ℹ️ Command-line programs to run using the OS shell. 
+ # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/deploy-main-on-staging.yml b/.github/workflows/deploy-main-on-staging.yml new file mode 100644 index 000000000..76d4a3343 --- /dev/null +++ b/.github/workflows/deploy-main-on-staging.yml @@ -0,0 +1,64 @@ +--- +# This workflow automatically deploys main on staging +name: "Deploy `main` automatically on staging" + + +on: + push: + branches: + - main + + +jobs: + deploy_staging_servers: + name: "Deploying on ${{ matrix.staging_servers.hostname }}" + runs-on: ubuntu-latest + strategy: + matrix: + staging_servers: + - hostname: "ovh.staging.aleph.sh" + # Use `ssh-keyscan -H host | base64 --wrap=0` to obtain the host keys + host_keys: 
"fDF8b3JHVkxyOU83Qnh0QmkvWjd4N0lVTkRDSHFRPXwxZEdZSnNjNlFyejA5QkR6cGROR1BLYjNES009IHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQmdRRHZwSmNpV2dscTNCbEsxY2xOUmNETnVQeFVCeGF3bE5qVElHZFV2MmoyTVo4KzliVVpDSkI1aXFIKzNvbkc5Vklla1RQdW1ybFlXbFMvZkkvSzM3dTh5UXJuQ3JkNi9XRm9XWWJOaTJ4NWxSOUhzaTViRXQ4MFAwRkFyVVpaSkdzbnRQdVhGeFJGR3dHeFNENTN2emVMbmU4VjRlaUxrQ3BjMDU5YzZVVHBublcvSjdRRnlzZURDUXIwVzFsMzBNcjlnTm1LbmpBd2VLWXdCS0hYaG42VGdSd1RYT1E3VXJCc3c2Q1d0OHI2N2g4QkJ2UHQ5OWt5OHl4dUw2Z25TRlhqeWhyKzVhd1lTY3VhVU5JS3B0Y2JFOWpISHhEY1FLSHN0akZZRHRsM0JWN29rUEkvUWJablpSMDVTdDgvZldoK2p5K3ZtR3BTWmtFckJ2NkUwdFhHMDhmdkdheVRXNWFVcWxRQmlLMzJpNmJlUWordjI3b0pUWndvcndBOVJCY1QramlCWVRNVUFpTTJrblhXMGlqT2ViWDNackpITm5QbXJkbjBTd1JldGlLRzg2SGdRK3d3a0dwd3UxVk01eTFlbTVwZ0VUdnU5SHg1RTFKeEJLcXJ3ZkdhTVVRWFZEWG8yNDg5bW1XZzA1aUFHejZIaXNTVWRESFlRKzhnWnA4PQp8MXxvUzkyc1NEb3RxU1hSb0F6MUpFS1V2RDhVTGM9fDVtSHZBSVdqbk1CN2IwcllRQlo0SXBpaFlqQT0gZWNkc2Etc2hhMi1uaXN0cDI1NiBBQUFBRTJWalpITmhMWE5vWVRJdGJtbHpkSEF5TlRZQUFBQUlibWx6ZEhBeU5UWUFBQUJCQkZNanZFOEFsQmYxbkp1Y0ZlaEJjSUY2RE8wdGJOdU96OEx5QlFUdC82RlEwaWYyWVAxQUJ1TjBmYXVIT3R4WEx6b25vSGVhTDZaV0JoakhmRGV4NlY4PQp8MXxMc2lPc3RhVGk5bEhYSlFsWDJYQ3c3Q0lTU1k9fGk1RzlFTHJydHpaYkUrR2JjbWF1SDIxOG1ZND0gc3NoLWVkMjU1MTkgQUFBQUMzTnphQzFsWkRJMU5URTVBQUFBSUp1QVNEMWY1d2dXM3pnd3FGalBXYzhPRi9BZ1pmSFFVa3lRMDE2c1MrRmoK" + os: "debian-12" + make_target: "all-podman-debian-12" + artifact_name: "aleph-vm.debian-12.deb" + + - hostname: "hetzner.staging.aleph.sh" + # Use `ssh-keyscan -H host | base64 --wrap=0` to obtain the host keys + host_keys: 
"fDF8WUlKd0FYWnYxZ24vNkRCU0tkYjg0TC9sUngwPXwrRk96RzdoSTJ5Y3JzUW1uSEwrdEFBQkR4YUU9IHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQmdRRHBKcHF5ajUxWUluRkNyZjZUWjE5eUF3cHlXNTNHaFAxNXQ0Wm56cHBwOUVnNTNnZmVWdmk5WUV1bVV6cnVUN01LdFpobjNsb0U5YVFtRUYzSElpb3c5ZmlCWVA3aUMzUUFGdUJCandPUmQwV1RVWDZQQUN2c2p0b1JLWjJpTWZ2YXdITHdrWHErWnkrc2hHNU44L2pwQlJ4MC9paXJta2xPS0F5QWw0QTYzZ2MxMndsVGQzcS9IcDVxd1dSYVV3M1JVUTFTVVJSN2RGRW81VWxqeUZVYS9zdWV1STBYOXdLd0tPZ09iOEo3ZFZDMEdDT3VibkJXL3Jmb3N0YVV5eStaSzdQdzBrM251M2szTFZuUVlPTGlNOG1NMnJub2ZWZ2RSWXpiM3RTUVVrbk9wektBVzBXK3llWmpSOXp1UG4yNXF4bWxsRmRaNmt3QTFDcWY2MmQyQ0dtQ2NDU3dUSUl4ZHJ3M29oOEZOclpROTI4OGQvcmF4djZXZi9oZDI0Y1JqeDdFSEJxOUFWMW02UTZWeGxnSWl0WjIzODlsYjRmODNGclRrNUtib3J3Zm5oM3NaZFRSSkJqRjRhdHZ5NktsWFYxenROc05BeDhFN1RnTDMzVFlnRGc4RWlldGN1TVlzUlcwSnREdldBNGxsZDFQS3JrbDJ1LzZvLzNUb0xVPQp8MXxmQ3FnTjB2WHpMTnAzdklnZXdkSFRQRTA0ZUk9fDhnSituTC9hUGpEQlRMcUNJak1sZFpVbFRpST0gZWNkc2Etc2hhMi1uaXN0cDI1NiBBQUFBRTJWalpITmhMWE5vWVRJdGJtbHpkSEF5TlRZQUFBQUlibWx6ZEhBeU5UWUFBQUJCQktWbnE5aWsvcHZFaDdXbHFydUtWZmdZeTlwOVpNQnVKV2IrZkVvS0hZY0ZSYld5c0lYRjJlalBnaUMyOFEvZExqeUhXd2RVZlMySFBMbGNxRVFEZlpvPQp8MXxtVzA4T3ZqUnh0bmRjYVNyc0poWXBQcXp2akk9fFlDcktMeUg4ZnJJR0lRV05RS3hiUnArNlIvTT0gc3NoLWVkMjU1MTkgQUFBQUMzTnphQzFsWkRJMU5URTVBQUFBSUl5ZGNhTXF1dkZFTEpNUDBlRmhNUGJWZVBSVjlSUEhVRzhIZGZIQmRvaTEK" + os: "debian-12" + make_target: "all-podman-debian-12" + artifact_name: "aleph-vm.debian-12.deb" + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + # Fetch the whole history for all tags and branches (required for aleph.__version__) + fetch-depth: 0 + + - run: | + cd packaging && make ${{ matrix.staging_servers.make_target }} && cd .. 
+ ls packaging/target + + - name: Setup SSH private key + run: | + mkdir ~/.ssh + echo $STAGING_SSH_PRIVATE_KEY | base64 --decode > ~/.ssh/id_ed25519 + chmod 0700 ~/.ssh + chmod 0600 ~/.ssh/id_ed25519 + env: + # Create using: + # ssh-keygen -t ed25519 -f ./id_ed25519 + # cat ./id_ed25519 | base64 --wrap=0 + STAGING_SSH_PRIVATE_KEY: ${{ secrets.STAGING_SSH_PRIVATE_KEY }} + + - name: Install Aleph-VM on the Staging servers + run: |- + echo ${{ matrix.staging_servers.host_keys }} | base64 --decode > ~/.ssh/known_hosts + + # Wait for /var/lib/apt/lists/lock to be unlocked on the remote host via SSH. + while ssh root@${{ matrix.staging_servers.hostname }} lsof /var/lib/apt/lists/lock; do sleep 1; done + + scp packaging/target/${{ matrix.staging_servers.artifact_name }} root@${{ matrix.staging_servers.hostname }}:/opt + ssh root@${{ matrix.staging_servers.hostname }} DEBIAN_FRONTEND=noninteractive "apt-get -o DPkg::Lock::Timeout=60 install -y --allow-downgrades /opt/${{ matrix.staging_servers.artifact_name }}" diff --git a/.github/workflows/pr-rating.yml b/.github/workflows/pr-rating.yml new file mode 100644 index 000000000..e1f4e2cb6 --- /dev/null +++ b/.github/workflows/pr-rating.yml @@ -0,0 +1,23 @@ +--- +name: Test PR Difficulty Rating Action + + +permissions: + pull-requests: write + + +on: + pull_request: + types: [opened, reopened, ready_for_review] + + +jobs: + difficulty-rating: + runs-on: ubuntu-latest + if: github.event.pull_request.draft == false + steps: + - name: PR Difficulty Rating + uses: rate-my-pr/difficulty@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + LLAMA_URL: ${{ secrets.LLAMA_URL }} diff --git a/.github/workflows/test-build-examples.yml b/.github/workflows/test-build-examples.yml new file mode 100644 index 000000000..dc4c76ade --- /dev/null +++ b/.github/workflows/test-build-examples.yml @@ -0,0 +1,47 @@ +--- + +name: "Build Examples" +on: push + + +jobs: + build_pip: + name: "Build with Pip requirements" + runs-on: ubuntu-latest + + 
steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Workaround github issue https://github.com/actions/runner-images/issues/7192 + run: sudo echo RESET grub-efi/install_devices | sudo debconf-communicate grub-pc + + - run: | + sudo apt-get -y update + sudo apt-get -y upgrade + sudo apt-get -y install python3-pip python3-venv squashfs-tools build-essential python3-nftables + + sudo mkdir /opt/packages + sudo chown $(whoami) /opt/packages + + - run: | + pip3 install hatch + + - run: | + hatch build + + - run: | + ls + pwd + pip3 install -t /opt/packages -r ./examples/example_pip/requirements.txt + mksquashfs /opt/packages packages.squashfs + +# - run: | +# ipfs add packages.squashfs + +# TODO: There is currently no easy way pass the item_hash from a pin to a new program. +# - run: | +# aleph pin QmQr3dEd6LiFq6JmUJYPLrffy45RGFhPWsxWmzo9zZb7Sy +# +# - run: | +# aleph program ./examples/example_pip main:app diff --git a/.github/workflows/test-new-runtime-examples.yml b/.github/workflows/test-new-runtime-examples.yml new file mode 100644 index 000000000..25c65302d --- /dev/null +++ b/.github/workflows/test-new-runtime-examples.yml @@ -0,0 +1,109 @@ +--- +name: "Test new runtime and examples" +on: push + + +jobs: + run_debian_12: + name: "Test new runtime on Droplet with Debian 12" + runs-on: ubuntu-latest + concurrency: droplet-aleph-vm-runtime + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + # Fetch the whole history for all tags and branches (required for aleph.__version__) + fetch-depth: 0 + + - name: Workaround github issue https://github.com/actions/runner-images/issues/7192 + run: sudo echo RESET grub-efi/install_devices | sudo debconf-communicate grub-pc + + - name: Install doctl + uses: digitalocean/action-doctl@v2 + with: + token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} + + - name: Setup SSH private key + run: | + mkdir ~/.ssh + echo $DIGITALOCEAN_SSH_PRIVATE_KEY | base64 --decode > ~/.ssh/id_ed25519 + 
chmod 0700 ~/.ssh + chmod 0600 ~/.ssh/id_ed25519 + env: + DIGITALOCEAN_SSH_PRIVATE_KEY: ${{ secrets.DIGITALOCEAN_SSH_PRIVATE_KEY }} + + - name: Create the Droplet + run: | + doctl compute droplet create \ + --image debian-12-x64 \ + --size c-4 \ + --region ams3 \ + --vpc-uuid 5976b7bd-4417-49e8-8522-672aaa920c30 \ + --enable-ipv6 \ + --ssh-keys ab:2b:25:16:46:6f:25:d0:80:63:e5:be:67:04:cb:64 \ + aleph-vm-ci-runtime + + - name: "Build custom runtime" + run: | + sudo apt update + sudo apt install -y debootstrap + cd runtimes/aleph-debian-12-python && sudo ./create_disk_image.sh && cd ../.. + + - uses: actions/upload-artifact@v4 + with: + name: aleph-debian-12-python.squashfs + path: runtimes/aleph-debian-12-python/rootfs.squashfs + + - name: Build Debian Package + run: | + cd packaging && make all-podman-debian-12 && cd .. + ls packaging/target + + - name: Wait for the system to setup and boot + run: | + export DROPLET_IPV4="$(doctl compute droplet get aleph-vm-ci-runtime --output json | ./.github/scripts/extract_droplet_ipv4.py)" + until ssh-keyscan -H ${DROPLET_IPV4}; do sleep 1; done + + - name: Copy the runtime to the system + run: | + export DROPLET_IPV4="$(doctl compute droplet get aleph-vm-ci-runtime --output json | ./.github/scripts/extract_droplet_ipv4.py)" + ssh-keyscan -H ${DROPLET_IPV4} > ~/.ssh/known_hosts + scp runtimes/aleph-debian-12-python/rootfs.squashfs root@${DROPLET_IPV4}:/opt + + - name: Install Aleph-VM on the Droplet + run: | + export DROPLET_IPV4="$(doctl compute droplet get aleph-vm-ci-runtime --output json | ./.github/scripts/extract_droplet_ipv4.py)" + ssh-keyscan -H ${DROPLET_IPV4} > ~/.ssh/known_hosts + + ssh root@${DROPLET_IPV4} DEBIAN_FRONTEND=noninteractive "apt-get -o DPkg::Lock::Timeout=60 update" + ssh root@${DROPLET_IPV4} DEBIAN_FRONTEND=noninteractive "apt-get -o DPkg::Lock::Timeout=60 upgrade -y" + ssh root@${DROPLET_IPV4} DEBIAN_FRONTEND=noninteractive "apt-get -o DPkg::Lock::Timeout=60 install -y docker.io apparmor-profiles" + 
ssh root@${DROPLET_IPV4} "docker run -d -p 127.0.0.1:4021:4021/tcp --restart=always --name vm-connector alephim/vm-connector:alpha" + + scp packaging/target/aleph-vm.debian-12.deb root@${DROPLET_IPV4}:/opt + scp -pr ./examples root@${DROPLET_IPV4}:/opt/ + ssh root@${DROPLET_IPV4} DEBIAN_FRONTEND=noninteractive "apt -o DPkg::Lock::Timeout=60 install -y /opt/aleph-vm.debian-12.deb" + ssh root@${DROPLET_IPV4} "echo ALEPH_VM_SUPERVISOR_HOST=0.0.0.0 >> /etc/aleph-vm/supervisor.env" + ssh root@${DROPLET_IPV4} "echo ALEPH_VM_FAKE_DATA_PROGRAM=/opt/examples/example_fastapi >> /etc/aleph-vm/supervisor.env" + ssh root@${DROPLET_IPV4} "echo ALEPH_VM_FAKE_DATA_RUNTIME=/opt/rootfs.squashfs >> /etc/aleph-vm/supervisor.env" + ssh root@${DROPLET_IPV4} "systemctl restart aleph-vm-supervisor" + + - name: Test Aleph-VM on the Droplet + run: | + export DROPLET_IPV4="$(doctl compute droplet get aleph-vm-ci-runtime --output json | ./.github/scripts/extract_droplet_ipv4.py)" + + sleep 3 + curl --retry 5 --max-time 10 --fail "http://${DROPLET_IPV4}:4020/about/usage/system" + curl --retry 5 --max-time 10 --fail "http://${DROPLET_IPV4}:4020/status/check/fastapi" + + - name: Export aleph logs + if: always() + run: | + export DROPLET_IPV4="$(doctl compute droplet get aleph-vm-ci-runtime --output json | ./.github/scripts/extract_droplet_ipv4.py)" + ssh root@${DROPLET_IPV4} "journalctl -u aleph-vm-supervisor" + + - name: Cleanup + if: always() + run: |- + doctl compute droplet delete -f aleph-vm-ci-runtime diff --git a/.github/workflows/test-on-droplets-matrix.yml b/.github/workflows/test-on-droplets-matrix.yml new file mode 100644 index 000000000..380e91341 --- /dev/null +++ b/.github/workflows/test-on-droplets-matrix.yml @@ -0,0 +1,195 @@ +--- +# These are end-to-end tests running on ephemeral DigitalOcean "Droplet" virtual machines +# with the different operating systems that are supported. 
+# +# The main focus of these tests is to ensure that the packaging works on all supported platforms +# and to ensure the compatibility of dependencies (system and vendored) across these platforms. +name: "Testing on DigitalOcean Droplets" + +# Run automatically on main branches, Pull Request updates and allow manual execution using `workflow_dispatch`. +on: + push: + branches: + - main + pull_request: + types: + - "opened" + - "reopened" + - "synchronize" + - "ready_for_review" + workflow_dispatch: + + +jobs: + run_on_droplet: + name: "Test Droplet with ${{ matrix.os_config.os_name }}-${{ matrix.check_vm.alias\ + \ }}" + runs-on: ubuntu-latest + concurrency: "${{ matrix.os_config.concurrency_group }}-${{ matrix.check_vm.alias\ + \ }}" + timeout-minutes: 10 + + strategy: + fail-fast: false + matrix: + + # Check compatibility with all supported OSes. + os_config: + - os_name: "Debian 12" + os_image: "debian-12-x64" + alias: "debian-12" + package_build_command: "all-podman-debian-12" + package_name: "aleph-vm.debian-12.deb" + concurrency_group: "droplet-aleph-vm-debian-12" + + - os_name: "Ubuntu 22.04" + os_image: "ubuntu-22-04-x64" + alias: "ubuntu-22-04" + package_build_command: "all-podman-ubuntu-2204" + package_name: "aleph-vm.ubuntu-22.04.deb" + concurrency_group: "droplet-aleph-vm-ubuntu-22-04" + + - os_name: "Ubuntu 24.04" + os_image: "ubuntu-24-04-x64" + alias: "ubuntu-24-04" + package_build_command: "all-podman-ubuntu-2404" + package_name: "aleph-vm.ubuntu-24.04.deb" + concurrency_group: "droplet-aleph-vm-ubuntu-24-04" + + # Check compatibility with all supported runtimes. 
+ check_vm: + - alias: "runtime-6770" # Old runtime, using Debian 11 + item_hash: "67705389842a0a1b95eaa408b009741027964edc805997475e95c505d642edd8" + query_params: "?retro-compatibility=true" + - alias: "runtime-3fc0" # Newer runtime, using Debian 12 but now old SDK + item_hash: "3fc0aa9569da840c43e7bd2033c3c580abb46b007527d6d20f2d4e98e867f7af" + query_params: "?retro-compatibility=true" + - alias: "runtime-63fa" # Latest runtime, using Debian 12 and SDK 0.9.0 + item_hash: "63faf8b5db1cf8d965e6a464a0cb8062af8e7df131729e48738342d956f29ace" + query_params: "" + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Install doctl + uses: digitalocean/action-doctl@v2 + with: + token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} + + - name: Setup SSH private key + run: | + mkdir ~/.ssh + echo $DIGITALOCEAN_SSH_PRIVATE_KEY | base64 --decode > ~/.ssh/id_ed25519 + chmod 0700 ~/.ssh + chmod 0600 ~/.ssh/id_ed25519 + env: + DIGITALOCEAN_SSH_PRIVATE_KEY: ${{ secrets.DIGITALOCEAN_SSH_PRIVATE_KEY }} + + - name: Create the Droplet + run: | + doctl compute droplet create \ + --image ${{ matrix.os_config.os_image }} \ + --size c-4 \ + --region ams3 \ + --vpc-uuid 5976b7bd-4417-49e8-8522-672aaa920c30 \ + --enable-ipv6 \ + --ssh-keys ab:2b:25:16:46:6f:25:d0:80:63:e5:be:67:04:cb:64 \ + aleph-vm-ci-${{ matrix.os_config.alias }}-${{ matrix.check_vm.alias }} + + - name: Build Package + run: | + cd packaging && make ${{ matrix.os_config.package_build_command }} && cd .. 
+ ls packaging/target + + - name: Get droplet ip and export it in env + run: | + echo "DROPLET_IPV4=$(doctl compute droplet get aleph-vm-ci-${{ matrix.os_config.alias }}-${{ matrix.check_vm.alias }} --output json | ./.github/scripts/extract_droplet_ipv4.py)" >> "$GITHUB_ENV" + + - name: Wait for the system to setup and boot + run: | + until ssh-keyscan -H ${DROPLET_IPV4}; do sleep 1; done + timeout-minutes: 3 + - name: Install Aleph-VM on the Droplet + run: | + ssh-keyscan -H ${DROPLET_IPV4} > ~/.ssh/known_hosts + + # Configuration + echo ALEPH_VM_SUPERVISOR_HOST=0.0.0.0 >> supervisor.env + echo ALEPH_VM_ALLOCATION_TOKEN_HASH=9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 >> supervisor.env + echo ALEPH_VM_CHECK_FASTAPI_VM_ID=${{ matrix.check_vm.item_hash }} >> supervisor.env + echo ALEPH_VM_SENTRY_DSN=${{ secrets.SENTRY_DSN }} >> supervisor.env + ssh root@${DROPLET_IPV4} mkdir -p /etc/aleph-vm/ + scp supervisor.env root@${DROPLET_IPV4}:/etc/aleph-vm/supervisor.env + + # Wait a few seconds for DigitalOcean to setup the Droplet using apt, which conflicts with our comands: + sleep 5 + + # Wait for /var/lib/apt/lists/lock to be unlocked on the remote host via SSH. 
+ while ssh root@${DROPLET_IPV4} lsof /var/lib/apt/lists/lock; do sleep 1; done + + ssh root@${DROPLET_IPV4} DEBIAN_FRONTEND=noninteractive "apt-get -o DPkg::Lock::Timeout=60 update" + ssh root@${DROPLET_IPV4} DEBIAN_FRONTEND=noninteractive "apt-get -o DPkg::Lock::Timeout=60 upgrade -y" + ssh root@${DROPLET_IPV4} DEBIAN_FRONTEND=noninteractive "apt-get -o DPkg::Lock::Timeout=60 install -y docker.io apparmor-profiles" + ssh root@${DROPLET_IPV4} "docker pull ghcr.io/aleph-im/vm-connector:alpha" + ssh root@${DROPLET_IPV4} "docker run -d -p 127.0.0.1:4021:4021/tcp --restart=always --name vm-connector ghcr.io/aleph-im/vm-connector:alpha" + + scp packaging/target/${{ matrix.os_config.package_name }} root@${DROPLET_IPV4}:/opt + # "--force-confold" keeps existing config files during package install/upgrade, avoiding prompts. + ssh root@${DROPLET_IPV4} DEBIAN_FRONTEND=noninteractive "apt-get -o DPkg::Lock::Timeout=60 -o Dpkg::Options::="--force-confold" install -y /opt/${{ matrix.os_config.package_name }}" + + # Allow some time for IPFS Kubo to start + sleep 5 + + - name: Test Aleph-VM on the Droplet + id: test-aleph-vm + if: always() + continue-on-error: true + run: | + curl --retry 5 --max-time 10 --fail "http://${DROPLET_IPV4}:4020/about/usage/system" + curl --retry 5 --max-time 10 --fail "http://${DROPLET_IPV4}:4020/status/check/fastapi${{ matrix.check_vm.query_params }}" + + - name: Test Aleph-VM on the Droplet again restarting the server first + if: steps.test-aleph-vm.outcome == 'failure' + run: | + # If the first execution fails, restart supervisor and try again + ssh root@${DROPLET_IPV4} "systemctl restart aleph-vm-supervisor" + sleep 5 + + curl --retry 5 --max-time 10 --fail "http://${DROPLET_IPV4}:4020/status/check/fastapi${{ matrix.check_vm.query_params }}" + + - name: Schedule an instance on the Droplet by faking a call from the scheduler + run: | + curl --retry 5 --max-time 10 --fail -X POST -H "Content-Type: application/json" \ + -H "X-Auth-Signature: test" \ 
+ -d '{"persistent_vms": [], "instances": ["${{ matrix.check_vm.item_hash }}"]}' \ + "http://${DROPLET_IPV4}:4020/control/allocations" + + - name: Fetch system usage endpoint + run: | + export DROPLET_IPV4="$(doctl compute droplet get aleph-vm-ci-${{ matrix.os_config.alias }}-${{ matrix.check_vm.alias }} --output json | ./.github/scripts/extract_droplet_ipv4.py)" + curl -X GET -H "Content-Type: application/json" \ + "http://${DROPLET_IPV4}:4020/about/usage/system" + + - name: Run the sevctl command to ensure it's properly packaged and working + run: | + export DROPLET_IPV4="$(doctl compute droplet get aleph-vm-ci-${{ matrix.os_config.alias }}-${{ matrix.check_vm.alias }} --output json | ./.github/scripts/extract_droplet_ipv4.py)" + ssh root@${DROPLET_IPV4} "/opt/sevctl --version" + + - name: Export aleph logs + if: always() + run: | + ssh root@${DROPLET_IPV4} "journalctl -u aleph-vm-supervisor" + + - name: Cleanup + if: always() + run: |- + DROPLET_IDS=$(doctl compute droplet list --format "ID,Name" --no-header | grep "aleph-vm-ci-${{ matrix.os_config.alias }}-${{ matrix.check_vm.alias }}" | awk '{print $1}') + + for DROPLET_ID in $DROPLET_IDS; do + echo "Deleting droplet with ID: $DROPLET_ID" + doctl compute droplet delete --force $DROPLET_ID + done diff --git a/.github/workflows/test-using-pytest.yml b/.github/workflows/test-using-pytest.yml new file mode 100644 index 000000000..a0b1ec229 --- /dev/null +++ b/.github/workflows/test-using-pytest.yml @@ -0,0 +1,112 @@ +--- +name: "py.test and linting" + +on: push + + +jobs: + tests-python: + name: "Test Python code" + runs-on: ubuntu-22.04 + services: + # Run vm connector for the execution tests + vm-connector: + image: alephim/vm-connector:alpha + ports: + - 4021:4021 + + steps: + - uses: actions/checkout@v4 + + - name: Workaround github issue https://github.com/actions/runner-images/issues/7192 + run: sudo echo RESET grub-efi/install_devices | sudo debconf-communicate grub-pc + + - name: Install required system 
packages only for Ubuntu Linux + run: | + sudo apt-get update + sudo apt-get -y upgrade + sudo apt-get install -y python3 python3-pip python3-aiohttp python3-msgpack python3-aiodns python3-alembic python3-sqlalchemy python3-setproctitle redis python3-aioredis python3-psutil sudo acl curl systemd-container squashfs-tools debootstrap python3-packaging python3-cpuinfo python3-nftables python3-jsonschema nftables + pip install --upgrade typing-extensions types-PyYAML + + - name: Install required Python packages + run: | + python3 -m pip install hatch hatch-vcs coverage + + - name: Test style wth ruff, black and isort + run: | + hatch run linting:style + + - name: Test typing with Mypy + run: | + hatch run linting:typing + + - name: Install required system packages for installing and running tests + run: | + sudo apt-get install libsystemd-dev cmake libdbus-1-dev libglib2.0-dev + + - name: Download and build required files for running tests. Copied from packaging/Makefile. + run: | + sudo useradd jailman + sudo mkdir --parents /opt/firecracker/ + sudo curl -fsSL -o "/opt/firecracker/vmlinux.bin" "https://ipfs.aleph.cloud/ipfs/bafybeiaj2lf6g573jiulzacvkyw4zzav7dwbo5qbeiohoduopwxs2c6vvy" + + rm -fr /tmp/firecracker-release + mkdir --parents /tmp/firecracker-release /opt/firecracker + curl -fsSL https://github.com/firecracker-microvm/firecracker/releases/download/v1.5.0/firecracker-v1.5.0-x86_64.tgz | tar -xz --no-same-owner --directory /tmp/firecracker-release + # Copy binaries: + cp /tmp/firecracker-release/release-v*/firecracker-v*[!.debug] /opt/firecracker/firecracker + cp /tmp/firecracker-release/release-v*/jailer-v*[!.debug] /opt/firecracker/jailer + chmod +x /opt/firecracker/firecracker + chmod +x /opt/firecracker/jailer + + # this produces a 33 MB log + # find /opt + + - name: "Build custom runtimes" + run: | + sudo apt update + sudo apt install -y debootstrap ndppd acl cloud-image-utils qemu-utils qemu-system-x86 + cd runtimes/aleph-debian-12-python && sudo 
./create_disk_image.sh && cd ../.. + cd runtimes/instance-rootfs && sudo ./create-ubuntu-22-04-qemu-disk.sh && cd ../.. + cd runtimes/instance-rootfs && sudo ./create-debian-12-disk.sh && cd ../.. + + - name: "Build example volume" + run: | + cd examples/volumes && bash build_squashfs.sh + + # Unit tests create and delete network interfaces, and therefore require to run as root + - name: Run unit tests + run: | + sudo python3 -m pip install hatch hatch-vcs coverage + sudo hatch run testing:cov + + - name: Output modules used and their version + if: always() + run: | + # re-install hatch in case previous job failed and hatch didn't get installed + sudo python3 -m pip install hatch hatch-vcs coverage + sudo hatch -e testing run pip freeze + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v4.0.1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + slug: aleph-im/aleph-vm + + code-quality-shell: + runs-on: ubuntu-22.04 + + steps: + - uses: actions/checkout@v4 + + - name: Workaround github issue https://github.com/actions/runner-images/issues/7192 + run: sudo echo RESET grub-efi/install_devices | sudo debconf-communicate grub-pc + + - name: Install required system packages only for Ubuntu Linux + run: | + sudo apt-get update + sudo apt-get install -y shellcheck + + - name: Run Shellcheck on all shell scripts + run: |- + find ./ -type f -name "*.sh" -exec shellcheck {} \; diff --git a/.gitignore b/.gitignore index a51dd6bbe..af67f6e22 100644 --- a/.gitignore +++ b/.gitignore @@ -2,9 +2,26 @@ *.pyc __pycache__ +*.sqlite3 *.bin *.ext4 *.zip *.pyz *.tgz +*.rdb +*.key /pydantic/ +node_modules +*.squashfs +/examples/example_http_rust/target/ +/examples/example_django/static/admin/ +/runtimes/aleph-debian-11-python/rootfs/ +/packaging/aleph-vm/opt/ +/packaging/target/ +/packaging/sevctl/target/ +/packaging/repositories/*/db/ +/packaging/repositories/*/dists/ +/packaging/repositories/*/pool/ +/kernels/linux-*/ +/kernels/linux-*.tar 
+/kernels/linux-*.tar.sign diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..e69de29bb diff --git a/CONFIGURE_CADDY.md b/CONFIGURE_CADDY.md new file mode 100644 index 000000000..4efafca9b --- /dev/null +++ b/CONFIGURE_CADDY.md @@ -0,0 +1,120 @@ +# Caddy Reverse-proxy for Aleph-VM + +A reverse-proxy is required for production use. It allows: + + - A different domain name for each VM function + - Secure connections using HTTPS + - Load balancing between multiple servers + +Using a different domain name for each VM function is important when running web applications, +both for security and usability purposes. + +The VM Supervisor supports using domains in the form `https://identifer.vm.yourdomain.org`, where +_identifier_ is the identifier/hash of the message describing the VM function and `yourdomain.org` +represents your domain name. + +## 1. Wildcard certificates + +A wildcard certificate is recommended to allow any subdomain of your domain to work. + +You can create one using [Let's Encrypt](https://letsencrypt.org/) and +[Certbot](https://certbot.eff.org/) with the following instructions. + +```shell +sudo apt install -y certbot + +certbot certonly --manual --email email@yourdomain.org --preferred-challenges dns \ + --server https://acme-v02.api.letsencrypt.org/directory --agree-tos \ + -d 'vm.yourdomain.org,*.vm.yourdomain.org' +``` + +## 2. Caddy Server + +In this documentation, we will install the modern [Caddy](https://caddyserver.com/) reverse-proxy. + +Replace `vm.yourdomain.org` with your domain of choice. 
 + +To install on Debian/Ubuntu, according to the +[official instructions](https://caddyserver.com/docs/install#debian-ubuntu-raspbian): +```shell +sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https +curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg +curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list +sudo apt update +sudo apt install caddy +``` + +Then give Caddy access to the certificates generated by Certbot: +```shell +chmod 750 /etc/letsencrypt/live/ +chmod 750 /etc/letsencrypt/archive/ +chmod 640 /etc/letsencrypt/archive/vm.yourdomain.org/privkey1.pem +chgrp -R caddy /etc/letsencrypt/archive/ +chgrp -R caddy /etc/letsencrypt/live/ +``` + +Configure Caddy: +```shell +cat >/etc/caddy/Caddyfile </etc/caddy/Caddyfile < Note: This project is still early prototyping. +The Aleph-VM project allows you to run programs on [Aleph.im](https://aleph.im/). -The Aleph VM project allows you to run programs on [Aleph.im](https://aleph.im/). +Aleph-VM is optimized to run programs on demand in a "function-as-a-service", +as a response to HTTP requests. -These programs can currently be written in Python using ASGI compatible frameworks ( +Programs can be written in any language as long as they can run a web server. +They benefit from running in their own, customizable Linux virtual environment. + +Writing programs in Python using ASGI compatible frameworks ( [FastAPI](https://github.com/tiangolo/fastapi), -[Django](https://docs.djangoproject.com/en/3.0/topics/async/), -[Sanic](https://sanicframework.org/), -...) and respond to HTTP requests. +[Django](https://docs.djangoproject.com/en/3.0/topics/async/), +...) allows developers to use advanced functionalities not yet available for other languages. 
+ +# Production install for Aleph-VM +## Installation from packages + + +Head over to the official user doc https://docs.aleph.im/nodes/compute/ on how to run an Aleph.im Compute Resource +Node + +## 2. Install Aleph-VM from source + +This method is not recommended, except for development and testing. +Read the installation document for the various components and the developer documentation. + +1. Install the [VM-Connector](./vm_connector/README.md) +2. Install the [VM-Supervisor](src/aleph/vm/orchestrator/README.md). +3. Install and configure a reverse-proxy such as [Caddy](./CONFIGURE_CADDY.md) + +## Create and run an Aleph Program + +Have a look at [tutorials/README.md](tutorials/README.md) for a tutorial on how to program VMs +as a user. + +The rest of this document focuses on how to run an Aleph-VM node that hosts and executes the programs. + +# Developer Setup + +Due to aleph-vm’s deep integration with the Linux system, it must be run with root privileges and configured +specifically for Linux. **It is strongly recommended** to deploy aleph-vm on a dedicated machine or a cloud-based server +to ensure security and stability. + +> **Note**: aleph-vm does not run on macOS or Windows, including for testing purposes. + +### Recommended Development Environment + +A typical setup for developing aleph-vm involves: + +1. Cloning the repository on your local machine for code editing. +2. Setting up a remote Linux server for deployment and testing. + +You can synchronize changes to the remote server using tools like `rsync` or PyCharm’s Remote Interpreter feature. + +## Remote Development Deployment + +To deploy aleph-vm for development on a remote server, we start with the Debian package as it includes essential binaries like `firecracker` and `sevctl`, system + configuration, and dependencies. + +1. **Run the vm-connector.** + +The vm-connector need to run for aleph-vm to works, even when running py.test. 
+ +Unless your focus is developing the VM-Connector, using the Docker image is easier. + See the [VM-Connector README](./vm_connector/README.md) for more details. + + ```shell + docker run -d -p 127.0.0.1:4021:4021/tcp --restart=always --name vm-connector alephim/vm-connector:alpha + ``` + +2. **Install the Debian Package** + Replace `1.2.0` with the latest release version. + + **On Debian 12 (Bookworm)**: + ```shell + wget -P /opt https://github.com/aleph-im/aleph-vm/releases/download/1.2.0/aleph-vm.debian-12.deb + sudo apt install /opt/aleph-vm.debian-12.deb + ``` + + **On Ubuntu 22.04 (Jammy Jellyfish)**: + ```shell + sudo wget -P /opt https://github.com/aleph-im/aleph-vm/releases/download/1.2.0/aleph-vm.ubuntu-22.04.deb + sudo apt install /opt/aleph-vm.ubuntu-22.04.deb + ``` + + **On Ubuntu 24.04 (Noble Numbat)**: + ```shell + sudo wget -P /opt https://github.com/aleph-im/aleph-vm/releases/download/1.2.0/aleph-vm.ubuntu-24.04.deb + sudo apt install /opt/aleph-vm.ubuntu-24.04.deb + ``` + +3. **Disable Systemd Service** + To prevent conflicts, deactivate the system version of aleph-vm by disabling its `systemd` service. + + ```shell + sudo systemctl disable aleph-vm-supervisor.service + ``` + +4. **Clone the Repository and Set Up a Virtual Environment** + - Clone the aleph-vm repository to your development environment. + - Create a virtual environment to manage dependencies. + + Inside the virtual environment, run: + + ```shell + pip install -e . + ``` + + This installs aleph-vm in "editable" mode within the virtual environment, allowing you to use the `aleph-vm` command + directly during development. + +## Testing +See [Testing doc](./TESTING.md) + +## Code Formatting and Linting + +To help maintain a clean and consistent codebase, we provide automated tools for formatting and style checks. 
+To ensure your code is properly **formatted** according to project standards, you can use: + +```bash +hatch linting:fmt +``` + +**Typing** helps ensure your code adheres to expected type annotations, improving reliability and clarity. To validate +typing in your code, use: +```bash +hatch linting:typing +``` + +These checks are also validated in Continuous Integration (CI) alongside unit tests. To ensure a smooth workflow, we +recommend running these commands before committing changes. + +**Linting** checks for potential errors, coding style violations, and patterns that may lead to bugs or reduce code +quality (e.g., unused variables, incorrect imports, or inconsistent naming). While linting is not currently enforced in +Continuous Integration (CI), it is considered a best practice to check linting manually to maintain high-quality code. +You can manually lint your code by running: + +```bash +hatch fmt +``` + +Following these best practices can help streamline code reviews and improve overall project quality. -## Architecture +# Architecture -![image](https://user-images.githubusercontent.com/404665/115885445-452f5180-a450-11eb-856e-f4071023a105.png) +![Aleph im VM - Details](https://user-images.githubusercontent.com/404665/127126908-3225a633-2c36-4129-8766-9810f2fcd7d6.png) -### VM Supervisor +### VM Supervisor (also called Orchestrator) Actually runs the programs in a secure environment on virtualization enabled systems. -See [vm_supervisor/README.md](./vm_supervisor/README.md). +See [vm_supervisor/README.md](src/aleph/vm/orchestrator/README.md). ### VM Connector -Schedules the execution of programs on VM Supervisors and assists -them with operations related to the Aleph network. +Assist with operations related to the Aleph network. See [vm_connector/README.md](./vm_connector/README.md). 
diff --git a/TESTING.md b/TESTING.md new file mode 100644 index 000000000..1c6e05bcb --- /dev/null +++ b/TESTING.md @@ -0,0 +1,73 @@ +# Testing aleph-vm + +This procedure describes how to run tests on a dev system. See the dev setup section of the README first. + +Tests also run on GitHub Actions via [the following workflow](./.github/workflows/test-on-droplets-matrix.yml). + +Since these tests create block devices and manipulate network interfaces, they need to run as root. +If you are not comfortable with this, run them in a virtual machine. + +## 1. Clone this repository + +```shell +git clone https://github.com/aleph-im/aleph-vm.git +``` + +## 2. Install [hatch](https://hatch.pypa.io/), the project manager + +Since installing tools globally is not recommended, we will install `hatch` + in a dedicated virtual environment. Alternatives include using [pipx](https://pipx.pypa.io) +or your distribution. + +```shell +python3 -m venv /opt/venv +source /opt/venv/bin/activate + +# Inside the venv +pip install hatch +``` + +## 3. Initialize hatch for running the tests + +It is required that the testing virtual environment relies on system packages +for `nftables` instead of the package obtained from `salsa.debian.org` as defined in +[pyproject.toml](./pyproject.toml). + +Create the testing virtual environment: +```shell +hatch env create testing +``` + + +## 4. Run tests + +```shell +hatch run testing:test +``` + + +## Debugging the tests +Some tricks and options that might help debugging problematic tests. + +Only launch pytest with a test name and more verbose debugging +```shell +hatch run testing:pytest -vv --log-level=DEBUG --full-trace -o log_cli=true -k +``` + + +Specify `--capture=no` to pytest so it launch. This way you get the full output, including firecracker logs + +## Debugging runtimes +If the error is in the runtime: +Modify the #! 
to pass the -v option to python, which will print all the debugging info +`#!/usr/bin/python3 -vOO` + +To have these modification take effect you need to rebuild the runtime file using `create_disk_image.sh` as _root_ + +```shell +sudo bash create_disk_image.sh +``` + +Don't forget to have the print system log option set `ALEPH_VM_PRINT_SYSTEM_LOGS=1` + +`aleph-debian-12-python` is used in test_create_execution \ No newline at end of file diff --git a/config.json b/config.json new file mode 100644 index 000000000..66dfe5379 --- /dev/null +++ b/config.json @@ -0,0 +1,21 @@ +{ + "vm_id": 5, + "settings": { + "PRINT_SYSTEM_LOGS": true, + "IPV4_ADDRESS_POOL": "172.16.0.0/12", + "IPV4_NETWORK_PREFIX_LENGTH": 24, + "NETWORK_INTERFACE": "enp5s0", + "IPV6_ALLOCATION_POLICY": "static", + "IPV6_ADDRESS_POOL": "fc00:1:2:3::/64", + "IPV6_SUBNET_PREFIX": 124, + "USE_NDP_PROXY": true, + "IPV6_FORWARDING_ENABLED": false + }, + "vm_configuration": { + "use_jailer": true, + "firecracker_bin_path": "/opt/firecracker/firecracker", + "jailer_bin_path": "/opt/firecracker/jailer", + "config_file_path": "/var/lib/aleph/vm/config.json", + "init_timeout": 30 + } +} diff --git a/doc/INSTALL-Debian-11.md b/doc/INSTALL-Debian-11.md new file mode 100644 index 000000000..242296c04 --- /dev/null +++ b/doc/INSTALL-Debian-11.md @@ -0,0 +1 @@ +[[https://docs.aleph.im/nodes/compute/installation/debian-11/]] \ No newline at end of file diff --git a/doc/INSTALL-Debian-12.md b/doc/INSTALL-Debian-12.md new file mode 100644 index 000000000..bc2b74cb7 --- /dev/null +++ b/doc/INSTALL-Debian-12.md @@ -0,0 +1 @@ +[[https://docs.aleph.im/nodes/compute/installation/debian-12/]] \ No newline at end of file diff --git a/doc/INSTALL-Ubuntu-20.04.md b/doc/INSTALL-Ubuntu-20.04.md new file mode 100644 index 000000000..6705c5bb7 --- /dev/null +++ b/doc/INSTALL-Ubuntu-20.04.md @@ -0,0 +1 @@ +Moved to [[https://docs.aleph.im/nodes/compute/installation/ubuntu-20.04/]] \ No newline at end of file diff --git 
a/doc/INSTALL-Ubuntu-22.04.md b/doc/INSTALL-Ubuntu-22.04.md new file mode 100644 index 000000000..010b81536 --- /dev/null +++ b/doc/INSTALL-Ubuntu-22.04.md @@ -0,0 +1 @@ +[[https://docs.aleph.im/nodes/compute/installation/ubuntu-22.04/]] \ No newline at end of file diff --git a/doc/INSTALL.md b/doc/INSTALL.md new file mode 100644 index 000000000..d42a9f50e --- /dev/null +++ b/doc/INSTALL.md @@ -0,0 +1,2 @@ +# Installing Aleph-VM +see [[ https://docs.aleph.im/nodes/compute/]] \ No newline at end of file diff --git a/doc/confidential.md b/doc/confidential.md new file mode 100644 index 000000000..4e208d7c9 --- /dev/null +++ b/doc/confidential.md @@ -0,0 +1,266 @@ +# Confidential computing + +Aleph-vm offers to launch confidential VM with AMD SEV. This is also known as TEE, Trusted Execution Environment. + +This is only supported for instances using Qemu as their hypervisor. + +## Life cycle +First, a user creates a VM message and sends it with notify_allocate. This notifies the orchestrator about the creation of the new VM. +The user fetches the platform certificate, validates its chain again AMD root certificate. +The user must then upload so-called Guest Owner certificates (created with sevctl) to create an encrypted channel between the user and the Security Processor. + +Once uploaded, the VM is started in Qemu in stopped mode: Qemu will allocate the RAM for the VM, load the firmware inside it and then let the AMD Security Processor encrypt the memory. Once this is done, the SEV endpoints allow to retrieve a measure of the memory of the VM and to decide whether to inject a user secret in the VM. Upon secret injection, the VM is launched, i.e. the VM CPU is started and goes through the boot sequence of the VM. + +The end result is a virtual machine that is accessible through SSH and is completely encrypted in RAM, making it inaccessible from the point of view of the hypervisor. 
 + +```mermaid +flowchart TD + A[Start] -->|Allocate VM on CRN| B(CRN: Check payment, download image, volume) + B --> |Download certificate from CRN| C(User: Validate Certificate against CHAIN) + C --> |Create session certificates| D[Certificates file created] + D --> |Send certificate to CRN to init sessions | E[CRN: Launch VM with firmware with encrypted communication channel] + E --> |Fetch measurement from VM| F[User: Calculate its own measurement and verify them against the CRN's] + F --> | if ok: Send secret in encrypted channel | G[CRN: Start and unlock VM] +``` + + +# CRN Side + +## Hardware requirement +4th Generation AMD EPYC™ Processors with SEV support. + +This includes the [9004 Series Processors and 8004 Series Processors](https://www.amd.com/en/products/processors/server/epyc/4th-generation-9004-and-8004-series.html#tabs-4380fde236-item-2130f0d757-tab). + +Note that the [4004 Series Processors do not provide SEV](https://www.amd.com/en/products/processors/server/epyc/infinity-guard.html) and are therefore not supported. + +> ℹ️ The 4th Generation requirement stems from security vulnerabilities discovered in SEV on Zen3 and earlier architectures. + +## Requirements for the CRN +* Support must be [enabled in the computer BIOS](https://www.amd.com/content/dam/amd/en/documents/epyc-technical-docs/tuning-guides/58207-using-sev-with-amd-epyc-processors.pdf) (see Section 2.1). +* The kernel and platform must support SEV. (e.g. Ubuntu 24.04 supports it by default) +* [sevctl](https://github.com/virtee/sevctl) must be installed. A copy is included in the aleph-vm Debian package and installed as `/opt/sevctl`. +* QEMU must be installed.
 + +Check with the `sevctl ok` command that the system is supporting AMD SEV properly, at least: + +```[ PASS ] - Secure Encrypted Virtualization (SEV)``` + + + +See AMD DOC for more info on enabling SEV for your system +https://www.amd.com/fr/developer/sev.html + + +## Enabling the confidential computing feature in aleph-vm + +Enable SEV in the configuration of `aleph-vm`, by default in `/etc/aleph-vm/supervisor.env`: +``` +ALEPH_VM_ENABLE_QEMU_SUPPORT=1 +ALEPH_VM_ENABLE_CONFIDENTIAL_COMPUTING=1 + +``` + +After launching the server you can check the endpoint +http://localhost:4020/status/config and verify that ENABLE_CONFIDENTIAL_COMPUTING is true + + +# User side +The user wanting to launch the VM, referred to as the Guest Owner. + +The aleph-sdk-python and the aleph-client provide ways to launch, validate and start the VM. + +## Create an encrypted VM image + +The user must create a virtual machine disk image that has been encrypted using a password of their choice. +Follow the instructions here: https://github.com/aleph-im/aleph-vm/blob/dev-confidential/examples/example_confidential_image/README.md + +## OVMF Launcher Firmware +The OVMF file, a UEFI firmware for virtual machines, handles launching the confidential VM. +It receives the secret (decryption key) in a secure manner and passes it to the VM bootloader (see Boot process section). + +Aleph.im provides a default one, destined to work with confidential images created following the procedure described above. + + +In the usual case a user would just create an encrypted VM image but they might also provide a customised firmware in the `firmware` field of `trusted_execution`. + +See [the instructions on how the Firmware is built](runtimes/ovmf/README.md) + +The hash from the Firmware is needed to validate that it is the one launched by the CRN.
+ + +# Implementation details +## Aleph-message +on Instance type message, we check if the `content.environment.trusted_execution` is set + +``` + "trusted_execution": { + "policy": 1, + "firmware": "e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317" + } +``` + +* Firmware is an [IPFS CID](https://docs.ipfs.tech/concepts/content-addressing/) reference to the OVMF firmware file (see OVMF firmware section) +* policy is an AMD SEV Policy (for now we only expose if AMD SEV and SEV-ES are supported) + + +## Boot process +The following diagram describes the different pieces of the VM boot process. + +![Boot process](./images/boot_process.drawio.png) + +* OVMF: UEFI firmware (see section above), finds the bootloader and launches it +* GRUB, the boot loader, decrypts the VM image and jumps to it. +* GRUB configuration files: the unencrypted script looks for the user disk decryption password injected during + the SEV boot process, then jumps to a complete Grub configuration file provided by the user inside the VM + image. +* Kernel + initrd + root filesystem: The OS of the VM. + +OVMF and Grub must be unencrypted. This means that the VM supervisor can alter these pieces at will. +It is therefore crucial that these pieces are part of the launch measurement retrieved during the SEV +sequence. + +The process documented in `runtimes/ovmf/README.md` can be used to generate a firmware image that combines OVMF and Grub +into one binary. + + +## Detailed sequence with endpoints +```mermaid +sequenceDiagram + participant Qemu + participant CRN + actor User + CRN->>User: Fetch platform certificate (GET /about/certificates/) + Note right of User: Generate via sevctl using the platfom certificate:
 TIK, TEK, GODH, Session + User->>CRN:Upload certificates POST /control/machine/{ref}/confidential/initialize + Note over CRN,User:session.b64, godh.b64 + CRN->>Qemu: Run qemu process (pass session, godh, image, ovmf) + Note left of Qemu: Emulator is in stopped state + User->>CRN: Fetch measurement (GET /control/machine/{ref}/confidential/measurement) + Qemu->>CRN: Retrieve launch measurement (via qmp) + CRN->>User: Measurements (SEV version, policy, firmware hash, signature) + Note right of User: Verify measurement signature + Note right of User: Encrypt secret using TEK key + User->>CRN: Pass encoded secrets (POST /control/machine/{ref}/confidential/inject_secret) + CRN->>Qemu: Inject secret (via qmp) + CRN->>Qemu: Start VM (via qmp) + Note left of Qemu: Emulator is in started state, VM Boot + User->>Qemu: SSH or other interaction +``` + +# Development and debugging + +See QEMU.md in general for QEMU related development + + ## Note on systemd in dev + If you use a local copy of aleph-vm, for example a version you are developing on, by default systemd will still use the system version of the aleph controller. It is necessary to modify + `/etc/systemd/system/aleph-vm-controller@.service` to point to your version. 
+ + For example here is what I use + ``` + [Unit] +Description=Aleph VM %i Controller Olivier +After=network.target + +[Service] +Type=simple +RestartSec=5s +PrivateTmp=yes +NoNewPrivileges=true +WorkingDirectory=/home/olivier/pycharm-aleph-vm/src +Environment=PYTHONPATH=/home/olivier/pycharm-aleph-vm/src:$PYTHONPATH +ExecStart=/home/olivier/.virtualenvs/aleph-vm/bin/python3 -m aleph.vm.controllers --config=/var/lib/aleph/vm/%i-controller.json +Restart=no + +[Install] +WantedBy=multi-user.target +``` + +After modification use the following command to have the modification taken into account +```shell +sudo systemctl daemon-reload +``` + +# Testing + +After initializing the VM you can check it's status with: +`sudo systemctl status aleph-vm-controller@decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca.service` + +and see the logs with +` sudo journalctl -u aleph-vm-controller@decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca.service` + +**Important** + +If you modify your base image between tests, you will need to delete the image file on disk (which is a delta of the base image) +For example using : +`sudo rm /var/lib/aleph/vm/volumes/persistent/decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca/rootfs.qcow2` + +Ensure the VM controller is stopped before! +`sudo systemctl stop aleph-vm-controller@decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca.service` + + Between your test you can also stop the execution using + ```http + ### Stop all VMs +POST http://localhost:4020/control/allocations +Content-Type: application/json +X-Auth-Signature: test +Accept: application/json + + +{ + "persistent_vms": [], + "instances": [ + ] +} + +``` + +## Sevctl +Most operations done by `sevctl` are implemented in [aleph-sdk-python](https://github.com/aleph-im/aleph-sdk-python), either by calling it, calling the relevant endpoint +or by reimplementing the functionality in python. 
Here is a primer in case you need to call it manually. + +### Install `sevctl` +If you are not taking the version from the debian package, you can install sevctl manually with cargo + +Requirements: + * `cargo` + +On Ubuntu/ Debian install it via `apt install cargo` (as root) + +To build and install sevctl +```cargo install sevctl``` + +Ensure $HOME/.cargo/bin is in your PATH to launch it manually. + +To configure which bin aleph-vm use, set the environment variable +``` +ALEPH_VM_SEV_CTL_PATH=/home/olivier/.cargo/bin/sevctl +``` + +Alternatively, `sevctl` can be build from `git` : ```cargo install --git https://github.com/virtee/sevctl``` + + +## Example Commands +## Generate session key +You can generate the sessions keys using sevctl +1. Export the platform key + `sudo sevctl export platform.pem` +2. Create the sessions files + `sevctl session platform.pem 0x1 dwdw` + +This will create the files `vm_godh.b64`, `vm_session.b64`, `vm_tek.bin`, `vm_tik.bin` in your current directory + +### Calculate measurement + +```shell +RUST_LOG=trace sevctl measurement build + --api-major 01 --api-minor 55 --build-id 24 --policy 1 + --tik ~/pycharm-aleph-sdk-python/decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca_tik.bin + --firmware /usr/share/ovmf/OVMF.fd + --nonce URQNqJAqh/2ep4drjx/XvA + ``` + +### Debug +To enable debugging log, set the environment variable +```env +RUST_LOG=trace +``` \ No newline at end of file diff --git a/doc/images/boot_process.drawio.png b/doc/images/boot_process.drawio.png new file mode 100644 index 000000000..a0e20f74e Binary files /dev/null and b/doc/images/boot_process.drawio.png differ diff --git a/doc/operator_auth.md b/doc/operator_auth.md new file mode 100644 index 000000000..b37b3e397 --- /dev/null +++ b/doc/operator_auth.md @@ -0,0 +1,181 @@ + Authentication protocol for VM owner +======================================= + +This custom protocol allows a user (owner of a VM) to securely authenticate to a CRN, using their 
Ethereum or Solana +wallet. This scheme was designed in a way that's convenient to be integrated into the console web page. + +It allows the user to control their VM. e.g: stop, reboot, view their log, etc. + +## Motivations + +This protocol ensures secure authentication between a blockchain wallet owner and an aleph.im compute node. + +Signing operations are typically gated by prompts requiring manual approval for each operation. With hardware wallets, +users are prompted both by the software on their device and the hardware wallet itself. + +## Overview + +The client generates a [JSON Web Key](https://www.rfc-editor.org/rfc/rfc7517) (JWK) key pair and signs the public key +with their Ethereum or Solana account. The signed public key is sent in the `X-SignedPubKey` header. The client also +signs the operation payload with the private JWK, sending it in the `X-SignedOperation` header. The server verifies both +the public key and payload signatures, ensuring the request's integrity and authenticity. If validation fails (e.g., +expired key or invalid signature), the server returns a 401 Unauthorized error. + + +## Authentication Method for HTTP Endpoints + +Two custom headers are added to each authenticated request: + +- **X-SignedPubKey**: This contains the public key and its associated metadata (such as the sender’s address, chain, and + expiration date), along with a signature that ensures its authenticity. +- **X-SignedOperation**: This includes the payload of the operation and its cryptographic signature, ensuring that the + operation itself has not been tampered with. + +### 1. Generate an ephemeral keys and Sign Public Key + +An ephemeral key pair (as JWK) is generated using elliptic curve cryptography (EC, P-256). + +The use of a temporary JWK key allows the user to delegate limited control to the console without needing to sign every +individual request with their Ethereum or Solana wallet. 
This is crucial for improving the user experience, as +constantly signing each operation would be cumbersome and inefficient. By generating a temporary key, the user can +provide permission for a set period of time (until the key expires), enabling the console to perform actions like +stopping or rebooting the VM on their behalf. This maintains security while streamlining interactions with the console, +as the server verifies each operation using the temporary key without requiring ongoing involvement from the user's +wallet. + +The generated public key is converted into a JSON structure with additional metadata: + +- **`pubkey`**: The public key information. +- **`alg`**: The signing algorithm, ECDSA. +- **`domain`**: The domain for which the key is valid. +- **`address`**: The wallet address of the sender, binding the temporary key to this identity. +- **`chain`**: Indicates the blockchain used for signing (`ETH` or `SOL`). Defaults to `ETH`. +- **`expires`**: The expiration time of the key. + +Example: + +```json +{ + "pubkey": { + "crv": "P-256", + "kty": "EC", + "x": "hbslLmhG3h2RwuzBYNVeQ7WCbU-tUzMjSpCFO2i5-tA", + "y": "KI4FJARKwyYcRy6xz1J9lu8OItV87Fw91eThe2hnnuc" + }, + "alg": "ECDSA", + "domain": "localhost", + "address": "0x8Dd070629F107e7946dD68BDcb8ABE8475F47B0E", + "chain": "ETH", + "expires": "2010-12-26T17:05:55Z" +} +``` + +This public key is signed using either the Ethereum or Solana account, depending on the `chain` parameter. The resulting +signature is combined with the public key into a payload and sent as the `X-SignedPubKey` header. + +### 2. Sign Operation Payload + +#### Operation Payload Format + +The operation payload is a JSON object that encapsulates the details of an API request. It ensures that the request's +integrity can be verified through signing. Below are the fields included: + +- **`time`**: (string, ISO 8601 format) The timestamp for when the operation is valid, including the timezone is mandatory (`Z` + indicates UTC). 
This helps prevent replay attacks (capturing the packet and replying it multiple time). e.g. `"2010-12-25T17:05:55Z"` +- **`method`**: (string) The HTTP method used for the operation (e.g., `GET`, `POST`). +- **`path`**: (string) The endpoint path of the request (e.g., `/`). +- **`domain`**: (string) The domain associated with the request. This ensures the request is valid for the intended + CRN. (e.g., `localhost`). + +Example: + +```json +{ + "time": "2010-12-25T17:05:55Z", + "method": "GET", + "path": "/", + "domain": "localhost" +} +``` + +It is sent serialized as a hex string. + +#### Signature + + +- The operation payload (containing details such as time, method, path, and domain) is JSON serialized and converted into a + hex string. +- The ephemeral key (private key) is used to sign this operation payload, ensuring its integrity. This signature is then included + in the `X-SignedOperation` header. + +### 3. Include Authentication Headers + +These two headers are to be added to the HTTP request: + +1. **`X-SignedPubKey` Header**: + - This header contains the public key payload and the signature of the public key generated by the Ethereum or + Solana account. + + Example: + + ```json + { + "payload": "", + "signature": "" + } + ``` + +2. **`X-SignedOperation` Header**: + - This header contains the operation payload and the signature of the operation payload generated using the private + JWK. + + Example: + + ```json + { + "payload": "", + "signature": "" + } + ``` + +### Expiration and Validation + +- The public key has an expiration date, ensuring that keys are not used indefinitely. +- Both the public key and the operation signature are validated for authenticity and integrity at the server side, + taking into account the specified blockchain (Ethereum or Solana). +- Requests failing verification or expired keys are rejected with `401 Unauthorized` status, providing an error message + indicating the reason. 
+ +## WebSocket Authentication Protocol + +In the WebSocket variant of the authentication protocol, the client establishes a connection and authenticates through +an initial message that includes their Ethereum or Solana-signed identity, ensuring secure communication. + +Due to web browsers not allowing custom HTTP headers in WebSocket connections, the two headers are sent in one JSON +packet, under the `auth` key. + +Example authentication packet: + +```json +{ + "auth": { + "X-SignedPubKey": { + "payload": "7b227075626b6579223a207b22637276223a2022502d323536222c20226b7479223a20224543222c202278223a20223962446f34754949686b735a5272677a31477972325050656d4334364e735f4730577144364d4d6a774673222c202279223a20226f48343342786c7854334f3065733336685967713143372d61325a535a71456d5f6b56356e636c79667a59227d2c2022616c67223a20224543445341222c2022646f6d61696e223a20226c6f63616c686f7374222c202261646472657373223a2022307862413236623135333539314434363230666432413734304130463165463730644164363532336230222c202265787069726573223a2022323031302d31322d32365431373a30353a35355a227d", + "signature": "0xea99ef5f1a10f2d103f94dce4f8650730315246e6d15cf9e5862c11adfd6482703cd1ec684a4f3dffb36ae5c4a57b08a47108fe55e3b2454e45f6e63342e0f471b" + }, + "X-SignedOperation": { + "payload": "7b2274696d65223a2022323031302d31322d32355431373a30353a35355a222c20226d6574686f64223a2022474554222c202270617468223a20222f222c2022646f6d61696e223a20226c6f63616c686f7374227d", + "signature": "6f737654cd00e4d4155d387509978e7a9a4d27f5b59c9492ac1dec7b09f9aecc58c9365526bbddd6211b65f40f4956c50ab26f395f7170ce1698c11e28e25d3a" + } + } +} +``` + +If the authentication succeeds, the server will answer with +```json +{ + "status": "connected" +} +``` + +In case of failed authentication, the server will respond with `{"status": "failed", "reason": "string describing the reason"}` and close the connection diff --git a/docker/publish_vm_connector.sh b/docker/publish_vm_connector.sh new file mode 100755 index 000000000..4101400bf --- /dev/null +++ 
b/docker/publish_vm_connector.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -euf -o pipefail + +if hash docker 2> /dev/null +then + DOCKER_COMMAND=docker +else + DOCKER_COMMAND=podman +fi + +#VERSION=$(git describe --tags)-alpha +VERSION=alpha + +$DOCKER_COMMAND build -t alephim/vm-connector -f docker/vm_connector.dockerfile . + +$DOCKER_COMMAND tag alephim/vm-connector alephim/vm-connector:$VERSION +$DOCKER_COMMAND push alephim/vm-connector:$VERSION docker.io/alephim/vm-connector:$VERSION +echo docker.io/alephim/vm-connector:$VERSION diff --git a/docker/publish_vm_supervisor_dev.sh b/docker/publish_vm_supervisor_dev.sh new file mode 100755 index 000000000..69827f2de --- /dev/null +++ b/docker/publish_vm_supervisor_dev.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -euf -o pipefail + +if hash docker 2> /dev/null +then + DOCKER_COMMAND=docker +else + DOCKER_COMMAND=podman +fi + +#VERSION=$(git describe --tags)-alpha +VERSION=alpha + +$DOCKER_COMMAND build -t alephim/vm-supervisor-dev -f docker/vm_supervisor-dev.dockerfile . + +$DOCKER_COMMAND tag alephim/vm-supervisor-dev alephim/vm-supervisor-dev:$VERSION +$DOCKER_COMMAND push alephim/vm-supervisor-dev:$VERSION docker.io/alephim/vm-supervisor-dev:$VERSION +echo docker.io/alephim/vm-supervisor-dev:$VERSION diff --git a/docker/run_vm_connector.sh b/docker/run_vm_connector.sh index 500f604e3..fc2c60845 100755 --- a/docker/run_vm_connector.sh +++ b/docker/run_vm_connector.sh @@ -1,7 +1,19 @@ #!/bin/sh -docker build -t aleph-connector -f docker/vm_connector.dockerfile . -docker run -ti --rm -p 8000:8000/tcp \ - -v $(pwd)/kernels:/opt/kernels:ro \ - -v $(pwd)/vm_connector:/opt/vm_connector:ro \ - aleph-connector $@ +set -euf + +# Use Podman if installed, else use Docker +if hash podman 2> /dev/null +then + DOCKER_COMMAND=podman +else + DOCKER_COMMAND=docker +fi + +$DOCKER_COMMAND build -t aleph-connector -f docker/vm_connector.dockerfile . 
+ +$DOCKER_COMMAND run -ti --rm -p 4021:4021/tcp \ + -v "$(pwd)/kernels:/opt/kernels:ro" \ + -v "$(pwd)/vm_connector:/opt/vm_connector:ro" \ + --name aleph-connector \ + aleph-connector "$@" diff --git a/docker/run_vm_supervisor.sh b/docker/run_vm_supervisor.sh new file mode 100755 index 000000000..00c558989 --- /dev/null +++ b/docker/run_vm_supervisor.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +set -euf + +# Use Podman if installed, else use Docker +if hash podman 2> /dev/null +then + DOCKER_COMMAND=podman +else + DOCKER_COMMAND=docker +fi + +$DOCKER_COMMAND build -t alephim/vm-supervisor-dev -f docker/orchestrator-dev.dockerfile . + +$DOCKER_COMMAND run -ti --rm \ + -v "$(pwd)/runtimes/aleph-debian-11-python/rootfs.squashfs:/opt/aleph-vm/runtimes/aleph-debian-11-python/rootfs.squashfs:ro" \ + -v "$(pwd)/examples/volumes/volume-venv.squashfs:/opt/aleph-vm/examples/volumes/volume-venv.squashfs:ro" \ + -v "$(pwd)/vm_supervisor:/opt/aleph-vm/vm_supervisor:ro" \ + -v "$(pwd)/firecracker:/opt/aleph-vm/firecracker:ro" \ + --device /dev/kvm \ + -p 4020:4020 \ + alephim/vm-supervisor-dev "$@" diff --git a/docker/vm_connector.dockerfile b/docker/vm_connector.dockerfile index 0d02c5560..3aebf7478 100644 --- a/docker/vm_connector.dockerfile +++ b/docker/vm_connector.dockerfile @@ -7,16 +7,9 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y \ RUN pip install fastapi aiofiles uvicorn aleph-client eth-account -COPY ./examples /opt/examples -COPY ./runtimes /opt/runtimes -COPY ./kernels /opt/kernels - -WORKDIR /opt/examples -RUN make - WORKDIR /opt ENV PYTHONPATH=/opt -EXPOSE 8000 +EXPOSE 4021 COPY ./vm_connector /opt/vm_connector -CMD ["uvicorn", "vm_connector.main:app", "--host", "0.0.0.0", "--reload"] +CMD ["uvicorn", "vm_connector.main:app", "--host", "0.0.0.0", "--port", "4021", "--reload"] diff --git a/docker/vm_supervisor-dev.dockerfile b/docker/vm_supervisor-dev.dockerfile new file mode 100644 index 000000000..3db28b836 --- /dev/null +++ 
b/docker/vm_supervisor-dev.dockerfile @@ -0,0 +1,50 @@ +# This is mainly a copy of the installation instructions from [orchestrator/README.md] + +FROM debian:bookworm + +RUN apt-get update && apt-get -y upgrade && apt-get install -y \ + sudo acl curl squashfs-tools git \ + python3 python3-aiohttp python3-alembic python3-msgpack python3-pip python3-aiodns python3-aioredis\ + python3-nftables python3-psutil python3-setproctitle python3-sqlalchemy python3-packaging python3-cpuinfo ndppd nftables \ + && rm -rf /var/lib/apt/lists/* + +RUN useradd jailman + +RUN mkdir /opt/firecracker +RUN chown $(whoami) /opt/firecracker +RUN curl -fsSL https://github.com/firecracker-microvm/firecracker/releases/download/v1.3.3/firecracker-v1.3.3-x86_64.tgz | tar -xz --no-same-owner --directory /opt/firecracker +RUN curl -fsSL -o /opt/firecracker/vmlinux.bin https://s3.amazonaws.com/spec.ccfc.min/img/quickstart_guide/x86_64/kernels/vmlinux.bin + +# Link binaries on version-agnostic paths: +RUN ln /opt/firecracker/release-*/firecracker-v* /opt/firecracker/firecracker +RUN ln /opt/firecracker/release-*/jailer-v* /opt/firecracker/jailer + +RUN pip3 install typing-extensions 'aleph-message==0.4.9' + +RUN mkdir -p /var/lib/aleph/vm/jailer + +ENV PYTHONPATH /mnt + +# Networking only works in privileged containers +ENV ALEPH_VM_ALLOW_VM_NETWORKING False +ENV ALEPH_VM_NETWORK_INTERFACE "tap0" +# Jailer does not work in Docker containers +ENV ALEPH_VM_USE_JAILER False +# Use fake test data +ENV ALEPH_VM_FAKE_DATA True +# Allow connections from host +ENV ALEPH_VM_SUPERVISOR_HOST "0.0.0.0" + +# Make it easy to enter this command from a shell script +RUN echo "python3 -m vm_supervisor --print-settings --very-verbose --system-logs --profile -f ./examples/example_fastapi" >> /root/.bash_history + +RUN mkdir /opt/aleph-vm/ +COPY ./vm_supervisor /opt/aleph-vm/vm_supervisor +COPY ./firecracker /opt/aleph-vm/firecracker +COPY ./guest_api /opt/aleph-vm/guest_api +COPY ./examples /opt/aleph-vm/examples 
+COPY ./runtimes /opt/aleph-vm/runtimes + +WORKDIR /opt/aleph-vm + +CMD "bash" diff --git a/examples/Makefile b/examples/Makefile index 791ebbb7a..b148ee309 100644 --- a/examples/Makefile +++ b/examples/Makefile @@ -1,11 +1,16 @@ -all: example_fastapi_2.zip data.tgz +all: example_fastapi.zip data.tgz clean: - rm example_fastapi_2.zip + rm example_fastapi.zip rm data.tgz -example_fastapi_2.zip: - zip -r example_fastapi_2.zip example_fastapi_2 +example_fastapi.zip: + zip -r example_fastapi.zip example_fastapi data.tgz: tar -cvzf data.tgz data + +example_pip.squashfs: + rm -fr /opt/python + pip3 install -t /opt/requirements -r example_pip/requirements.txt + mksquashfs /opt/requirements requirements.squashfs diff --git a/examples/README.md b/examples/README.md index 269961af2..e76f3986d 100644 --- a/examples/README.md +++ b/examples/README.md @@ -6,11 +6,11 @@ on [FastAPI](https://fastapi.tiangolo.com/). ## Initial setup Let's start by creating a package for our app: -Create a directory named `example_fastapi_2` +Create a directory named `example_fastapi` and an empty file named `__init__.py` file within the directory. ``` -example_fastapi_2/ -example_fastapi_2/__init__.py +example_fastapi/ +example_fastapi/__init__.py ``` The copy the example from the FastAPI tutorial in `__init__.py`: @@ -23,7 +23,7 @@ app = FastAPI() @app.get("/") -def read_root(): +def index(): return {"Hello": "World"} @@ -41,7 +41,7 @@ Uvicorn is used to run ASGI compatible web applications, such as the `app` web application from the example above. You need to specify it the name of the Python module to use and the name of the app: ```shell -uvicorn example_fastapi_2:app --reload +uvicorn example_fastapi:app --reload ``` Then open the app in a web browser on http://localhost:8000 @@ -59,7 +59,7 @@ To achieve this, we need to follow the following steps: ### 1. 
Create a zip archive containing the app ```shell -zip -r example_fastapi_2.zip example_fastapi_2 +zip -r example_fastapi.zip example_fastapi ``` ### 2. Store the zip archive on Aleph @@ -80,4 +80,4 @@ Update the `entrypoint` field according to your app if necessary. Open the HTTP interface of a node running the VM Supervisor: -http://ip-of-supervisor:8080/vm/function/{message_hash}/ +http://ip-of-supervisor:4020/vm/{message_hash}/ diff --git a/examples/confidential_instance_message_from_aleph.json b/examples/confidential_instance_message_from_aleph.json new file mode 100644 index 000000000..30cf7ee7a --- /dev/null +++ b/examples/confidential_instance_message_from_aleph.json @@ -0,0 +1,55 @@ +{ + "chain": "ETH", + "item_hash": "fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-hash", + "sender": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", + "type": "INSTANCE", + "channel": "Fun-dApps", + "confirmed": true, + "content": { + "address": "0xE0178501683a4C321cAE8263839F349e0f07dECd", + "allow_amend": false, + "variables": { + "VM_CUSTOM_NUMBER": "32" + }, + "environment": { + "hypervisor": "qemu", + "reproducible": true, + "internet": true, + "aleph_api": true, + "shared_cache": true, + "trusted_execution": { + "firmware": "88978bb4c2ff54400ce5f51c3a109e1af1ab03d1ea4409666917317ac513846b", + "policy": 1 + } + }, + "resources": { + "vcpus": 1, + "memory": 512, + "seconds": 30 + }, + "rootfs": { + "parent": { + "ref": "549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613", + "use_latest": true + }, + "persistence": "host", + "size_mib": 5000 + }, + "authorized_keys": [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDj95BHGUx0/z2G/tTrEi8o49i70xvjcEUdSs3j4A33jE7pAphrfRVbuFMgFubcm8n9r5ftd/H8SjjTL4hY9YvWV5ZuMf92GUga3n4wgevvPlBszYZCy/idxFl0vtHYC1CcK9v4tVb9onhDt8FOJkf2m6PmDyvC+6tl6LwoerXTeeiKr5VnTB4KOBkammtFmix3d1X1SZd/cxdwZIHcQ7BNsqBm2w/YzVba6Z4ZnFUelBkQtMQqNs2aV51O1pFFqtZp2mM71D5d8vn9pOtqJ5QmY5IW6NypcyqKJZg5o6QguK5rdXLkc7AWro27BiaHIENl3w0wazp9EDO9zPAGJ6lz 
olivier@lanius" + ], + "time": 1619017773.8950517 + }, + "item_content": "{\"address\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"allow_amend\":false,\"variables\":{\"VM_CUSTOM_NUMBER\":\"32\"},\"environment\":{\"reproducible\":true,\"internet\":true,\"aleph_api\":true,\"shared_cache\":true},\"resources\":{\"vcpus\":1,\"memory\":128,\"seconds\":30},\"rootfs\":{\"parent\":{\"ref\":\"549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613\",\"use_latest\":true},\"persistence\":\"host\",\"size_mib\":20000},\"cloud_config\":{\"password\":\"password\",\"chpasswd\":{\"expire\":\"False\"}},\"volumes\":[{\"mount\":\"/opt/venv\",\"ref\":\"5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51\",\"use_latest\":false},{\"comment\":\"Working data persisted on the VM supervisor, not available on other nodes\",\"mount\":\"/var/lib/example\",\"name\":\"data\",\"persistence\":\"host\",\"size_mib\":5}],\"replaces\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"time\":1619017773.8950517}", + "item_type": "inline", + "signature": "0x372da8230552b8c3e65c05b31a0ff3a24666d66c575f8e11019f62579bf48c2b7fe2f0bbe907a2a5bf8050989cdaf8a59ff8a1cbcafcdef0656c54279b4aa0c71b", + "size": 749, + "time": 1619017773.8950577, + "confirmations": [ + { + "chain": "ETH", + "height": 12284734, + "hash": "0x67f2f3cde5e94e70615c92629c70d22dc959a118f46e9411b29659c2fce87cdc" + } + ] +} diff --git a/examples/example_confidential_image/README.md b/examples/example_confidential_image/README.md new file mode 100644 index 000000000..0e91e6949 --- /dev/null +++ b/examples/example_confidential_image/README.md @@ -0,0 +1,79 @@ +# Create an encrypted VM image +Theses samples scripts create an encrypted VM image suitable be used for confidential computing. + +They will create an encrypted partition, a boot partition and the necessary initramfs to decrypt the partition. 
The created image is designed to work in tandem with the custom OVMF found in `runtimes/ovmf` which can receive the decryption key in a secure channel via QMP and pass it to grub to decrypt the disk. + +You can customise your VM by modifying the `setup_debian_rootfs.sh` script and adding your instructions at the end. This script is run "inside" the VM chroot. For example: add your user, ssh key or install additional software. + + +## Procedure to create the image +### Requirements +* guestmount +* parted +* cryptsetup + +On debian they can be installed via their respective packages : +`apt install guestmount parted cryptsetup` + +### Procure a debian image +Your image needs to have cloud-init installed in it for the network setup. It is recommended to start from the genericcloud image. Experiments with using the nocloud image and then installing cloud-init have failed to work. + +```shell +wget https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2 +``` + +### Extract the root filesystem +To do so, we simply need to mount the raw image with `guestmount`. + +> Make sure that you stop the VM before exporting the root filesystem. + +```shell +sudo mkdir -p /mnt/debian +sudo guestmount \ + --format=qcow2 \ + -a ./debian-12-genericcloud-amd64.qcow2 \ + -o allow_other \ + -i /mnt/debian +``` + +Then, you can simply copy the root file system to any directory, taking caution to preserve proper permissions like the setuid bit with the --archive option. + +```shell +export ROOT_DIR=./extracted +mkdir ${ROOT_DIR} +sudo cp --archive /mnt/debian/* ${ROOT_DIR} +``` + +Clean up the mount +```shell +sudo guestunmount /mnt/debian +sudo rmdir /mnt/debian +``` + + +Run the build_debian_image.sh that will create the image with the encrypted disk +> This script will require sudo for certain commands + +The password option is the *secret* password key, with which the disk will be encrypted, you will need to pass it to launch the VM. 
+ +```shell +bash ./build_debian_image.sh --rootfs-dir $ROOT_DIR -o ~/destination-image.img --password your-password +``` + +> Tip: To debug the image creation, pass the `-x` option to bash in front of the script name + +## To test and further customise you image you can also boot it inside qemu +```shell +sudo qemu-system-x86_64 \ + -drive format=raw,file= \ + -enable-kvm \ + -m 2048 \ + -nic user,model=virtio \ + -nographic \ + -serial mon:stdio \ + -drive if=pflash,format=raw,unit=0,file=/usr/share/ovmf/OVMF.fd,readonly=on + ``` + +> Once you have entered your password you might have to wait a minute or so for the disk to decrypt and boot. + +To exit qemu : press Ctrl a, x and then [Enter] diff --git a/examples/example_confidential_image/build_debian_image.sh b/examples/example_confidential_image/build_debian_image.sh new file mode 100644 index 000000000..c7a928dff --- /dev/null +++ b/examples/example_confidential_image/build_debian_image.sh @@ -0,0 +1,159 @@ +#!/bin/bash + +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +ROOTFS_DIR="" +IMAGE_SIZE="4GB" +IMAGE_FILE="" +MAPPER_NAME="cr_root" +LOOP_DEVICE_ID="" +MAPPED_DEVICE_ID="" +MOUNT_POINT="" +CLEANUP_DONE=false + +cleanup() { + if [ "$CLEANUP_DONE" = true ]; then + return + fi + CLEANUP_DONE=true + if mountpoint -q "${MOUNT_POINT}"; then + sudo umount --recursive "${MOUNT_POINT}" || echo "Failed to unmount ${MOUNT_POINT}" + fi + if [ -n "${MAPPED_DEVICE_ID}" ]; then + sudo cryptsetup close "${MAPPED_DEVICE_ID}" || echo "Failed to close encrypted device ${MAPPED_DEVICE_ID}" + fi + if [ -n "${LOOP_DEVICE_ID}" ]; then + sudo losetup -d "${LOOP_DEVICE_ID}" || echo "Failed to detach loop device ${LOOP_DEVICE_ID}" + fi + if [ -f "${KEY_FILE}" ]; then + rm -f "${KEY_FILE}" || echo "Failed to remove key file ${KEY_FILE}" + fi +} + + +# Trap command to catch and handle various signals: +# - EXIT: Triggered when the script exits (normal completion or an error). 
+# - HUP (SIGHUP): Signal 1, sent when the controlling terminal is closed (e.g., terminal window closed or SSH session logout). +# - INT (SIGINT): Signal 2, sent when the user interrupts the process (e.g., pressing Ctrl+C). +# - QUIT (SIGQUIT): Signal 3, sent when the user requests the process to quit and perform a core dump (e.g., pressing Ctrl+\). +# - PIPE (SIGPIPE): Signal 13, sent when attempting to write to a pipe without a reader (e.g., in scripts using pipelines if a command in the pipeline exits prematurely). +# - TERM (SIGTERM): Signal 15, sent by the kill command to request the process to terminate gracefully. +trap cleanup EXIT HUP INT QUIT PIPE TERM + +error_handler() { + echo "" + echo "An error occured while building the image and the process was not completed properly." + echo "Please check the log, fix any error if required and restart the script." + echo "For more help see https://docs.aleph.im/computing/confidential/encrypted-disk/" +} + +trap error_handler ERR + +usage() { + cat <&2 +Usage: + $0 --rootfs-dir ROOTFS_DIR [--image-size IMAGE_SIZE] [--password DISK_PASSWORD] [--mapper-name MAPPER_NAME] + -o IMAGE_FILE | --output IMAGE_FILE Image file to use. Defaults to ".img." + -p DISK_PASSWORD | --password=DISK_PASSWORD Password to use for the encrypted disk. Automatically generated if not specified. + -r ROOTFS_DIR | --rootfs-dir=ROOTFS_DIR Directory containing the original rootfs. + -s IMAGE_SIZE | --image-size IMAGE_SIZE Size of the target image, ex: 20GB. Defaults to 4GB. + -m MAPPER_NAME | --mapper-name=MAPPER_NAME Device mapped name for encrypted disk. Default to "cr_root" if not specified. 
+USAGE +} + +while true; do + case "$1" in + -o | --output) + IMAGE_FILE=$2 + shift 2 + ;; + -p | --password) + DISK_PASSWORD=$2 + shift 2 + ;; + -r | --rootfs-dir) + ROOTFS_DIR=$2 + shift 2 + ;; + -s | --image-size) + IMAGE_SIZE=$2 + shift 2 + ;; + -m | --mapper-name) + MAPPER_NAME=$2 + shift 2 + ;; + *) + break + ;; + esac +done + +if [ -z "${ROOTFS_DIR}" ]; then + usage + exit 1 +fi + +if [ -z "${DISK_PASSWORD}" ]; then + echo "No disk password provided. Generating one..." + DISK_PASSWORD=$( + tr "${KEY_FILE}" + +sudo cryptsetup --batch-mode --type luks1 --key-file "${KEY_FILE}" luksFormat "${OS_PARTITION_DEVICE_ID}" +sudo cryptsetup open --key-file "${KEY_FILE}" "${OS_PARTITION_DEVICE_ID}" "${MAPPER_NAME}" +sudo mkfs.ext4 "${MAPPED_DEVICE_ID}" + +echo "Copying root file system to the new OS partition..." +sudo mkdir -p "${MOUNT_POINT}" +sudo mount "${MAPPED_DEVICE_ID}" "${MOUNT_POINT}" +sudo cp --archive "${ROOTFS_DIR}"/* "${MOUNT_POINT}" + +echo "Configuring root file system..." +for m in run sys proc dev; do sudo mount --bind /$m ${MOUNT_POINT}/$m; done +sudo cp "${SCRIPT_DIR}/setup_debian_rootfs.sh" "${KEY_FILE}" "${MOUNT_POINT}" +sudo chroot "${MOUNT_POINT}" bash setup_debian_rootfs.sh --loop-device-id "${LOOP_DEVICE_ID}" --mapper-name "${MAPPER_NAME}" +sudo rm "${MOUNT_POINT}/setup_debian_rootfs.sh" "${KEY_FILE}" + +cleanup + +echo "Done! The new image is available as ${IMAGE_FILE}." +echo "Disk password: ${DISK_PASSWORD}" diff --git a/examples/example_confidential_image/setup_debian_rootfs.sh b/examples/example_confidential_image/setup_debian_rootfs.sh new file mode 100644 index 000000000..ff732dfd8 --- /dev/null +++ b/examples/example_confidential_image/setup_debian_rootfs.sh @@ -0,0 +1,132 @@ +#! /bin/bash +# This script sets up the Debian root file system to boot from an encrypted OS partition. 
+# In details: +# * Configure crypttab to add a second key to the OS partition to make the kernel unlock +# the partition by itself without requiring user input +# * Configure /etc/fstab to point to the correct devices +# * Regenerate Grub in removable so that the only unencrypted script just points to +# the Grub scripts inside the encrypted partition +# * Update the initramfs to take the modifications to the config files into account. + +set -eo pipefail + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +LOOP_DEVICE_ID="" +MAPPER_NAME="" + +usage() +{ + cat << USAGE >&2 +Usage: + $0 --loop-device LOOP_DEVICE_ID [--mapper-name MAPPER_NAME] + -d LOOP_DEVICE_ID | --loop-device-id=LOOP_DEVICE_ID Device ID of the disk image. + -m MAPPER_NAME | --mapper-name=MAPPER_NAME Device mapped name for encrypted disk. Automatically set to "cr_root" if not specified. +USAGE +} + +while test -n "$1"; do + case "$1" in + -d | --loop-device-id) + LOOP_DEVICE_ID=$2 + shift 2 + ;; + -p | --mapper-name) + MAPPER_NAME=$2 + shift 2 + ;; + esac +done + +if [ -z "${LOOP_DEVICE_ID}" ]; then + usage + exit 1 +fi + +if [ -z "${MAPPER_NAME}" ]; then + MAPPER_NAME=cr_root +fi + +# Temporary tmp is needed for apt +mount -t tmpfs -o size=100M tmpfs /tmp +# Install crypsetup and openssh +DEBIAN_FRONTEND=noninteractive apt update +DEBIAN_FRONTEND=noninteractive apt install -y -f openssh-server openssh-client cryptsetup cryptsetup-initramfs + +# The original password of the OS partition. Must be provided by the caller of the script. 
+BOOT_KEY_FILE="${SCRIPT_DIR}/os_partition.key" + +BOOT_PARTITION_DEVICE_ID="${LOOP_DEVICE_ID}p1" +OS_PARTITION_DEVICE_ID="${LOOP_DEVICE_ID}p2" + +BOOT_PARTITION_UUID=$(blkid --match-tag=UUID --output=value "${BOOT_PARTITION_DEVICE_ID}" ) +OS_PARTITION_UUID=$(blkid --match-tag=UUID --output=value "${OS_PARTITION_DEVICE_ID}" ) + +MAPPED_DEVICE_ID="/dev/mapper/${MAPPER_NAME}" + +# Create key file to unlock the disk at boot +mkdir -p /etc/cryptsetup-keys.d +KEY_FILE="/etc/cryptsetup-keys.d/luks-${OS_PARTITION_UUID}.key" +dd if=/dev/urandom bs=1 count=33|base64 -w 0 > "${KEY_FILE}" +chmod 0600 "${KEY_FILE}" +cryptsetup \ + --key-slot 1 \ + --iter-time 1 \ + --key-file "${BOOT_KEY_FILE}" \ + luksAddKey "${OS_PARTITION_DEVICE_ID}" \ + "${KEY_FILE}" + +# Tell the kernel to look for keys in /etc/cryptsetup-keys.d +echo "KEYFILE_PATTERN=\"/etc/cryptsetup-keys.d/*\"" >>/etc/cryptsetup-initramfs/conf-hook + +# Reduce the accessibility of the initramfs +echo "UMASK=0077" >> /etc/initramfs-tools/initramfs.conf + +# Configure Grub and crypttab +echo "GRUB_ENABLE_CRYPTODISK=y" >> /etc/default/grub +echo 'GRUB_PRELOAD_MODULES="luks cryptodisk lvm ext2"' >> /etc/default/grub +echo "${MAPPER_NAME} UUID=${OS_PARTITION_UUID} ${KEY_FILE} luks" >> /etc/crypttab +cat << EOF > /etc/fstab +${MAPPED_DEVICE_ID} / ext4 rw,discard,errors=remount-ro 0 1 +UUID=${BOOT_PARTITION_UUID} /boot/efi vfat defaults 0 0 +EOF + +# Install Grub and regenerate grub.cfg +mount /boot/efi +grub-install --target=x86_64-efi --removable +grub-install --target=x86_64-efi --recheck +grub-mkconfig -o /boot/grub/grub.cfg +umount /boot/efi + +# Force Grub config to use a crypt device +sed -i "s+root=PARTUUID= +cryptdevice=UUID=${OS_PARTITION_UUID}:${MAPPER_NAME} root=${MAPPED_DEVICE_ID} +g" /boot/grub/grub.cfg + +# Update initramfs after changes to fstab and crypttab +update-initramfs -u + +# Generate system SSH keys +ssh-keygen -A + +### Example to add a user with sudo right +#USER="username" +#PASSWORD="password" 
+#SSH_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArQslTrAf9A... user@example.com" + +## Create a new user with a home directory and Bash shell +#useradd -m -s /bin/bash "$USER" +# +## Set the user's password +#echo "$USER:$PASSWORD" | chpasswd +# +## Add the user to the sudo group +#usermod -aG sudo "$USER" +# +## Install ssh key +#USER_HOME="/home/$USER" +#mkdir -p "$USER_HOME/.ssh" +#chmod 700 "$USER_HOME/.ssh" +#echo "$SSH_KEY" >> "$USER_HOME/.ssh/authorized_keys" +#chmod 600 "$USER_HOME/.ssh/authorized_keys" +#chown -R $USER:$USER "$USER_HOME/.ssh" + +### END example +umount /tmp \ No newline at end of file diff --git a/examples/example_django/blog/__init__.py b/examples/example_django/blog/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/examples/example_django/blog/admin.py b/examples/example_django/blog/admin.py new file mode 100644 index 000000000..54601b564 --- /dev/null +++ b/examples/example_django/blog/admin.py @@ -0,0 +1,6 @@ +from django.contrib import admin + +from .models import Article, Comment + +admin.site.register(Article) +admin.site.register(Comment) diff --git a/examples/example_django/blog/apps.py b/examples/example_django/blog/apps.py new file mode 100644 index 000000000..6be26c734 --- /dev/null +++ b/examples/example_django/blog/apps.py @@ -0,0 +1,6 @@ +from django.apps import AppConfig + + +class BlogConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "blog" diff --git a/examples/example_django/blog/fixtures/default_articles.json b/examples/example_django/blog/fixtures/default_articles.json new file mode 100644 index 000000000..b9a135fb2 --- /dev/null +++ b/examples/example_django/blog/fixtures/default_articles.json @@ -0,0 +1,11 @@ +[ + { + "model": "blog.article", + "pk": "f115d067-f6c9-4532-a140-40c51f37a1bc", + "fields": { + "date": "2021-07-02T13:33:03Z", + "title": "Something different", + "body": "Lorem Ipsum is simply dummy text of the printing and typesetting industry. 
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum." + } + } +] diff --git a/examples/example_django/blog/forms.py b/examples/example_django/blog/forms.py new file mode 100644 index 000000000..e07413eff --- /dev/null +++ b/examples/example_django/blog/forms.py @@ -0,0 +1,11 @@ +from django import forms +from django.forms import ModelForm + +from .models import Comment + + +class CommentForm(ModelForm): + class Meta: + model = Comment + fields = ["text", "article"] + widgets = {"article": forms.HiddenInput()} diff --git a/examples/example_django/blog/migrations/0001_initial.py b/examples/example_django/blog/migrations/0001_initial.py new file mode 100644 index 000000000..d8dacd8dc --- /dev/null +++ b/examples/example_django/blog/migrations/0001_initial.py @@ -0,0 +1,37 @@ +# Generated by Django 3.2.4 on 2021-07-02 09:35 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + initial = True + + dependencies = [] + + operations = [ + migrations.CreateModel( + name="Article", + fields=[ + ("date", models.DateTimeField(auto_created=True)), + ("id", models.UUIDField(primary_key=True, serialize=False)), + ( + "title", + models.CharField(help_text="Title of the blog article", max_length=256), + ), + ("body", models.TextField(help_text="Body of the blog article")), + ], + ), + migrations.CreateModel( + name="Comment", + fields=[ + ("date", models.DateTimeField(auto_created=True, auto_now_add=True)), + ("id", models.UUIDField(primary_key=True, serialize=False)), + 
("text", models.CharField(max_length=1024)), + ( + "article", + models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="blog.article"), + ), + ], + ), + ] diff --git a/examples/example_django/blog/migrations/0002_auto_20210702_1331.py b/examples/example_django/blog/migrations/0002_auto_20210702_1331.py new file mode 100644 index 000000000..d5c813fb6 --- /dev/null +++ b/examples/example_django/blog/migrations/0002_auto_20210702_1331.py @@ -0,0 +1,29 @@ +# Generated by Django 3.2.4 on 2021-07-02 13:31 + +import uuid + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("blog", "0001_initial"), + ] + + operations = [ + migrations.AlterField( + model_name="article", + name="id", + field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False), + ), + migrations.AlterField( + model_name="comment", + name="date", + field=models.DateTimeField(auto_now_add=True), + ), + migrations.AlterField( + model_name="comment", + name="id", + field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False), + ), + ] diff --git a/examples/example_django/blog/migrations/__init__.py b/examples/example_django/blog/migrations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/examples/example_django/blog/models.py b/examples/example_django/blog/models.py new file mode 100644 index 000000000..b60094096 --- /dev/null +++ b/examples/example_django/blog/models.py @@ -0,0 +1,23 @@ +import uuid + +from django.db import models + + +class Article(models.Model): + id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) + title = models.CharField(max_length=256, help_text="Title of the blog article") + body = models.TextField(help_text="Body of the blog article") + date = models.DateTimeField(auto_created=True) + + def __str__(self): + return f"Blog article '{self.title}'" + + +class Comment(models.Model): + id = 
models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) + text = models.CharField(max_length=1024) + article = models.ForeignKey(to=Article, on_delete=models.CASCADE) + date = models.DateTimeField(auto_now_add=True, editable=False) + + def __str__(self): + return f"Comment on {self.article.title}" diff --git a/examples/example_django/blog/templates/blog/article_list.html b/examples/example_django/blog/templates/blog/article_list.html new file mode 100644 index 000000000..362732ea2 --- /dev/null +++ b/examples/example_django/blog/templates/blog/article_list.html @@ -0,0 +1,62 @@ + + + + My Django Blog + + + +

My Django Blog

+ +{% for article in object_list %} +
+

{{ article.title }}

+
Published on
+

+ {{ article.body }} +

+
+ {% for comment in article.comment_set.all %} +

{{ comment.text }}

+ {% endfor %} + +
+ {% csrf_token %} + {{ form }} + + +
+
+
+{% empty %} +
  • No articles yet.
  • +{% endfor %} + + + + diff --git a/examples/example_django/blog/templates/blog/comment.html b/examples/example_django/blog/templates/blog/comment.html new file mode 100644 index 000000000..58409efe3 --- /dev/null +++ b/examples/example_django/blog/templates/blog/comment.html @@ -0,0 +1,5 @@ +
    + {% csrf_token %} + {{ form }} + +
    diff --git a/examples/example_django/blog/urls.py b/examples/example_django/blog/urls.py new file mode 100644 index 000000000..1d5ef5927 --- /dev/null +++ b/examples/example_django/blog/urls.py @@ -0,0 +1,10 @@ +from django.urls import path +from django.views.decorators.csrf import csrf_exempt + +from .views import ArticleListView, CommentFormView, test_view + +urlpatterns = [ + path("", ArticleListView.as_view(), name="article-list"), + path("comment", csrf_exempt(CommentFormView.as_view()), name="comment"), + path("post", csrf_exempt(test_view), name="test-post"), +] diff --git a/examples/example_django/blog/views.py b/examples/example_django/blog/views.py new file mode 100644 index 000000000..1e3ad6d08 --- /dev/null +++ b/examples/example_django/blog/views.py @@ -0,0 +1,23 @@ +from django.http import JsonResponse +from django.views.generic import CreateView, ListView + +from .forms import CommentForm +from .models import Article + + +class ArticleListView(ListView): + model = Article + ordering = "-date" + + extra_context = {"form": CommentForm} + + +class CommentFormView(CreateView): + template_name = "blog/comment.html" + form_class = CommentForm + success_url = "/" + + +def test_view(request): + print(request.POST) + return JsonResponse(request.POST) diff --git a/examples/example_django/example_django/__init__.py b/examples/example_django/example_django/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/examples/example_django/example_django/asgi.py b/examples/example_django/example_django/asgi.py new file mode 100644 index 000000000..feac32c9f --- /dev/null +++ b/examples/example_django/example_django/asgi.py @@ -0,0 +1,20 @@ +""" +ASGI config for example_django project. + +It exposes the ASGI callable as a module-level variable named ``application``. 
+ +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ +""" + +import os + +from django.core.asgi import get_asgi_application + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_django.settings") + +application = get_asgi_application() + +os.system("/usr/bin/python3 /opt/code/manage.py migrate") + +os.system("/usr/bin/python3 /opt/code/manage.py loaddata /opt/code/blog/fixtures/default_articles.json") diff --git a/examples/example_django/example_django/settings.py b/examples/example_django/example_django/settings.py new file mode 100644 index 000000000..18238fdd3 --- /dev/null +++ b/examples/example_django/example_django/settings.py @@ -0,0 +1,139 @@ +""" +Django settings for example_django project. + +Generated by 'django-admin startproject' using Django 3.2.4. + +For more information on this file, see +https://docs.djangoproject.com/en/3.2/topics/settings/ + +For the full list of settings and their values, see +https://docs.djangoproject.com/en/3.2/ref/settings/ +""" + +import os.path +from pathlib import Path + +# Build paths inside the project like this: BASE_DIR / 'subdir'. +BASE_DIR = Path(__file__).resolve().parent.parent + + +# Quick-start development settings - unsuitable for production +# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ + +# SECURITY WARNING: keep the secret key used in production secret! +SECRET_KEY = "django-insecure-1r3v1fc$q%sqy)0#bybc4pd##g+!tpm%+4^5opqyu93o0hqk$w" + +# SECURITY WARNING: don't run with debug turned on in production! 
+DEBUG = True + +ALLOWED_HOSTS = ["127.0.0.1", "vm.demo.okeso.fr", "test.vm.demo.okeso.fr"] + + +# Application definition + +INSTALLED_APPS = [ + "django.contrib.admin", + "django.contrib.auth", + "django.contrib.contenttypes", + "django.contrib.sessions", + "django.contrib.messages", + "django.contrib.staticfiles", + "blog", +] + +MIDDLEWARE = [ + "django.middleware.security.SecurityMiddleware", + "django.contrib.sessions.middleware.SessionMiddleware", + "django.middleware.common.CommonMiddleware", + "django.middleware.csrf.CsrfViewMiddleware", + "django.contrib.auth.middleware.AuthenticationMiddleware", + "django.contrib.messages.middleware.MessageMiddleware", + "django.middleware.clickjacking.XFrameOptionsMiddleware", +] + +ROOT_URLCONF = "example_django.urls" + +TEMPLATES = [ + { + "BACKEND": "django.template.backends.django.DjangoTemplates", + "DIRS": [], + "APP_DIRS": True, + "OPTIONS": { + "context_processors": [ + "django.template.context_processors.debug", + "django.template.context_processors.request", + "django.contrib.auth.context_processors.auth", + "django.contrib.messages.context_processors.messages", + ], + }, + }, +] + +WSGI_APPLICATION = "example_django.wsgi.application" + + +# Database +# https://docs.djangoproject.com/en/3.2/ref/settings/#databases + +if os.path.isdir("/var/lib/sqlite"): + # Inside Aleph VM + DATABASES = { + "default": { + "ENGINE": "django.db.backends.sqlite3", + "NAME": "/var/lib/sqlite/db.sqlite3", + } + } +else: + # On developer setup + DATABASES = { + "default": { + "ENGINE": "django.db.backends.sqlite3", + "NAME": BASE_DIR / "db.sqlite3", + } + } + + +# Password validation +# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators + +AUTH_PASSWORD_VALIDATORS = [ + { + "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", + }, + { + "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", + }, + { + "NAME": 
"django.contrib.auth.password_validation.CommonPasswordValidator", + }, + { + "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", + }, +] + + +# Internationalization +# https://docs.djangoproject.com/en/3.2/topics/i18n/ + +LANGUAGE_CODE = "en-us" + +TIME_ZONE = "UTC" + +USE_I18N = True + +USE_L10N = True + +USE_TZ = True + + +# Static files (CSS, JavaScript, Images) +# https://docs.djangoproject.com/en/3.2/howto/static-files/ + +STATIC_URL = "https://ipfs.io/ipfs/QmUhm7UWrGrjoJY5cVZ9ur9PtT7nHzdmXJuNpD8s7VLcJR/" + +STATIC_ROOT = os.path.join(BASE_DIR, "static") + +# Default primary key field type +# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field + +DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField" diff --git a/examples/example_django/example_django/urls.py b/examples/example_django/example_django/urls.py new file mode 100644 index 000000000..82915b927 --- /dev/null +++ b/examples/example_django/example_django/urls.py @@ -0,0 +1,23 @@ +"""example_django URL Configuration + +The `urlpatterns` list routes URLs to views. For more information please see: + https://docs.djangoproject.com/en/3.2/topics/http/urls/ +Examples: +Function views + 1. Add an import: from my_app import views + 2. Add a URL to urlpatterns: path('', views.home, name='home') +Class-based views + 1. Add an import: from other_app.views import Home + 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') +Including another URLconf + 1. Import the include() function: from django.urls import include, path + 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) +""" + +from django.contrib import admin +from django.urls import include, path + +urlpatterns = [ + path("", include("blog.urls")), + path("admin/", admin.site.urls), +] diff --git a/examples/example_django/example_django/wsgi.py b/examples/example_django/example_django/wsgi.py new file mode 100644 index 000000000..35cccb4bf --- /dev/null +++ b/examples/example_django/example_django/wsgi.py @@ -0,0 +1,16 @@ +""" +WSGI config for example_django project. + +It exposes the WSGI callable as a module-level variable named ``application``. + +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ +""" + +import os + +from django.core.wsgi import get_wsgi_application + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_django.settings") + +application = get_wsgi_application() diff --git a/examples/example_django/manage.py b/examples/example_django/manage.py new file mode 100755 index 000000000..bcc50390c --- /dev/null +++ b/examples/example_django/manage.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python +"""Django's command-line utility for administrative tasks.""" + +import os +import sys + + +def main(): + """Run administrative tasks.""" + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_django.settings") + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + "Couldn't import Django. Are you sure it's installed and " + "available on your PYTHONPATH environment variable? Did you " + "forget to activate a virtual environment?" 
+ ) from exc + execute_from_command_line(sys.argv) + + +if __name__ == "__main__": + main() diff --git a/examples/example_fastapi/README.md b/examples/example_fastapi/README.md new file mode 100644 index 000000000..231ce255b --- /dev/null +++ b/examples/example_fastapi/README.md @@ -0,0 +1,6 @@ +Publish using: + +```shell + aleph program upload ../aleph-vm/examples/example_fastapi main:app \ + --persistent-volume "persistence=host,size_mib=1,mount=/var/lib/example,name=increment-storage,comment=Persistence" +``` diff --git a/examples/example_fastapi/main.py b/examples/example_fastapi/main.py new file mode 100644 index 000000000..9801dfb41 --- /dev/null +++ b/examples/example_fastapi/main.py @@ -0,0 +1,461 @@ +import asyncio +import json +import logging +import os +import socket +import subprocess +import sys +from datetime import datetime, timezone +from os import listdir +from pathlib import Path +from typing import Any, Optional + +import aiohttp +from aleph_message.models import ( + MessagesResponse, + PostMessage, + ProgramMessage, + StoreMessage, +) +from aleph_message.status import MessageStatus +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import PlainTextResponse +from pip._internal.operations.freeze import freeze +from pydantic import BaseModel, HttpUrl +from starlette.responses import JSONResponse + +from aleph.sdk.chains.remote import RemoteAccount +from aleph.sdk.client import AlephHttpClient, AuthenticatedAlephHttpClient +from aleph.sdk.query.filters import MessageFilter +from aleph.sdk.types import StorageEnum +from aleph.sdk.vm.app import AlephApp +from aleph.sdk.vm.cache import VmCache + +logger = logging.getLogger(__name__) +logger.debug("imports done") + +http_app = FastAPI() +app = AlephApp(http_app=http_app) +app.add_middleware( + CORSMiddleware, + allow_credentials=True, + allow_origins=["*"], + allow_methods=["*"], + allow_headers=["*"], +) +cache = VmCache() + 
+startup_lifespan_executed: bool = False + + +@app.on_event("startup") +async def startup_event() -> None: + global startup_lifespan_executed + startup_lifespan_executed = True + + +@app.get("/") +async def index() -> dict[str, Any]: + if os.path.exists("/opt/venv"): + opt_venv = list(listdir("/opt/venv")) + else: + opt_venv = [] + return { + "Example": "example_fastapi", + "endpoints": [ + # Features + "/lifespan", + "/environ", + "/state/increment", + "/wait-for/{delay}", + # Local cache + "/cache/get/{key}", + "/cache/set/{key}/{value}", + "/cache/remove/{key}", + "/cache/keys", + # Networking + "/dns", + "/ip/address", + "/ip/4", + "/ip/6", + "/internet", + # Error handling + "/raise", + "/crash", + # Aleph.im + "/messages", + "/get_a_message", + "/post_a_message", + "/post_a_message_local_account", + "/post_a_file", + "/sign_a_message", + # Platform properties + "/platform/os", + "/platform/python", + "/platform/pip-freeze", + ], + "files_in_volumes": { + "/opt/venv": opt_venv, + }, + } + + +@app.get("/lifespan") +async def check_lifespan(): + """ + Check that ASGI lifespan startup signal has been received + """ + return {"Lifespan": startup_lifespan_executed} + + +@app.get("/environ") +async def environ() -> dict[str, str]: + """List environment variables""" + return dict(os.environ) + + +@app.get("/messages") +async def read_aleph_messages() -> dict[str, MessagesResponse]: + """Read data from Aleph using the Aleph Client library.""" + async with AlephHttpClient() as client: + message_filter = MessageFilter(hashes=["f246f873c3e0f637a15c566e7a465d2ecbb83eaa024d54ccb8fb566b549a929e"]) + data = await client.get_messages(message_filter=message_filter) + return {"Messages": data} + + +@app.get("/dns") +async def resolve_dns_hostname(): + """Check if DNS resolution is working.""" + hostname = "example.org" + ipv4: Optional[str] = None + ipv6: Optional[str] = None + + info = socket.getaddrinfo(hostname, 80, proto=socket.IPPROTO_TCP) + if not info: + 
logger.error("DNS resolution failed") + + # Iterate over the results to find the IPv4 and IPv6 addresses they may not all be present. + # The function returns a list of 5-tuples with the following structure: + # (family, type, proto, canonname, sockaddr) + for info_tuple in info: + if info_tuple[0] == socket.AF_INET: + ipv4 = info_tuple[4][0] + elif info_tuple[0] == socket.AF_INET6: + ipv6 = info_tuple[4][0] + + if ipv4 and not ipv6: + logger.warning(f"DNS resolution for {hostname} returned only an IPv4 address") + elif ipv6 and not ipv4: + logger.warning(f"DNS resolution for {hostname} returned only an IPv6 address") + + result = {"ipv4": ipv4, "ipv6": ipv6} + status_code = 200 if len(info) > 1 else 503 + return JSONResponse(content=result, status_code=status_code) + + +@app.get("/ip/address") +async def ip_address(): + """Fetch the ip addresses of the virtual machine.""" + output = subprocess.check_output(["ip", "addr"], shell=False) + return PlainTextResponse(content=output) + + +@app.get("/ip/4") +async def connect_ipv4(): + """Connect to the Quad9 VPN provider using their IPv4 address.""" + ipv4_host = "9.9.9.9" + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(5) + sock.connect((ipv4_host, 53)) + return {"result": True} + except socket.timeout: + logger.warning(f"Socket connection for host {ipv4_host} failed") + return {"result": False} + + +@app.get("/ip/6") +async def connect_ipv6(): + """Connect to the Quad9 VPN provider using their IPv6 address. + The webserver on that address returns a 404 error, so we accept that response code. 
+ """ + ipv6_host = "https://[2620:fe::fe]" + timeout = aiohttp.ClientTimeout(total=5) + async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(), timeout=timeout) as session: + try: + async with session.get(ipv6_host) as resp: + # We expect this endpoint to return a 404 error + if resp.status != 404: + resp.raise_for_status() + return {"result": True, "headers": resp.headers} + except TimeoutError: + logger.warning(f"Session connection to host {ipv6_host} timed out") + return {"result": False, "reason": "Timeout"} + except aiohttp.ClientConnectionError as error: + logger.warning(f"Client connection to host {ipv6_host} failed: {error}") + # Get a string that describes the error + return {"result": False, "reason": str(error.args[0])} + + +async def check_url(internet_host: HttpUrl, timeout_seconds: int = 5, socket_family=socket.AF_INET): + """Check the connectivity of a single URL.""" + timeout = aiohttp.ClientTimeout(total=timeout_seconds) + tcp_connector = aiohttp.TCPConnector(family=socket_family) + async with aiohttp.ClientSession(timeout=timeout, connector=tcp_connector) as session: + try: + async with session.get(internet_host) as resp: + resp.raise_for_status() + return {"result": resp.status, "headers": resp.headers, "url": internet_host} + except (aiohttp.ClientConnectionError, TimeoutError): + logger.warning(f"Session connection for host {internet_host} failed") + return {"result": False, "url": internet_host} + + +@app.get("/internet") +async def read_internet(): + """Check Internet connectivity of the system, requiring IP connectivity, domain resolution and HTTPS/TLS.""" + internet_hosts: list[HttpUrl] = [ + HttpUrl(url="https://aleph.im/", scheme="https"), + HttpUrl(url="https://ethereum.org", scheme="https"), + HttpUrl(url="https://ipfs.io/", scheme="https"), + ] + timeout_seconds = 5 + + # Create a list of tasks to check the URLs in parallel + tasks: set[asyncio.Task] = {asyncio.create_task(check_url(host, timeout_seconds)) for host in 
internet_hosts} + + # While no tasks have completed, keep waiting for the next one to finish + while tasks: + done, tasks = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED) + result = done.pop().result() + + if result["result"]: + # The task was successful, cancel the remaining tasks and return the result + for task in tasks: + task.cancel() + return result + else: + continue + + # No URL was reachable + return {"result": False} + + +@app.get("/get_a_message") +async def get_a_message(): + """Get a message from the Aleph.im network""" + item_hash = "3fc0aa9569da840c43e7bd2033c3c580abb46b007527d6d20f2d4e98e867f7af" + async with AlephHttpClient() as client: + message = await client.get_message( + item_hash=item_hash, + message_type=ProgramMessage, + ) + return message.dict() + + +@app.post("/post_a_message") +async def post_with_remote_account(): + """Post a message on the Aleph.im network using the remote account of the host.""" + try: + account = await RemoteAccount.from_crypto_host(host="http://localhost", unix_socket="/tmp/socat-socket") + + content = { + "date": datetime.now(tz=timezone.utc).isoformat(), + "test": True, + "answer": 42, + "something": "interesting", + } + async with AuthenticatedAlephHttpClient( + account=account, + ) as client: + message: PostMessage + status: MessageStatus + message, status = await client.create_post( + post_content=content, + post_type="test", + ref=None, + channel="TEST", + inline=True, + storage_engine=StorageEnum.storage, + sync=True, + ) + if status != MessageStatus.PROCESSED: + return JSONResponse(status_code=500, content={"error": status}) + return { + "message": message, + } + except aiohttp.client_exceptions.UnixClientConnectorError: + return JSONResponse(status_code=500, content={"error": "Could not connect to the remote account"}) + + +@app.post("/post_a_message_local_account") +async def post_with_local_account(): + """Post a message on the Aleph.im network using a local private key.""" + from 
aleph.sdk.chains.ethereum import get_fallback_account + + account = get_fallback_account() + + content = { + "date": datetime.now(tz=timezone.utc).isoformat(), + "test": True, + "answer": 42, + "something": "interesting", + } + async with AuthenticatedAlephHttpClient( + account=account, + api_server="https://api2.aleph.im", + allow_unix_sockets=False, + ) as client: + message: PostMessage + status: MessageStatus + message, status = await client.create_post( + post_content=content, + post_type="test", + ref=None, + channel="TEST", + inline=True, + storage_engine=StorageEnum.storage, + sync=True, + ) + if status != MessageStatus.PROCESSED: + return JSONResponse(status_code=500, content={"error": status}) + return { + "message": message, + } + + +@app.post("/post_a_file") +async def post_a_file(): + from aleph.sdk.chains.ethereum import get_fallback_account + + account = get_fallback_account() + file_path = Path(__file__).absolute() + async with AuthenticatedAlephHttpClient( + account=account, + ) as client: + message: StoreMessage + status: MessageStatus + message, status = await client.create_store( + file_path=file_path, + ref=None, + channel="TEST", + storage_engine=StorageEnum.storage, + sync=True, + ) + if status != MessageStatus.PROCESSED: + return JSONResponse(status_code=500, content={"error": status}) + return { + "message": message, + } + + +@app.get("/sign_a_message") +async def sign_a_message(): + """Sign a message using a locally managed account within the virtual machine.""" + # FIXME: Broken, fixing this depends on https://github.com/aleph-im/aleph-sdk-python/pull/120 + from aleph.sdk.chains.ethereum import get_fallback_account + + account = get_fallback_account() + message = {"hello": "world", "chain": "ETH"} + signed_message = await account.sign_message(message) + return {"message": signed_message} + + +@app.get("/cache/get/{key}") +async def get_from_cache(key: str): + """Get data in the VM cache""" + return await cache.get(key) + + 
+@app.get("/cache/set/{key}/{value}") +async def store_in_cache(key: str, value: str): + """Store data in the VM cache""" + return await cache.set(key, value) + + +@app.get("/cache/remove/{key}") +async def remove_from_cache(key: str): + """Store data in the VM cache""" + result = await cache.delete(key) + return result == 1 + + +@app.get("/cache/keys") +async def keys_from_cache(pattern: str = "*"): + """List keys from the VM cache""" + return await cache.keys(pattern) + + +@app.get("/state/increment") +async def increment() -> dict[str, int]: + path = "/var/lib/example/storage.json" + try: + with open(path) as fd: + data = json.load(fd) + data["counter"] += 1 + except FileNotFoundError: + data = {"counter": 0} + with open(path, "w") as fd: + json.dump(data, fd) + return data + + +class Data(BaseModel): + text: str + number: int + + +@app.post("/post") +async def receive_post(data: Data) -> str: + return str(data) + + +class CustomError(Exception): + pass + + +@app.get("/raise") +def raise_error() -> None: + """Raises an error to check that the init handles it properly without crashing""" + error_message = "Whoops" + raise CustomError(error_message) + + +@app.get("/crash") +def crash() -> None: + """Crash the entire VM in order to check that the supervisor can handle it""" + sys.exit(1) + + +filters = [ + { + # "sender": "0xB31B787AdA86c6067701d4C0A250c89C7f1f29A5", + "channel": "TEST" + } +] + + +@app.get("/platform/os") +def platform_os() -> PlainTextResponse: + return PlainTextResponse(content=Path("/etc/os-release").read_text()) + + +@app.get("/platform/python") +def platform_python() -> PlainTextResponse: + return PlainTextResponse(content=sys.version) + + +@app.get("/platform/pip-freeze") +def platform_pip_freeze() -> list[str]: + return list(freeze()) + + +@app.event(filters=filters) +async def aleph_event(event) -> dict[str, str]: + print("aleph_event", event) + async with aiohttp.ClientSession(connector=aiohttp.TCPConnector()) as session: + async with 
session.get("https://official.aleph.cloud/api/v0/info/public.json") as resp: + print("RESP", resp) + resp.raise_for_status() + return {"result": "Good"} diff --git a/examples/example_fastapi_2/__init__.py b/examples/example_fastapi_2/__init__.py deleted file mode 100644 index 36d47d93d..000000000 --- a/examples/example_fastapi_2/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -from typing import Optional - -from fastapi import FastAPI - -app = FastAPI() - - -@app.get("/") -def read_root(): - return {"Hello": "World"} - - -@app.get("/run/{item_id}") -def read_item(item_id: str, q: Optional[str] = None): - return {"pyz item_id": item_id, "q": q} - - -@app.post("/run/{item_id}") -def read_item_post(item_id: str, q: Optional[str] = None): - return {"pyz item_id_post": item_id, "q": q} diff --git a/examples/example_fastapi_2/__main__.py b/examples/example_fastapi_2/__main__.py deleted file mode 100644 index 36d47d93d..000000000 --- a/examples/example_fastapi_2/__main__.py +++ /dev/null @@ -1,20 +0,0 @@ -from typing import Optional - -from fastapi import FastAPI - -app = FastAPI() - - -@app.get("/") -def read_root(): - return {"Hello": "World"} - - -@app.get("/run/{item_id}") -def read_item(item_id: str, q: Optional[str] = None): - return {"pyz item_id": item_id, "q": q} - - -@app.post("/run/{item_id}") -def read_item_post(item_id: str, q: Optional[str] = None): - return {"pyz item_id_post": item_id, "q": q} diff --git a/examples/example_http_js/.dockerignore b/examples/example_http_js/.dockerignore new file mode 100644 index 000000000..c6d35cc33 --- /dev/null +++ b/examples/example_http_js/.dockerignore @@ -0,0 +1,3 @@ +*.zip +*.squashfs +*.key diff --git a/examples/example_http_js/Dockerfile b/examples/example_http_js/Dockerfile new file mode 100644 index 000000000..cf2167653 --- /dev/null +++ b/examples/example_http_js/Dockerfile @@ -0,0 +1,15 @@ +FROM node:16-bookworm + +RUN apt-get update && apt-get -y upgrade && apt-get install -y \ + libsecp256k1-dev \ + squashfs-tools 
\ + python3-pip \ + git \ + && rm -rf /var/lib/apt/lists/* + +RUN pip install aleph-client + +WORKDIR /usr/src/example_http_js +COPY . . + +RUN npm i diff --git a/examples/example_http_js/Makefile b/examples/example_http_js/Makefile new file mode 100644 index 000000000..6c43a3f06 --- /dev/null +++ b/examples/example_http_js/Makefile @@ -0,0 +1,19 @@ + +podman-prepare: + podman build -t aleph-example-js . + +podman-publish: + podman run --rm -ti aleph-example-js make publish + +podman-client: + podman rmi aleph-example-js + +docker-prepare: + docker build -t aleph-example-js . + +docker-publish: + docker run --rm -ti aleph-example-js make publish + +publish: + chmod +x ./src/run.sh + aleph program upload ./src "run.sh" diff --git a/examples/example_http_js/README.md b/examples/example_http_js/README.md new file mode 100644 index 000000000..8c04304f7 --- /dev/null +++ b/examples/example_http_js/README.md @@ -0,0 +1,32 @@ +# Aleph VM JS Example + +A simple example program written in JS that can run in an Aleph VM. + +## About + +This example is a simple HTTP server listening on port 8080. +It does not depend on third-party libraries. + +Test it on http://localhost:8080. 
+ +## Publish the program + +### Locally + +```shell +make publish +``` + +### Using Podman + +```shell +make podman-prepare +make podman-publish +``` + +### Using Docker + +```shell +make docker-prepare +make docker-publish +``` diff --git a/examples/example_http_js/package.json b/examples/example_http_js/package.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ b/examples/example_http_js/package.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/examples/example_http_js/src/run.sh b/examples/example_http_js/src/run.sh new file mode 100755 index 000000000..18430a26a --- /dev/null +++ b/examples/example_http_js/src/run.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +set -euf + +cd /opt/code +node /opt/code/server.js diff --git a/examples/example_http_js/src/server.js b/examples/example_http_js/src/server.js new file mode 100644 index 000000000..2dc7189a5 --- /dev/null +++ b/examples/example_http_js/src/server.js @@ -0,0 +1,7 @@ +const http = require('http'); +const requestListener = function (req, res) { + res.writeHead(200, {'Content-Type': 'text/plain'}); + res.end('Hello, World!'); +} +const server = http.createServer(requestListener); +server.listen(8080); diff --git a/examples/example_http_rust/Cargo.toml b/examples/example_http_rust/Cargo.toml new file mode 100644 index 000000000..69cc888f8 --- /dev/null +++ b/examples/example_http_rust/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "example_http_rust" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/examples/example_http_rust/Dockerfile b/examples/example_http_rust/Dockerfile new file mode 100644 index 000000000..09bcab15b --- /dev/null +++ b/examples/example_http_rust/Dockerfile @@ -0,0 +1,14 @@ +FROM rust:bookworm + +RUN apt-get update && apt-get -y upgrade && apt-get install -y \ + libsecp256k1-dev \ + python3-pip \ + squashfs-tools \ + && rm -rf /var/lib/apt/lists/* + 
+RUN pip install aleph-client + +WORKDIR /usr/src/example_http_rust +COPY . . + +RUN cargo install --path . diff --git a/examples/example_http_rust/Makefile b/examples/example_http_rust/Makefile new file mode 100644 index 000000000..dbf618cd9 --- /dev/null +++ b/examples/example_http_rust/Makefile @@ -0,0 +1,18 @@ + +podman-prepare: + podman build -t aleph-example-rust . + +podman-publish: + podman run --rm -ti aleph-example-rust make publish + +docker-prepare: + docker build -t aleph-example-rust . + +docker-publish: + docker run --rm -ti aleph-example-rust make publish + +publish: + cargo build --release + mkdir -p ./dist + cp target/release/example_http_rust ./dist/ + aleph program upload ./dist example_http_rust diff --git a/examples/example_http_rust/README.md b/examples/example_http_rust/README.md new file mode 100644 index 000000000..3b9f325df --- /dev/null +++ b/examples/example_http_rust/README.md @@ -0,0 +1,33 @@ +# Aleph VM Rust Example + +A simple example program written in Rust that +can run in an Aleph VM. + +## About + +This example is a simple HTTP server listening on port 8080. +It does not depend on third-party libraries. + +Test it on http://localhost:8080. 
+ +## Publish the program + +### Locally + +```shell +make publish +``` + +### Using Podman + +```shell +make podman-prepare +make podman-publish +``` + +### Using Docker + +```shell +make docker-prepare +make docker-publish +``` diff --git a/examples/example_http_rust/src/main.rs b/examples/example_http_rust/src/main.rs new file mode 100644 index 000000000..b354b6cff --- /dev/null +++ b/examples/example_http_rust/src/main.rs @@ -0,0 +1,32 @@ +use std::io::prelude::*; +use std::net::TcpListener; +use std::net::TcpStream; + +fn main() { + + let listener = TcpListener::bind("0.0.0.0:8080").unwrap(); + println!("Running on 0.0.0.0:8080"); + for stream in listener.incoming() { + let stream = stream.unwrap(); + handle_connection(stream); + } +} + + +fn handle_connection(mut stream: TcpStream) { + println!("handling connection"); + + const MSG: &str = "helloworld"; + let msg = MSG.as_bytes(); + + let response = format!("{:x?}", msg); + + let mut buffer = [0; 1024]; + + stream.read(&mut buffer).unwrap(); + + let response = format!("HTTP/1.1 200 OK\nContent-Type: text/plain\n\nOKIDOK\n{}", response); + + stream.write(response.as_bytes()).unwrap(); + stream.flush().unwrap(); +} diff --git a/examples/example_pip/main.py b/examples/example_pip/main.py new file mode 100644 index 000000000..ad0b66b5b --- /dev/null +++ b/examples/example_pip/main.py @@ -0,0 +1,11 @@ +import pandas as pandas +from fastapi import FastAPI, Response + +app = FastAPI() + + +@app.get("/") +async def root(): + data = range(10) + df = pandas.DataFrame(data) + return Response(content=df.to_html(), media_type="text/html") diff --git a/examples/example_pip/requirements.txt b/examples/example_pip/requirements.txt new file mode 100644 index 000000000..fb6c7ed7e --- /dev/null +++ b/examples/example_pip/requirements.txt @@ -0,0 +1 @@ +pandas diff --git a/examples/instance_message_from_aleph.json b/examples/instance_message_from_aleph.json new file mode 100644 index 000000000..a4da80c2b --- /dev/null +++ 
b/examples/instance_message_from_aleph.json @@ -0,0 +1,58 @@ +{ + "chain": "ETH", + "item_hash": "fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-hash", + "sender": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", + "type": "INSTANCE", + "channel": "Fun-dApps", + "confirmed": true, + "content": { + "address": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", + "allow_amend": false, + "variables": { + "VM_CUSTOM_NUMBER": "32" + }, + "environment": { + "reproducible": true, + "internet": true, + "aleph_api": true, + "shared_cache": true + }, + "resources": { + "vcpus": 1, + "memory": 512, + "seconds": 30 + }, + "rootfs": { + "parent": { + "ref": "549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613", + "use_latest": true + }, + "persistence": "host", + "size_mib": 5000 + }, + "authorized_keys": [ + "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHlGJRaIv/EzNT0eNqNB5DiGEbii28Fb2zCjuO/bMu7y amolinsdiaz@gmail.com" + ], + "volumes": [ + { + "mount": "/opt/venv", + "ref": "5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51", + "use_latest": false + } + ], + "replaces": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", + "time": 1619017773.8950517 + }, + "item_content": "{\"address\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"allow_amend\":false,\"variables\":{\"VM_CUSTOM_NUMBER\":\"32\"},\"environment\":{\"reproducible\":true,\"internet\":true,\"aleph_api\":true,\"shared_cache\":true},\"resources\":{\"vcpus\":1,\"memory\":128,\"seconds\":30},\"rootfs\":{\"parent\":{\"ref\":\"549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613\",\"use_latest\":true},\"persistence\":\"host\",\"size_mib\":20000},\"cloud_config\":{\"password\":\"password\",\"chpasswd\":{\"expire\":\"False\"}},\"volumes\":[{\"mount\":\"/opt/venv\",\"ref\":\"5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51\",\"use_latest\":false},{\"comment\":\"Working data persisted on the VM supervisor, not available on other 
nodes\",\"mount\":\"/var/lib/example\",\"name\":\"data\",\"persistence\":\"host\",\"size_mib\":5}],\"replaces\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"time\":1619017773.8950517}", + "item_type": "inline", + "signature": "0x372da8230552b8c3e65c05b31a0ff3a24666d66c575f8e11019f62579bf48c2b7fe2f0bbe907a2a5bf8050989cdaf8a59ff8a1cbcafcdef0656c54279b4aa0c71b", + "size": 749, + "time": 1619017773.8950577, + "confirmations": [ + { + "chain": "ETH", + "height": 12284734, + "hash": "0x67f2f3cde5e94e70615c92629c70d22dc959a118f46e9411b29659c2fce87cdc" + } + ] +} diff --git a/examples/program_message_from_aleph.json b/examples/program_message_from_aleph.json new file mode 100644 index 000000000..ae6b27489 --- /dev/null +++ b/examples/program_message_from_aleph.json @@ -0,0 +1,93 @@ +{ + "_id": { + "$oid": "6080402d7f44efefd611dc1e" + }, + "chain": "ETH", + "item_hash": "fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-hash", + "sender": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", + "type": "PROGRAM", + "channel": "Fun-dApps", + "confirmed": true, + "content": { + "type": "vm-function", + "address": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", + "allow_amend": false, + "code": { + "encoding": "squashfs", + "entrypoint": "main:app", + "ref": "7eb2eca2378ea8855336ed76c8b26219f1cb90234d04441de9cf8cb1c649d003", + "use_latest": false + }, + "variables": { + "VM_CUSTOM_NUMBER": "32" + }, + "on": { + "http": true, + "message": [ + { + "sender": "0xb5F010860b0964090d5414406273E6b3A8726E96", + "channel": "TEST" + }, + { + "content": { + "ref": "4d4db19afca380fdf06ba7f916153d0f740db9de9eee23ad26ba96a90d8a2920" + } + } + ] + }, + "environment": { + "reproducible": true, + "internet": true, + "aleph_api": true, + "shared_cache": true + }, + "resources": { + "vcpus": 1, + "memory": 128, + "seconds": 30 + }, + "runtime": { + "ref": "5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51", + "use_latest": false, + "comment": "Aleph Alpine Linux with Python 3.8" 
+ }, + "volumes": [ + { + "mount": "/opt/venv", + "ref": "5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51", + "use_latest": false + }, + { + "comment": "Working data persisted on the VM supervisor, not available on other nodes", + "mount": "/var/lib/example", + "name": "data", + "persistence": "host", + "size_mib": 5 + } + ], + "data": { + "encoding": "zip", + "mount": "/data", + "ref": "7eb2eca2378ea8855336ed76c8b26219f1cb90234d04441de9cf8cb1c649d003", + "use_latest": false + }, + "export": { + "encoding": "zip", + "mount": "/data" + }, + "replaces": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", + "time": 1619017773.8950517 + }, + "item_content": "{\"type\": \"vm-function\", \"address\": \"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\", \"allow_amend\": false, \"code\": {\"encoding\": \"squashfs\", \"entrypoint\": \"main:app\", \"ref\": \"7eb2eca2378ea8855336ed76c8b26219f1cb90234d04441de9cf8cb1c649d003\", \"use_latest\": false}, \"on\": {\"http\": true, \"message\": [{\"sender\": \"0xB31B787AdA86c6067701d4C0A250c89C7f1f29A5\", \"channel\": \"TEST\"}, {\"content\": {\"ref\": \"4d4db19afca380fdf06ba7f916153d0f740db9de9eee23ad26ba96a90d8a2920\"}}]}, \"environment\": {\"reproducible\": true, \"internet\": true, \"aleph_api\": true, \"shared_cache\": false}, \"resources\": {\"vcpus\": 1, \"memory\": 128, \"seconds\": 30}, \"runtime\": {\"ref\": \"5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51\", \"use_latest\": false, \"comment\": \"Aleph Alpine Linux with Python 3.8\"}, \"volumes\": [{\"mount\": \"/opt/venv\", \"ref\": \"5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51\", \"use_latest\": false}, {\"comment\": \"Working data persisted on the VM supervisor, not available on other nodes\", \"mount\": \"/var/lib/sqlite\", \"name\": \"database\", \"persistence\": \"host\", \"size_mib\": 5}], \"data\": {\"encoding\": \"zip\", \"mount\": \"/data\", \"ref\": \"7eb2eca2378ea8855336ed76c8b26219f1cb90234d04441de9cf8cb1c649d003\", 
\"use_latest\": false}, \"export\": {\"encoding\": \"zip\", \"mount\": \"/data\"}, \"replaces\": \"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\", \"time\": 1619017773.8950517}", + "item_type": "inline", + "signature": "0x372da8230552b8c3e65c05b31a0ff3a24666d66c575f8e11019f62579bf48c2b7fe2f0bbe907a2a5bf8050989cdaf8a59ff8a1cbcafcdef0656c54279b4aa0c71b", + "size": 749, + "time": 1619017773.8950577, + "confirmations": [ + { + "chain": "ETH", + "height": 12284734, + "hash": "0x67f2f3cde5e94e70615c92629c70d22dc959a118f46e9411b29659c2fce87cdc" + } + ] +} diff --git a/examples/qemu_message_from_aleph.json b/examples/qemu_message_from_aleph.json new file mode 100644 index 000000000..65220c198 --- /dev/null +++ b/examples/qemu_message_from_aleph.json @@ -0,0 +1,66 @@ +{ + "chain": "ETH", + "item_hash": "fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-hash", + "sender": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", + "type": "INSTANCE", + "channel": "Fun-dApps", + "confirmed": true, + "content": { + "address": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", + "allow_amend": false, + "variables": { + "VM_CUSTOM_NUMBER": "32" + }, + "environment": { + "reproducible": true, + "internet": true, + "aleph_api": true, + "shared_cache": true, + "hypervisor": "qemu" + }, + "resources": { + "vcpus": 1, + "memory": 512, + "seconds": 30 + }, + "rootfs": { + "parent": { + "ref": "549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613", + "use_latest": false + }, + "persistence": "host", + "size_mib": 5000 + }, + "authorized_keys": [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDj95BHGUx0/z2G/tTrEi8o49i70xvjcEUdSs3j4A33jE7pAphrfRVbuFMgFubcm8n9r5ftd/H8SjjTL4hY9YvWV5ZuMf92GUga3n4wgevvPlBszYZCy/idxFl0vtHYC1CcK9v4tVb9onhDt8FOJkf2m6PmDyvC+6tl6LwoerXTeeiKr5VnTB4KOBkammtFmix3d1X1SZd/cxdwZIHcQ7BNsqBm2w/YzVba6Z4ZnFUelBkQtMQqNs2aV51O1pFFqtZp2mM71D5d8vn9pOtqJ5QmY5IW6NypcyqKJZg5o6QguK5rdXLkc7AWro27BiaHIENl3w0wazp9EDO9zPAGJ6lz olivier@lanius" + ], + "volumes": [ + { + "mount": 
"/opt/venv", + "ref": "5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51", + "use_latest": false + }, + { + "comment": "Working data persisted on the VM supervisor, not available on other nodes", + "mount": "/var/lib/example", + "name": "data", + "persistence": "host", + "size_mib": 5 + } + ], + "replaces": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", + "time": 1619017773.8950517 + }, + "item_content": "{\"address\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"allow_amend\":false,\"variables\":{\"VM_CUSTOM_NUMBER\":\"32\"},\"environment\":{\"reproducible\":true,\"internet\":true,\"aleph_api\":true,\"shared_cache\":true},\"resources\":{\"vcpus\":1,\"memory\":128,\"seconds\":30},\"rootfs\":{\"parent\":{\"ref\":\"549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613\",\"use_latest\":true},\"persistence\":\"host\",\"size_mib\":20000},\"cloud_config\":{\"password\":\"password\",\"chpasswd\":{\"expire\":\"False\"}},\"volumes\":[{\"mount\":\"/opt/venv\",\"ref\":\"5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51\",\"use_latest\":false},{\"comment\":\"Working data persisted on the VM supervisor, not available on other nodes\",\"mount\":\"/var/lib/example\",\"name\":\"data\",\"persistence\":\"host\",\"size_mib\":5}],\"replaces\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"time\":1619017773.8950517}", + "item_type": "inline", + "signature": "0x372da8230552b8c3e65c05b31a0ff3a24666d66c575f8e11019f62579bf48c2b7fe2f0bbe907a2a5bf8050989cdaf8a59ff8a1cbcafcdef0656c54279b4aa0c71b", + "size": 749, + "time": 1619017773.8950577, + "confirmations": [ + { + "chain": "ETH", + "height": 12284734, + "hash": "0x67f2f3cde5e94e70615c92629c70d22dc959a118f46e9411b29659c2fce87cdc" + } + ] +} diff --git a/examples/volumes/Dockerfile b/examples/volumes/Dockerfile new file mode 100644 index 000000000..21a66c82c --- /dev/null +++ b/examples/volumes/Dockerfile @@ -0,0 +1,11 @@ +FROM debian:bookworm + +RUN apt-get update && apt-get -y upgrade && 
apt-get install -y \ + python3-venv \ + squashfs-tools \ + && rm -rf /var/lib/apt/lists/* + +RUN python3 -m venv /opt/venv +RUN /opt/venv/bin/pip install 'aleph-message==0.4.9' + +CMD mksquashfs /opt/venv /mnt/volume-venv.squashfs diff --git a/examples/volumes/build_squashfs.sh b/examples/volumes/build_squashfs.sh new file mode 100755 index 000000000..a48e133a0 --- /dev/null +++ b/examples/volumes/build_squashfs.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +set -euf + +# Use Podman if installed, else use Docker +if hash podman 2> /dev/null +then + DOCKER_COMMAND=podman +else + DOCKER_COMMAND=docker +fi + +$DOCKER_COMMAND build -t aleph-vm-build-squashfs . +$DOCKER_COMMAND run --rm -v "$(pwd)":/mnt aleph-vm-build-squashfs diff --git a/firecracker/__init__.py b/firecracker/__init__.py deleted file mode 100644 index e213855fe..000000000 --- a/firecracker/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from firecracker.microvm import MicroVM diff --git a/firecracker/microvm.py b/firecracker/microvm.py deleted file mode 100644 index 0e3fa65a5..000000000 --- a/firecracker/microvm.py +++ /dev/null @@ -1,338 +0,0 @@ -import asyncio -import base64 -import json -import logging -import os.path -from enum import Enum -from functools import lru_cache -from os import getuid -from pathlib import Path -from pwd import getpwnam - -import aiohttp -from aiohttp import ClientResponse - -logger = logging.getLogger(__name__) - - -class Encoding(str, Enum): - plain = "plain" - zip = "zip" - - -VSOCK_PATH = "/tmp/v.sock" - - -# extend the json.JSONEncoder class to support bytes -class JSONBytesEncoder(json.JSONEncoder): - - # overload method default - def default(self, obj): - - # Match all the types you want to handle in your converter - if isinstance(obj, bytes): - return obj.decode() - return json.JSONEncoder.default(self, obj) - - -def system(command): - logger.debug(command) - return os.system(command) - - -async def setfacl(): - user = getuid() - cmd = f"sudo setfacl -m u:{user}:rw /dev/kvm" - proc = 
await asyncio.create_subprocess_shell( - cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE - ) - stdout, stderr = await proc.communicate() - - if proc.returncode == 0: - return - logger.error(f"[{cmd!r} exited with {[proc.returncode]}]") - if stdout: - logger.error(f"[stdout]\n{stdout.decode()}") - if stderr: - logger.error(f"[stderr]\n{stderr.decode()}") - - -class MicroVM: - vm_id: int - use_jailer: bool - firecracker_bin_path: str - jailer_bin_path: str - proc: asyncio.subprocess.Process = None - - @property - def jailer_path(self): - firecracker_bin_name = os.path.basename(self.firecracker_bin_path) - return f"/srv/jailer/{firecracker_bin_name}/{self.vm_id}/root" - - @property - def socket_path(self): - if self.use_jailer: - return f"{self.jailer_path}/run/firecracker.socket" - else: - return f"/tmp/firecracker-{self.vm_id}.socket" - - @property - def vsock_path(self): - if self.use_jailer: - return f"{self.jailer_path}{VSOCK_PATH}" - else: - return f"{VSOCK_PATH}" - - def __init__( - self, - vm_id: int, - firecracker_bin_path: str, - use_jailer: bool = True, - jailer_bin_path: str = None, - ): - self.vm_id = vm_id - self.use_jailer = use_jailer - self.firecracker_bin_path = firecracker_bin_path - self.jailer_bin_path = jailer_bin_path - - @lru_cache() - def get_session(self) -> aiohttp.ClientSession: - conn = aiohttp.UnixConnector(path=self.socket_path) - return aiohttp.ClientSession(connector=conn) - - def cleanup_jailer(self): - system(f"rm -fr {self.jailer_path}") - - # system(f"rm -fr {self.jailer_path}/run/") - # system(f"rm -fr {self.jailer_path}/dev/") - # system(f"rm -fr {self.jailer_path}/opt/") - # - # if os.path.exists(path=self.vsock_path): - # os.remove(path=self.vsock_path) - # - system(f"mkdir -p {self.jailer_path}/tmp/") - system(f"chown jailman:jailman {self.jailer_path}/tmp/") - # - system(f"mkdir -p {self.jailer_path}/opt") - - # system(f"cp disks/rootfs.ext4 {self.jailer_path}/opt") - # system(f"cp hello-vmlinux.bin 
{self.jailer_path}/opt") - - async def start(self) -> asyncio.subprocess.Process: - if self.use_jailer: - return await self.start_jailed_firecracker() - else: - return await self.start_firecracker() - - async def start_firecracker(self) -> asyncio.subprocess.Process: - logger.debug( - " ".join((self.firecracker_bin_path, "--api-sock", self.socket_path)) - ) - if os.path.exists(VSOCK_PATH): - os.remove(VSOCK_PATH) - if os.path.exists(self.socket_path): - os.remove(self.socket_path) - self.proc = await asyncio.create_subprocess_exec( - self.firecracker_bin_path, - "--api-sock", - self.socket_path, - stdin=asyncio.subprocess.PIPE, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - return self.proc - - async def start_jailed_firecracker(self) -> asyncio.subprocess.Process: - uid = str(getpwnam("jailman").pw_uid) - gid = str(getpwnam("jailman").pw_gid) - logger.debug( - " ".join( - ( - self.jailer_bin_path, - "--id", - str(self.vm_id), - "--exec-file", - self.firecracker_bin_path, - "--uid", - uid, - "--gid", - gid, - ) - ) - ) - self.proc = await asyncio.create_subprocess_exec( - self.jailer_bin_path, - "--id", - str(self.vm_id), - "--exec-file", - self.firecracker_bin_path, - "--uid", - uid, - "--gid", - gid, - stdin=asyncio.subprocess.PIPE, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - return self.proc - - async def socket_is_ready(self, delay=0.01): - while not os.path.exists(self.socket_path): - await asyncio.sleep(delay) - - async def set_boot_source( - self, kernel_image_path: str, enable_console: bool = False - ): - if self.use_jailer: - kernel_filename = Path(kernel_image_path).name - jailer_kernel_image_path = f"/opt/{kernel_filename}" - os.link(kernel_image_path, f"{self.jailer_path}{jailer_kernel_image_path}") - kernel_image_path = jailer_kernel_image_path - - console = "console=ttyS0" if enable_console else "" - data = { - "kernel_image_path": kernel_image_path, - # Add console=ttyS0 for debugging, but it 
makes the boot twice slower - "boot_args": f"{console} reboot=k panic=1 pci=off ro noapic nomodules random.trust_cpu=on", - } - session = self.get_session() - response: ClientResponse = await session.put( - "http://localhost/boot-source", json=data - ) - response.raise_for_status() - - async def set_rootfs(self, path_on_host: str): - if self.use_jailer: - rootfs_filename = Path(path_on_host).name - jailer_path_on_host = f"/opt/{rootfs_filename}" - os.link(path_on_host, f"{self.jailer_path}/{jailer_path_on_host}") - path_on_host = jailer_path_on_host - - data = { - "drive_id": "rootfs", - "path_on_host": path_on_host, - "is_root_device": True, - "is_read_only": True, - } - session = self.get_session() - response = await session.put("http://localhost/drives/rootfs", json=data) - response.raise_for_status() - - async def set_vsock(self): - data = { - "vsock_id": "1", - "guest_cid": 3, - "uds_path": VSOCK_PATH, - } - session = self.get_session() - response = await session.put("http://localhost/vsock", json=data) - response.raise_for_status() - - async def set_network(self): - """Configure the host network with a tap interface to the VM.""" - name = f"tap{self.vm_id}" - - system(f"ip tuntap add {name} mode tap") - system( - f"ip addr add 172.{self.vm_id // 256}.{self.vm_id % 256}.1/24 dev {name}" - ) - system(f"ip link set {name} up") - system('sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"') - # TODO: Don't fill iptables with duplicate rules; purge rules on delete - system("iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE") - system( - "iptables -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT" - ) - system(f"iptables -A FORWARD -i {name} -o eth0 -j ACCEPT") - - data = { - "iface_id": "eth0", - "guest_mac": f"AA:FC:00:00:00:01", - "host_dev_name": name, - } - session = self.get_session() - response = await session.put( - "http://localhost/network-interfaces/eth0", json=data - ) - logger.debug(response) - logger.debug(await response.text()) - 
response.raise_for_status() - - async def start_instance(self): - data = { - "action_type": "InstanceStart", - } - session = self.get_session() - response = await session.put("http://localhost/actions", json=data) - response.raise_for_status() - logger.debug(response) - logger.debug(await response.text()) - - async def print_logs(self): - while not self.proc: - await asyncio.sleep(0.01) # Todo: Use signal here - while True: - stdout = await self.proc.stdout.readline() - if stdout: - print(stdout.decode().strip()) - else: - await asyncio.sleep(0.001) - - async def wait_for_init(self): - """Wait for a connection from the init in the VM""" - logger.debug("Waiting for init...") - queue = asyncio.Queue() - - async def unix_client_connected(*_): - await queue.put(True) - - await asyncio.start_unix_server( - unix_client_connected, path=f"{self.vsock_path}_52" - ) - os.system(f"chown jailman:jailman {self.vsock_path}_52") - await queue.get() - logger.debug("...signal from init received") - - async def run_code( - self, code: bytes, entrypoint: str, encoding: str = "plain", scope: dict = None - ): - scope = scope or {} - reader, writer = await asyncio.open_unix_connection(path=self.vsock_path) - - if encoding == Encoding.zip: - code = base64.b64encode(code).decode() - elif encoding == Encoding.plain: - code = code.decode() - else: - raise ValueError(f"Unknown encoding '{encoding}'") - - msg = { - "code": code, - "entrypoint": entrypoint, - "encoding": encoding, - "scope": scope, - } - writer.write(("CONNECT 52\n" + JSONBytesEncoder().encode(msg) + "\n").encode()) - await writer.drain() - - ack = await reader.readline() - logger.debug(f"ack={ack.decode()}") - response = await reader.read() - logger.debug(f"response= <<<\n{response.decode()}>>>") - writer.close() - await writer.wait_closed() - return response - - async def stop(self): - if self.proc: - self.proc.terminate() - self.proc.kill() - await self.get_session().close() - self.get_session.cache_clear() - - name = 
f"tap{self.vm_id}" - system(f"ip tuntap del {name} mode tap") - - def __del__(self): - loop = asyncio.get_running_loop() - loop.create_task(self.stop()) diff --git a/kernels/build-kernel.sh b/kernels/build-kernel.sh new file mode 100644 index 000000000..61ac7fbc6 --- /dev/null +++ b/kernels/build-kernel.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -euf -o pipefail + +# apt install ncurses-dev flex bison bc + +rm -fr linux-5.10.197 linux-5.10.197.tar linux-5.10.197.tar.sign linux-5.10.197.tar.xz + + +curl -OL "https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.197.tar.xz" +curl -OL "https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.197.tar.sign" +unxz linux-5.10.197.tar.xz + +gpg --locate-keys torvalds@kernel.org gregkh@kernel.org +gpg --verify linux-5.10.197.tar.sign linux-5.10.197.tar + +tar -xvf linux-5.10.197.tar + +cp microvm-kernel-x86_64-5.10.config linux-5.10.197/.config + +cd linux-5.10.197/ +make menuconfig + +make -j$(nproc) vmlinux + +# Copy the updated config locally for documentation +cp linux-5.10.197/.config ./linux.config diff --git a/kernels/linux.config b/kernels/linux.config index 02d05f2d8..e2c590c7f 100644 --- a/kernels/linux.config +++ b/kernels/linux.config @@ -1,16 +1,23 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/x86 4.20.0 Kernel Configuration -# - -# -# Compiler: gcc (Debian 8.3.0-6) 8.3.0 +# Linux/x86 5.10.197 Kernel Configuration # +CONFIG_CC_VERSION_TEXT="gcc (Debian 12.2.0-14) 12.2.0" CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=80300 +CONFIG_GCC_VERSION=120200 +CONFIG_LD_VERSION=240000000 CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=24000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_CC_HAS_ASM_INLINE=y CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_BUILDTIME_TABLE_SORT=y CONFIG_THREAD_INFO_IN_TASK=y # @@ -27,25 +34,27 @@ CONFIG_HAVE_KERNEL_LZMA=y CONFIG_HAVE_KERNEL_XZ=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y CONFIG_KERNEL_GZIP=y # CONFIG_KERNEL_BZIP2 is not set # CONFIG_KERNEL_LZMA is not set # CONFIG_KERNEL_XZ is not set # CONFIG_KERNEL_LZO is not set # CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set CONFIG_CROSS_MEMORY_ATTACH=y # CONFIG_USELIB is not set CONFIG_AUDIT=y CONFIG_HAVE_ARCH_AUDITSYSCALL=y CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y # # IRQ subsystem @@ -55,6 +64,7 @@ CONFIG_GENERIC_IRQ_SHOW=y CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y CONFIG_GENERIC_PENDING_IRQ=y CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y CONFIG_IRQ_DOMAIN=y CONFIG_IRQ_DOMAIN_HIERARCHY=y CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y @@ -62,8 +72,9 @@ CONFIG_GENERIC_IRQ_RESERVATION_MODE=y CONFIG_IRQ_FORCED_THREADING=y CONFIG_SPARSE_IRQ=y # CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + CONFIG_CLOCKSOURCE_WATCHDOG=y -CONFIG_ARCH_CLOCKSOURCE_DATA=y CONFIG_ARCH_CLOCKSOURCE_INIT=y CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y 
CONFIG_GENERIC_TIME_VSYSCALL=y @@ -71,6 +82,8 @@ CONFIG_GENERIC_CLOCKEVENTS=y CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y # # Timers subsystem @@ -82,6 +95,8 @@ CONFIG_NO_HZ_IDLE=y # CONFIG_NO_HZ_FULL is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set @@ -100,6 +115,8 @@ CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y # CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + CONFIG_CPU_ISOLATION=y # @@ -109,16 +126,29 @@ CONFIG_TREE_RCU=y # CONFIG_RCU_EXPERT is not set CONFIG_SRCU=y CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_TRACE_RCU=y CONFIG_RCU_STALL_COMMON=y CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + CONFIG_BUILD_BIN2C=y # CONFIG_IKCONFIG is not set +# CONFIG_IKHEADERS is not set CONFIG_LOG_BUF_SHIFT=21 CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_NUMA_BALANCING=y # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set @@ -126,10 +156,8 @@ CONFIG_CGROUPS=y CONFIG_PAGE_COUNTER=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_SWAP_ENABLED=y CONFIG_MEMCG_KMEM=y CONFIG_BLK_CGROUP=y -# CONFIG_DEBUG_BLK_CGROUP is not set CONFIG_CGROUP_WRITEBACK=y CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y @@ -145,9 +173,11 @@ CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set CONFIG_SOCK_CGROUP_DATA=y CONFIG_NAMESPACES=y CONFIG_UTS_NS=y +CONFIG_TIME_NS=y CONFIG_IPC_NS=y CONFIG_USER_NS=y CONFIG_PID_NS=y 
@@ -164,10 +194,12 @@ CONFIG_RD_LZMA=y CONFIG_RD_XZ=y CONFIG_RD_LZO=y CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y CONFIG_HAVE_UID16=y CONFIG_SYSCTL_EXCEPTION_TRACE=y CONFIG_HAVE_PCSPKR_PLATFORM=y @@ -193,13 +225,20 @@ CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y CONFIG_AIO=y +CONFIG_IO_URING=y CONFIG_ADVISE_SYSCALLS=y +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y CONFIG_MEMBARRIER=y CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y CONFIG_KALLSYMS_BASE_RELATIVE=y CONFIG_BPF_SYSCALL=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +# CONFIG_BPF_PRELOAD is not set CONFIG_USERFAULTFD=y CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y CONFIG_RSEQ=y @@ -210,6 +249,9 @@ CONFIG_HAVE_PERF_EVENTS=y # Kernel Performance Events And Counters # CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + CONFIG_VM_EVENT_COUNTERS=y CONFIG_SLUB_DEBUG=y # CONFIG_COMPAT_BRK is not set @@ -218,15 +260,17 @@ CONFIG_SLUB=y CONFIG_SLAB_MERGE_DEFAULT=y # CONFIG_SLAB_FREELIST_RANDOM is not set CONFIG_SLAB_FREELIST_HARDENED=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set CONFIG_SLUB_CPU_PARTIAL=y CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y +# end of General setup + CONFIG_64BIT=y CONFIG_X86_64=y CONFIG_X86=y CONFIG_INSTRUCTION_DECODER=y CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" CONFIG_LOCKDEP_SUPPORT=y CONFIG_STACKTRACE_SUPPORT=y CONFIG_MMU=y @@ -237,9 +281,7 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 CONFIG_GENERIC_ISA_DMA=y CONFIG_GENERIC_BUG=y CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y CONFIG_ARCH_MAY_HAVE_PC_FDC=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_CALIBRATE_DELAY=y 
CONFIG_ARCH_HAS_CPU_RELAX=y CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y @@ -249,11 +291,9 @@ CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y CONFIG_ARCH_WANT_GENERAL_HUGETLB=y CONFIG_ZONE_DMA32=y CONFIG_AUDIT_ARCH=y -CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y CONFIG_X86_64_SMP=y CONFIG_ARCH_SUPPORTS_UPROBES=y @@ -271,19 +311,22 @@ CONFIG_X86_FEATURE_NAMES=y CONFIG_X86_X2APIC=y CONFIG_X86_MPPARSE=y # CONFIG_GOLDFISH is not set -CONFIG_RETPOLINE=y -# CONFIG_INTEL_RDT is not set +# CONFIG_X86_CPU_RESCTRL is not set # CONFIG_X86_EXTENDED_PLATFORM is not set +# CONFIG_X86_AMD_PLATFORM_DEVICE is not set CONFIG_SCHED_OMIT_FRAME_POINTER=y CONFIG_HYPERVISOR_GUEST=y CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set CONFIG_PARAVIRT_SPINLOCKS=y -# CONFIG_QUEUED_LOCK_STAT is not set +CONFIG_X86_HV_CALLBACK_VECTOR=y # CONFIG_XEN is not set CONFIG_KVM_GUEST=y -CONFIG_KVM_DEBUG_FS=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +# CONFIG_PVH is not set CONFIG_PARAVIRT_TIME_ACCOUNTING=y CONFIG_PARAVIRT_CLOCK=y +# CONFIG_ACRN_GUEST is not set # CONFIG_MK8 is not set # CONFIG_MPSC is not set # CONFIG_MCORE2 is not set @@ -296,12 +339,16 @@ CONFIG_X86_CMPXCHG64=y CONFIG_X86_CMOV=y CONFIG_X86_MINIMUM_CPU_FAMILY=64 CONFIG_X86_DEBUGCTLMSR=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_X86_VMX_FEATURE_NAMES=y CONFIG_CPU_SUP_INTEL=y CONFIG_CPU_SUP_AMD=y CONFIG_CPU_SUP_HYGON=y CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y CONFIG_HPET_TIMER=y CONFIG_DMI=y +# CONFIG_MAXSMP is not set CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=512 CONFIG_NR_CPUS_DEFAULT=64 @@ -318,20 +365,20 @@ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y # Performance monitoring # # CONFIG_PERF_EVENTS_AMD_POWER is not set +# end of Performance monitoring + CONFIG_X86_16BIT=y CONFIG_X86_ESPFIX64=y CONFIG_X86_VSYSCALL_EMULATION=y -# CONFIG_I8K is not set +CONFIG_X86_IOPL_IOPERM=y 
# CONFIG_MICROCODE is not set CONFIG_X86_MSR=y CONFIG_X86_CPUID=y # CONFIG_X86_5LEVEL is not set CONFIG_X86_DIRECT_GBPAGES=y # CONFIG_X86_CPA_STATISTICS is not set -CONFIG_ARCH_HAS_MEM_ENCRYPT=y CONFIG_AMD_MEM_ENCRYPT=y # CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set -CONFIG_ARCH_USE_MEMREMAP_PROT=y CONFIG_NUMA=y # CONFIG_NUMA_EMU is not set CONFIG_NODES_SHIFT=10 @@ -353,10 +400,12 @@ CONFIG_X86_PAT=y CONFIG_ARCH_USES_PG_UNCACHED=y CONFIG_ARCH_RANDOM=y CONFIG_X86_SMAP=y -CONFIG_X86_INTEL_UMIP=y -# CONFIG_X86_INTEL_MPX is not set +CONFIG_X86_UMIP=y CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y -CONFIG_SECCOMP=y +CONFIG_X86_INTEL_TSX_MODE_OFF=y +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +# CONFIG_X86_INTEL_TSX_MODE_AUTO is not set +# CONFIG_EFI is not set # CONFIG_HZ_100 is not set CONFIG_HZ_250=y # CONFIG_HZ_300 is not set @@ -366,8 +415,7 @@ CONFIG_SCHED_HRTICK=y # CONFIG_KEXEC is not set CONFIG_KEXEC_FILE=y CONFIG_ARCH_HAS_KEXEC_PURGATORY=y -CONFIG_KEXEC_VERIFY_SIG=y -CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y +# CONFIG_KEXEC_SIG is not set # CONFIG_CRASH_DUMP is not set CONFIG_PHYSICAL_START=0x1000000 CONFIG_RELOCATABLE=y @@ -378,10 +426,25 @@ CONFIG_HOTPLUG_CPU=y # CONFIG_DEBUG_HOTPLUG_CPU0 is not set # CONFIG_COMPAT_VDSO is not set CONFIG_LEGACY_VSYSCALL_EMULATE=y +# CONFIG_LEGACY_VSYSCALL_XONLY is not set # CONFIG_LEGACY_VSYSCALL_NONE is not set # CONFIG_CMDLINE_BOOL is not set CONFIG_MODIFY_LDT_SYSCALL=y CONFIG_HAVE_LIVEPATCH=y +# end of Processor type and features + +CONFIG_CC_HAS_SLS=y +CONFIG_CC_HAS_RETURN_THUNK=y +CONFIG_SPECULATION_MITIGATIONS=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_RETPOLINE=y +CONFIG_RETHUNK=y +CONFIG_CPU_UNRET_ENTRY=y +CONFIG_CPU_IBPB_ENTRY=y +CONFIG_CPU_IBRS_ENTRY=y +CONFIG_CPU_SRSO=y +# CONFIG_SLS is not set +# CONFIG_GDS_FORCE_MITIGATION is not set CONFIG_ARCH_HAS_ADD_PAGES=y CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y @@ -397,6 +460,7 @@ CONFIG_ARCH_HIBERNATION_HEADER=y # CONFIG_SUSPEND is not set 
CONFIG_HIBERNATE_CALLBACKS=y CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y CONFIG_PM_STD_PARTITION="" CONFIG_PM_SLEEP=y CONFIG_PM_SLEEP_SMP=y @@ -405,36 +469,81 @@ CONFIG_PM_SLEEP_SMP=y CONFIG_PM=y # CONFIG_PM_DEBUG is not set # CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# CONFIG_ENERGY_MODEL is not set CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_HOTPLUG_CPU=y +# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_CONTAINER=y +# CONFIG_ACPI_HOTPLUG_MEMORY is not set +# CONFIG_ACPI_SBS is not set +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_NFIT is not set +# CONFIG_ACPI_NUMA is not set +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +# CONFIG_ACPI_APEI is not set +# CONFIG_ACPI_DPTF is not set +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_X86_PM_TIMER=y # CONFIG_SFI is not set # # CPU Frequency scaling # CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set 
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y # CONFIG_CPU_FREQ_GOV_POWERSAVE is not set # CONFIG_CPU_FREQ_GOV_USERSPACE is not set # CONFIG_CPU_FREQ_GOV_ONDEMAND is not set # CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y # # CPU frequency scaling drivers # CONFIG_X86_INTEL_PSTATE=y +# CONFIG_X86_PCC_CPUFREQ is not set +# CONFIG_X86_ACPI_CPUFREQ is not set +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set # CONFIG_X86_P4_CLOCKMOD is not set # # shared options # +# end of CPU Frequency scaling # # CPU Idle @@ -442,34 +551,31 @@ CONFIG_X86_INTEL_PSTATE=y CONFIG_CPU_IDLE=y CONFIG_CPU_IDLE_GOV_LADDER=y CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set +# CONFIG_CPU_IDLE_GOV_HALTPOLL is not set +CONFIG_HALTPOLL_CPUIDLE=y +# end of CPU Idle + CONFIG_INTEL_IDLE=y +# end of Power management and ACPI options # # Bus options (PCI etc.) # -# CONFIG_PCI is not set -CONFIG_PCI_LOCKLESS_CONFIG=y - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set CONFIG_ISA_DMA_API=y -# CONFIG_PCCARD is not set # CONFIG_X86_SYSFB is not set +# end of Bus options (PCI etc.) 
# # Binary Emulations # CONFIG_IA32_EMULATION=y -# CONFIG_IA32_AOUT is not set # CONFIG_X86_X32 is not set CONFIG_COMPAT_32=y CONFIG_COMPAT=y CONFIG_COMPAT_FOR_U64_ALIGNMENT=y CONFIG_SYSVIPC_COMPAT=y -CONFIG_X86_DEV_DMA_OPS=y -CONFIG_HAVE_GENERIC_GUP=y +# end of Binary Emulations # # Firmware Drivers @@ -479,14 +585,22 @@ CONFIG_FIRMWARE_MEMMAP=y CONFIG_DMIID=y # CONFIG_DMI_SYSFS is not set CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +# CONFIG_ISCSI_IBFT is not set # CONFIG_FW_CFG_SYSFS is not set # CONFIG_GOOGLE_FIRMWARE is not set # # Tegra firmware driver # +# end of Tegra firmware driver +# end of Firmware Drivers + CONFIG_HAVE_KVM=y # CONFIG_VIRTUALIZATION is not set +CONFIG_AS_AVX512=y +CONFIG_AS_SHA1_NI=y +CONFIG_AS_SHA256_NI=y +CONFIG_AS_TPAUSE=y # # General architecture-dependent options @@ -494,12 +608,14 @@ CONFIG_HAVE_KVM=y CONFIG_CRASH_CORE=y CONFIG_KEXEC_CORE=y CONFIG_HOTPLUG_SMT=y +CONFIG_GENERIC_ENTRY=y # CONFIG_OPROFILE is not set CONFIG_HAVE_OPROFILE=y CONFIG_OPROFILE_NMI_TIMER=y CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y # CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_STATIC_CALL_SELFTEST is not set CONFIG_OPTPROBES=y CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y CONFIG_ARCH_USE_BUILTIN_BSWAP=y @@ -516,8 +632,11 @@ CONFIG_HAVE_DMA_CONTIGUOUS=y CONFIG_GENERIC_SMP_IDLE_THREAD=y CONFIG_ARCH_HAS_FORTIFY_SOURCE=y CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_ASM_MODVERSIONS=y CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y CONFIG_HAVE_RSEQ=y CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y @@ -530,32 +649,34 @@ CONFIG_HAVE_PERF_REGS=y CONFIG_HAVE_PERF_USER_STACK_DUMP=y CONFIG_HAVE_ARCH_JUMP_LABEL=y CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y -CONFIG_HAVE_RCU_TABLE_FREE=y -CONFIG_HAVE_RCU_TABLE_INVALIDATE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y 
CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y CONFIG_HAVE_CMPXCHG_LOCAL=y CONFIG_HAVE_CMPXCHG_DOUBLE=y CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP=y CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y CONFIG_SECCOMP_FILTER=y CONFIG_HAVE_ARCH_STACKLEAK=y CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_CC_HAS_STACKPROTECTOR_NONE=y CONFIG_STACKPROTECTOR=y CONFIG_STACKPROTECTOR_STRONG=y CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y CONFIG_HAVE_CONTEXT_TRACKING=y CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PMD=y CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y CONFIG_HAVE_ARCH_SOFT_DIRTY=y CONFIG_HAVE_MOD_ARCH_SPECIFIC=y CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y CONFIG_ARCH_HAS_ELF_RANDOMIZE=y CONFIG_HAVE_ARCH_MMAP_RND_BITS=y CONFIG_HAVE_EXIT_THREAD=y @@ -563,7 +684,6 @@ CONFIG_ARCH_MMAP_RND_BITS=28 CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y -CONFIG_HAVE_COPY_THREAD_TLS=y CONFIG_HAVE_STACK_VALIDATION=y CONFIG_HAVE_RELIABLE_STACKTRACE=y CONFIG_OLD_SIGSUSPEND3=y @@ -575,24 +695,34 @@ CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y CONFIG_STRICT_KERNEL_RWX=y CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_REFCOUNT=y -# CONFIG_REFCOUNT_FULL is not set CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +CONFIG_ARCH_HAS_CC_PLATFORM=y +CONFIG_HAVE_STATIC_CALL=y +CONFIG_HAVE_STATIC_CALL_INLINE=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y # # GCOV-based kernel profiling # # CONFIG_GCOV_KERNEL is not set CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -CONFIG_PLUGIN_HOSTCC="" +# end of GCOV-based kernel profiling + CONFIG_HAVE_GCC_PLUGINS=y +# end of General architecture-dependent options + CONFIG_RT_MUTEXES=y 
CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_MODULE_SIG=y # CONFIG_MODULE_SIG_FORCE is not set @@ -604,9 +734,12 @@ CONFIG_MODULE_SIG_ALL=y CONFIG_MODULE_SIG_SHA512=y CONFIG_MODULE_SIG_HASH="sha512" # CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_UNUSED_SYMBOLS=y CONFIG_MODULES_TREE_LOOKUP=y CONFIG_BLOCK=y CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_CGROUP_RWSTAT=y CONFIG_BLK_DEV_BSG=y CONFIG_BLK_DEV_BSGLIB=y CONFIG_BLK_DEV_INTEGRITY=y @@ -616,10 +749,11 @@ CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_CMDLINE_PARSER=y CONFIG_BLK_WBT=y # CONFIG_BLK_CGROUP_IOLATENCY is not set -# CONFIG_BLK_WBT_SQ is not set +# CONFIG_BLK_CGROUP_IOCOST is not set CONFIG_BLK_WBT_MQ=y CONFIG_BLK_DEBUG_FS=y # CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set # # Partition Types @@ -631,7 +765,11 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_AMIGA_PARTITION is not set # CONFIG_ATARI_PARTITION is not set # CONFIG_MAC_PARTITION is not set -# CONFIG_MSDOS_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set # CONFIG_LDM_PARTITION is not set # CONFIG_SGI_PARTITION is not set # CONFIG_ULTRIX_PARTITION is not set @@ -640,6 +778,8 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_EFI_PARTITION is not set # CONFIG_SYSV68_PARTITION is not set # CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + CONFIG_BLOCK_COMPAT=y CONFIG_BLK_MQ_VIRTIO=y CONFIG_BLK_PM=y @@ -647,16 +787,11 @@ CONFIG_BLK_PM=y # # IO Schedulers # -CONFIG_IOSCHED_NOOP=y -# CONFIG_IOSCHED_DEADLINE is not set -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_CFQ is not set -CONFIG_DEFAULT_NOOP=y 
-CONFIG_DEFAULT_IOSCHED="noop" # CONFIG_MQ_IOSCHED_DEADLINE is not set # CONFIG_MQ_IOSCHED_KYBER is not set # CONFIG_IOSCHED_BFQ is not set +# end of IO Schedulers + CONFIG_ASN1=y CONFIG_INLINE_SPIN_UNLOCK_IRQ=y CONFIG_INLINE_READ_UNLOCK=y @@ -671,6 +806,7 @@ CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y CONFIG_QUEUED_SPINLOCKS=y CONFIG_ARCH_USE_QUEUED_RWLOCKS=y CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y CONFIG_FREEZER=y @@ -685,6 +821,7 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_BINFMT_SCRIPT=y CONFIG_BINFMT_MISC=y CONFIG_COREDUMP=y +# end of Executable file formats # # Memory Management options @@ -693,12 +830,11 @@ CONFIG_SELECT_MEMORY_MODEL=y CONFIG_SPARSEMEM_MANUAL=y CONFIG_SPARSEMEM=y CONFIG_NEED_MULTIPLE_NODES=y -CONFIG_HAVE_MEMORY_PRESENT=y CONFIG_SPARSEMEM_EXTREME=y CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK_NODE_MAP=y -CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_NUMA_KEEP_MEMINFO=y CONFIG_MEMORY_ISOLATION=y CONFIG_HAVE_BOOTMEM_INFO_NODE=y CONFIG_MEMORY_HOTPLUG=y @@ -707,9 +843,11 @@ CONFIG_MEMORY_HOTPLUG_SPARSE=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y +# CONFIG_BALLOON_COMPACTION is not set CONFIG_COMPACTION=y +CONFIG_PAGE_REPORTING=y CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y CONFIG_PHYS_ADDR_T_64BIT=y CONFIG_BOUNCE=y CONFIG_VIRT_TO_BUS=y @@ -720,7 +858,6 @@ CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y CONFIG_ARCH_WANTS_THP_SWAP=y CONFIG_THP_SWAP=y -CONFIG_TRANSPARENT_HUGE_PAGECACHE=y CONFIG_CLEANCACHE=y CONFIG_FRONTSWAP=y # CONFIG_CMA is not set @@ -732,15 +869,19 @@ CONFIG_ZPOOL=y CONFIG_GENERIC_EARLY_IOREMAP=y # CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set # CONFIG_IDLE_PAGE_TRACKING is not set -CONFIG_ARCH_HAS_ZONE_DEVICE=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y # CONFIG_ZONE_DEVICE is not set 
CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y CONFIG_ARCH_HAS_PKEYS=y CONFIG_PERCPU_STATS=y # CONFIG_GUP_BENCHMARK is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set CONFIG_ARCH_HAS_PTE_SPECIAL=y +# end of Memory Management options + CONFIG_NET=y CONFIG_NET_INGRESS=y +CONFIG_SKB_EXTENSIONS=y # # Networking options @@ -748,11 +889,13 @@ CONFIG_NET_INGRESS=y CONFIG_PACKET=y # CONFIG_PACKET_DIAG is not set CONFIG_UNIX=y +CONFIG_UNIX_SCM=y # CONFIG_UNIX_DIAG is not set # CONFIG_TLS is not set CONFIG_XFRM=y CONFIG_XFRM_ALGO=y CONFIG_XFRM_USER=y +# CONFIG_XFRM_USER_COMPAT is not set # CONFIG_XFRM_INTERFACE is not set CONFIG_XFRM_SUB_POLICY=y CONFIG_XFRM_MIGRATE=y @@ -778,13 +921,12 @@ CONFIG_IP_MROUTE_MULTIPLE_TABLES=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y CONFIG_SYN_COOKIES=y +# CONFIG_NET_IPVTI is not set # CONFIG_NET_FOU is not set # CONFIG_INET_AH is not set # CONFIG_INET_ESP is not set # CONFIG_INET_IPCOMP is not set -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set +CONFIG_INET_TABLE_PERTURB_ORDER=16 # CONFIG_INET_DIAG is not set CONFIG_TCP_CONG_ADVANCED=y # CONFIG_TCP_CONG_BIC is not set @@ -816,10 +958,7 @@ CONFIG_IPV6_OPTIMISTIC_DAD=y # CONFIG_INET6_IPCOMP is not set # CONFIG_IPV6_MIP6 is not set # CONFIG_IPV6_ILA is not set -# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET6_XFRM_MODE_TUNNEL is not set -# CONFIG_INET6_XFRM_MODE_BEET is not set -# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +# CONFIG_IPV6_VTI is not set # CONFIG_IPV6_SIT is not set # CONFIG_IPV6_TUNNEL is not set CONFIG_IPV6_MULTIPLE_TABLES=y @@ -830,7 +969,9 @@ CONFIG_IPV6_PIMSM_V2=y CONFIG_IPV6_SEG6_LWTUNNEL=y CONFIG_IPV6_SEG6_HMAC=y CONFIG_IPV6_SEG6_BPF=y +# CONFIG_IPV6_RPL_LWTUNNEL is not set CONFIG_NETLABEL=y +# CONFIG_MPTCP is not set CONFIG_NETWORK_SECMARK=y CONFIG_NET_PTP_CLASSIFY=y CONFIG_NETWORK_PHY_TIMESTAMPING=y @@ -874,11 +1015,8 @@ CONFIG_NF_CT_PROTO_UDPLITE=y # CONFIG_NF_CT_NETLINK is not 
set # CONFIG_NF_CT_NETLINK_TIMEOUT is not set CONFIG_NF_NAT=y -CONFIG_NF_NAT_NEEDED=y -CONFIG_NF_NAT_PROTO_DCCP=y -CONFIG_NF_NAT_PROTO_UDPLITE=y -CONFIG_NF_NAT_PROTO_SCTP=y CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y CONFIG_NETFILTER_SYNPROXY=y # CONFIG_NF_TABLES is not set CONFIG_NETFILTER_XTABLES=y @@ -909,6 +1047,7 @@ CONFIG_NETFILTER_XT_TARGET_NETMAP=y # CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set # CONFIG_NETFILTER_XT_TARGET_RATEEST is not set CONFIG_NETFILTER_XT_TARGET_REDIRECT=y +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y # CONFIG_NETFILTER_XT_TARGET_TEE is not set # CONFIG_NETFILTER_XT_TARGET_TPROXY is not set # CONFIG_NETFILTER_XT_TARGET_SECMARK is not set @@ -963,6 +1102,8 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y # CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set # CONFIG_NETFILTER_XT_MATCH_TIME is not set # CONFIG_NETFILTER_XT_MATCH_U32 is not set +# end of Core Netfilter Configuration + # CONFIG_IP_SET is not set # CONFIG_IP_VS is not set @@ -976,8 +1117,6 @@ CONFIG_NF_DEFRAG_IPV4=y CONFIG_NF_LOG_ARP=y CONFIG_NF_LOG_IPV4=y CONFIG_NF_REJECT_IPV4=y -CONFIG_NF_NAT_IPV4=y -CONFIG_NF_NAT_MASQUERADE_IPV4=y CONFIG_IP_NF_IPTABLES=y # CONFIG_IP_NF_MATCH_AH is not set # CONFIG_IP_NF_MATCH_ECN is not set @@ -997,6 +1136,7 @@ CONFIG_IP_NF_MANGLE=y # CONFIG_IP_NF_RAW is not set # CONFIG_IP_NF_SECURITY is not set # CONFIG_IP_NF_ARPTABLES is not set +# end of IP: Netfilter Configuration # # IPv6: Netfilter Configuration @@ -1006,9 +1146,11 @@ CONFIG_IP_NF_MANGLE=y # CONFIG_NF_DUP_IPV6 is not set # CONFIG_NF_REJECT_IPV6 is not set # CONFIG_NF_LOG_IPV6 is not set -# CONFIG_NF_NAT_IPV6 is not set # CONFIG_IP6_NF_IPTABLES is not set +# end of IPv6: Netfilter Configuration + CONFIG_NF_DEFRAG_IPV6=y +# CONFIG_NF_CONNTRACK_BRIDGE is not set # CONFIG_BRIDGE_NF_EBTABLES is not set # CONFIG_BPFILTER is not set # CONFIG_IP_DCCP is not set @@ -1020,10 +1162,10 @@ CONFIG_NF_DEFRAG_IPV6=y CONFIG_STP=y CONFIG_BRIDGE=y CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_BRIDGE_MRP is not set 
CONFIG_HAVE_NET_DSA=y # CONFIG_NET_DSA is not set # CONFIG_VLAN_8021Q is not set -# CONFIG_DECNET is not set CONFIG_LLC=y # CONFIG_LLC2 is not set # CONFIG_ATALK is not set @@ -1066,6 +1208,7 @@ CONFIG_NET_SCHED=y # CONFIG_NET_SCH_PIE is not set # CONFIG_NET_SCH_INGRESS is not set # CONFIG_NET_SCH_PLUG is not set +# CONFIG_NET_SCH_ETS is not set # CONFIG_NET_SCH_DEFAULT is not set # @@ -1073,12 +1216,9 @@ CONFIG_NET_SCHED=y # CONFIG_NET_CLS=y # CONFIG_NET_CLS_BASIC is not set -# CONFIG_NET_CLS_TCINDEX is not set # CONFIG_NET_CLS_ROUTE4 is not set # CONFIG_NET_CLS_FW is not set # CONFIG_NET_CLS_U32 is not set -# CONFIG_NET_CLS_RSVP is not set -# CONFIG_NET_CLS_RSVP6 is not set # CONFIG_NET_CLS_FLOW is not set # CONFIG_NET_CLS_CGROUP is not set # CONFIG_NET_CLS_BPF is not set @@ -1103,12 +1243,16 @@ CONFIG_NET_CLS_ACT=y # CONFIG_NET_ACT_SIMP is not set # CONFIG_NET_ACT_SKBEDIT is not set # CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_ACT_MPLS is not set # CONFIG_NET_ACT_VLAN is not set # CONFIG_NET_ACT_BPF is not set # CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set # CONFIG_NET_ACT_SKBMOD is not set # CONFIG_NET_ACT_IFE is not set # CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_ACT_GATE is not set +# CONFIG_NET_TC_SKB_EXT is not set CONFIG_NET_SCH_FIFO=y CONFIG_DCB=y # CONFIG_DNS_RESOLVER is not set @@ -1116,6 +1260,7 @@ CONFIG_DCB=y # CONFIG_OPENVSWITCH is not set CONFIG_VSOCKETS=y CONFIG_VSOCKETS_DIAG=y +CONFIG_VSOCKETS_LOOPBACK=y CONFIG_VIRTIO_VSOCKETS=y CONFIG_VIRTIO_VSOCKETS_COMMON=y # CONFIG_NETLINK_DIAG is not set @@ -1126,6 +1271,7 @@ CONFIG_MPLS=y # CONFIG_HSR is not set # CONFIG_NET_SWITCHDEV is not set # CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_QRTR is not set # CONFIG_NET_NCSI is not set CONFIG_RPS=y CONFIG_RFS_ACCEL=y @@ -1142,6 +1288,9 @@ CONFIG_NET_FLOW_LIMIT=y # Network testing # # CONFIG_NET_PKTGEN is not set +# end of Network testing +# end of Networking options + # CONFIG_HAMRADIO is not set # CONFIG_CAN is not set 
# CONFIG_BT is not set @@ -1163,14 +1312,18 @@ CONFIG_LWTUNNEL_BPF=y CONFIG_DST_CACHE=y CONFIG_GRO_CELLS=y CONFIG_NET_SOCK_MSG=y -# CONFIG_NET_DEVLINK is not set -CONFIG_MAY_USE_DEVLINK=y CONFIG_FAILOVER=y +CONFIG_ETHTOOL_NETLINK=y CONFIG_HAVE_EBPF_JIT=y # # Device Drivers # +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +# CONFIG_PCI is not set +# CONFIG_PCCARD is not set # # Generic Driver Options @@ -1188,16 +1341,27 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_FW_LOADER=y CONFIG_EXTRA_FIRMWARE="" # CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# end of Firmware loader + CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set # CONFIG_TEST_ASYNC_DRIVER_PROBE is not set CONFIG_GENERIC_CPU_AUTOPROBE=y CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_DMA_SHARED_BUFFER=y # CONFIG_DMA_FENCE_TRACE is not set +# end of Generic Driver Options # # Bus devices # +# CONFIG_MHI_BUS is not set +# end of Bus devices + CONFIG_CONNECTOR=y CONFIG_PROC_EVENTS=y # CONFIG_GNSS is not set @@ -1205,6 +1369,13 @@ CONFIG_PROC_EVENTS=y # CONFIG_OF is not set CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y # CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_NULL_BLK is not set # CONFIG_BLK_DEV_FD is not set @@ -1217,13 +1388,14 @@ CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 # CONFIG_CDROM_PKTCDVD is not set # CONFIG_ATA_OVER_ETH is not set CONFIG_VIRTIO_BLK=y -# CONFIG_VIRTIO_BLK_SCSI is not set # CONFIG_BLK_DEV_RBD is not set # # NVME Support # # CONFIG_NVME_FC is not set +# CONFIG_NVME_TCP is not set +# end of NVME Support # # Misc devices @@ -1231,57 +1403,27 @@ CONFIG_VIRTIO_BLK=y # CONFIG_DUMMY_IRQ is not set # CONFIG_ENCLOSURE_SERVICES is not set # CONFIG_SRAM is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_PVPANIC is not set # CONFIG_C2PORT is not set # # 
EEPROM support # # CONFIG_EEPROM_93CX6 is not set +# end of EEPROM support # # Texas Instruments shared transport line discipline # +# end of Texas Instruments shared transport line discipline # # Altera FPGA firmware download module (requires I2C) # - -# -# Intel MIC & related support -# - -# -# Intel MIC Bus Driver -# - -# -# SCIF Bus Driver -# - -# -# VOP Bus Driver -# - -# -# Intel MIC Host Driver -# - -# -# Intel MIC Card Driver -# - -# -# SCIF Driver -# - -# -# Intel MIC Coprocessor State Management (COSM) Drivers -# - -# -# VOP Driver -# # CONFIG_ECHO is not set +# end of Misc devices + CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -1292,7 +1434,6 @@ CONFIG_SCSI_MOD=y # CONFIG_RAID_ATTRS is not set CONFIG_SCSI=y CONFIG_SCSI_DMA=y -CONFIG_SCSI_MQ_DEFAULT=y CONFIG_SCSI_PROC_FS=y # @@ -1300,7 +1441,6 @@ CONFIG_SCSI_PROC_FS=y # # CONFIG_BLK_DEV_SD is not set # CONFIG_CHR_DEV_ST is not set -# CONFIG_CHR_DEV_OSST is not set # CONFIG_BLK_DEV_SR is not set # CONFIG_CHR_DEV_SG is not set # CONFIG_CHR_DEV_SCH is not set @@ -1317,6 +1457,8 @@ CONFIG_SCSI_ISCSI_ATTRS=y # CONFIG_SCSI_SAS_ATTRS is not set # CONFIG_SCSI_SAS_LIBSAS is not set # CONFIG_SCSI_SRP_ATTRS is not set +# end of SCSI Transports + CONFIG_SCSI_LOWLEVEL=y CONFIG_ISCSI_TCP=y # CONFIG_ISCSI_BOOT_SYSFS is not set @@ -1324,7 +1466,8 @@ CONFIG_ISCSI_TCP=y # CONFIG_SCSI_DEBUG is not set # CONFIG_SCSI_VIRTIO is not set # CONFIG_SCSI_DH is not set -# CONFIG_SCSI_OSD_INITIATOR is not set +# end of SCSI device support + # CONFIG_ATA is not set # CONFIG_MD is not set # CONFIG_TARGET_CORE is not set @@ -1333,12 +1476,15 @@ CONFIG_NETDEVICES=y CONFIG_NET_CORE=y # CONFIG_BONDING is not set # CONFIG_DUMMY is not set +# CONFIG_WIREGUARD is not set # CONFIG_EQUALIZER is not set -# CONFIG_IFB is not set # CONFIG_NET_TEAM is not set # CONFIG_MACVLAN is not set # CONFIG_IPVLAN is not set # CONFIG_VXLAN is not set +# CONFIG_GENEVE is not set +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set # CONFIG_MACSEC is not set # 
CONFIG_NETCONSOLE is not set CONFIG_TUN=y @@ -1347,16 +1493,21 @@ CONFIG_VETH=y CONFIG_VIRTIO_NET=y # CONFIG_NLMON is not set -# -# CAIF transport drivers -# - # # Distributed Switch Architecture drivers # +# end of Distributed Switch Architecture drivers + # CONFIG_ETHERNET is not set -# CONFIG_MDIO_DEVICE is not set +# CONFIG_NET_SB1000 is not set # CONFIG_PHYLIB is not set +# CONFIG_MDIO_DEVICE is not set + +# +# PCS device drivers +# +# end of PCS device drivers + # CONFIG_PPP is not set # CONFIG_SLIP is not set @@ -1369,10 +1520,10 @@ CONFIG_VIRTIO_NET=y # Enable WiMAX (Networking options) to see the WiMAX drivers # # CONFIG_WAN is not set +# CONFIG_FUJITSU_ES is not set # CONFIG_NETDEVSIM is not set CONFIG_NET_FAILOVER=y # CONFIG_ISDN is not set -# CONFIG_NVM is not set # # Input device support @@ -1408,6 +1559,8 @@ CONFIG_INPUT=y # CONFIG_SERIO is not set CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y # CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support # # Character devices @@ -1421,11 +1574,7 @@ CONFIG_HW_CONSOLE=y CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_UNIX98_PTYS=y # CONFIG_LEGACY_PTYS is not set -# CONFIG_SERIAL_NONSTANDARD is not set -# CONFIG_N_GSM is not set -# CONFIG_TRACE_SINK is not set -CONFIG_DEVMEM=y -# CONFIG_DEVKMEM is not set +CONFIG_LDISC_AUTOLOAD=y # # Serial drivers @@ -1433,6 +1582,8 @@ CONFIG_DEVMEM=y CONFIG_SERIAL_EARLYCON=y CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_16550A_VARIANTS is not set # CONFIG_SERIAL_8250_FINTEK is not set CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DMA=y @@ -1448,30 +1599,45 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=1 # CONFIG_SERIAL_UARTLITE is not set CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_LANTIQ is not set # CONFIG_SERIAL_SCCNXP is not set # CONFIG_SERIAL_ALTERA_JTAGUART is not set # CONFIG_SERIAL_ALTERA_UART is not set # CONFIG_SERIAL_ARC is not set # CONFIG_SERIAL_FSL_LPUART is not 
set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# end of Serial drivers + +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set +# CONFIG_NULL_TTY is not set +# CONFIG_TRACE_SINK is not set +CONFIG_HVC_DRIVER=y CONFIG_SERIAL_DEV_BUS=y CONFIG_SERIAL_DEV_CTRL_TTYPORT=y -CONFIG_HVC_DRIVER=y CONFIG_VIRTIO_CONSOLE=y # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set -# CONFIG_NVRAM is not set -# CONFIG_R3964 is not set # CONFIG_MWAVE is not set +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set +# CONFIG_NVRAM is not set # CONFIG_RAW_DRIVER is not set +# CONFIG_HPET is not set # CONFIG_HANGCHECK_TIMER is not set # CONFIG_TCG_TPM is not set # CONFIG_TELCLOCK is not set # CONFIG_RANDOM_TRUST_CPU is not set +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set +# end of Character devices # # I2C support # # CONFIG_I2C is not set +# end of I2C support + +# CONFIG_I3C is not set # CONFIG_SPI is not set # CONFIG_SPMI is not set # CONFIG_HSI is not set @@ -1498,10 +1664,12 @@ CONFIG_PTP_1588_CLOCK=y # Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. 
# CONFIG_PTP_1588_CLOCK_KVM=y +# CONFIG_PTP_1588_CLOCK_VMW is not set +# end of PTP clock support + # CONFIG_PINCTRL is not set # CONFIG_GPIOLIB is not set # CONFIG_W1 is not set -# CONFIG_POWER_AVS is not set CONFIG_POWER_RESET=y # CONFIG_POWER_RESET_RESTART is not set CONFIG_POWER_SUPPLY=y @@ -1514,24 +1682,30 @@ CONFIG_POWER_SUPPLY=y # CONFIG_CHARGER_MAX8903 is not set # CONFIG_HWMON is not set CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set # CONFIG_THERMAL_STATISTICS is not set CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 CONFIG_THERMAL_WRITABLE_TRIPS=y CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y # CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set # CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set CONFIG_THERMAL_GOV_FAIR_SHARE=y CONFIG_THERMAL_GOV_STEP_WISE=y # CONFIG_THERMAL_GOV_BANG_BANG is not set CONFIG_THERMAL_GOV_USER_SPACE=y -# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set # CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# # CONFIG_INTEL_POWERCLAMP is not set # # ACPI INT340X thermal drivers # +# end of ACPI INT340X thermal drivers +# end of Intel thermal drivers + # CONFIG_WATCHDOG is not set CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set @@ -1541,51 +1715,58 @@ CONFIG_BCMA_POSSIBLE=y # # Multifunction device drivers # -# CONFIG_MFD_AT91_USART is not set -# CONFIG_MFD_CROS_EC is not set # CONFIG_MFD_MADERA is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_INTEL_LPSS_ACPI is not set +# CONFIG_MFD_INTEL_PMC_BXT is not set # CONFIG_MFD_KEMPLD is not set # CONFIG_MFD_MT6397 is not set # CONFIG_MFD_SM501 is not set # CONFIG_ABX500_CORE is not set # CONFIG_MFD_SYSCON is not set # CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_TQMX86 is not set # CONFIG_RAVE_SP_CORE is not set +# end of Multifunction device drivers + # CONFIG_REGULATOR is not set # CONFIG_RC_CORE is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set # CONFIG_MEDIA_SUPPORT is not set # # Graphics support # # 
CONFIG_DRM is not set -# CONFIG_DRM_DP_CEC is not set # -# ACP (Audio CoProcessor) Configuration +# ARM devices # +# end of ARM devices # -# AMD Library routines +# Frame buffer Devices # +# CONFIG_FB is not set +# end of Frame buffer Devices # -# Frame buffer Devices +# Backlight & LCD device support # -# CONFIG_FB is not set -CONFIG_BACKLIGHT_LCD_SUPPORT=y # CONFIG_LCD_CLASS_DEVICE is not set # CONFIG_BACKLIGHT_CLASS_DEVICE is not set +# end of Backlight & LCD device support # # Console display driver support # CONFIG_VGA_CONSOLE=y -# CONFIG_VGACON_SOFT_SCROLLBACK is not set CONFIG_DUMMY_CONSOLE=y CONFIG_DUMMY_CONSOLE_COLUMNS=80 CONFIG_DUMMY_CONSOLE_ROWS=25 +# end of Console display driver support +# end of Graphics support + # CONFIG_SOUND is not set # @@ -1606,8 +1787,8 @@ CONFIG_HIDRAW=y # CONFIG_HID_AUREAL is not set # CONFIG_HID_BELKIN is not set # CONFIG_HID_CHERRY is not set -# CONFIG_HID_CHICONY is not set # CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set # CONFIG_HID_CMEDIA is not set # CONFIG_HID_CYPRESS is not set # CONFIG_HID_DRAGONRISE is not set @@ -1616,9 +1797,12 @@ CONFIG_HIDRAW=y # CONFIG_HID_EZKEY is not set # CONFIG_HID_GEMBIRD is not set # CONFIG_HID_GFRM is not set +# CONFIG_HID_GLORIOUS is not set +# CONFIG_HID_VIVALDI is not set # CONFIG_HID_KEYTOUCH is not set # CONFIG_HID_KYE is not set # CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set # CONFIG_HID_GYRATION is not set # CONFIG_HID_ICADE is not set # CONFIG_HID_ITE is not set @@ -1627,8 +1811,8 @@ CONFIG_HIDRAW=y # CONFIG_HID_KENSINGTON is not set # CONFIG_HID_LCPOWER is not set # CONFIG_HID_LENOVO is not set -# CONFIG_HID_LOGITECH is not set # CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set # CONFIG_HID_MAYFLASH is not set CONFIG_HID_REDRAGON=y # CONFIG_HID_MICROSOFT is not set @@ -1642,7 +1826,6 @@ CONFIG_HID_REDRAGON=y # CONFIG_HID_PLANTRONICS is not set # CONFIG_HID_PRIMAX is not set # CONFIG_HID_SAITEK is not set -# CONFIG_HID_SAMSUNG 
is not set # CONFIG_HID_SPEEDLINK is not set # CONFIG_HID_STEAM is not set # CONFIG_HID_STEELSERIES is not set @@ -1659,9 +1842,11 @@ CONFIG_HID_REDRAGON=y # CONFIG_HID_ZYDACRON is not set # CONFIG_HID_SENSOR_HUB is not set # CONFIG_HID_ALPS is not set +# end of Special HID drivers +# end of HID support + CONFIG_USB_OHCI_LITTLE_ENDIAN=y # CONFIG_USB_SUPPORT is not set -# CONFIG_UWB is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set @@ -1679,11 +1864,13 @@ CONFIG_DMADEVICES=y # # DMA Devices # +CONFIG_DMA_ACPI=y # CONFIG_ALTERA_MSGDMA is not set # CONFIG_INTEL_IDMA64 is not set # CONFIG_QCOM_HIDMA_MGMT is not set # CONFIG_QCOM_HIDMA is not set # CONFIG_DW_DMAC is not set +# CONFIG_SF_PDMA is not set # # DMABUF options @@ -1691,51 +1878,89 @@ CONFIG_DMADEVICES=y CONFIG_SYNC_FILE=y # CONFIG_SW_SYNC is not set # CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# end of DMABUF options + CONFIG_AUXDISPLAY=y # CONFIG_IMG_ASCII_LCD is not set +# CONFIG_CHARLCD_BL_OFF is not set +# CONFIG_CHARLCD_BL_ON is not set +CONFIG_CHARLCD_BL_FLASH=y # CONFIG_UIO is not set +# CONFIG_VFIO is not set CONFIG_VIRT_DRIVERS=y CONFIG_VIRTIO=y CONFIG_VIRTIO_MENU=y CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_MEM=m # CONFIG_VIRTIO_INPUT is not set CONFIG_VIRTIO_MMIO=y CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +# CONFIG_VDPA is not set +CONFIG_VHOST_MENU=y +# CONFIG_VHOST_NET is not set +# CONFIG_VHOST_VSOCK is not set +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set # # Microsoft Hyper-V guest support # +# CONFIG_HYPERV is not set +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set CONFIG_STAGING=y # CONFIG_COMEDI is not set - -# -# Speakup console speech -# -# CONFIG_SPEAKUP is not set # CONFIG_STAGING_MEDIA is not set # # Android # +# end of Android + # CONFIG_GS_FPGABOOT is not set # CONFIG_UNISYSSPAR is not set -# CONFIG_MOST is not set -# 
CONFIG_GREYBUS is not set # # Gasket devices # -# CONFIG_XIL_AXIS_FIFO is not set -# CONFIG_EROFS_FS is not set +# end of Gasket devices + +# CONFIG_FIELDBUS_DEV is not set CONFIG_X86_PLATFORM_DEVICES=y +# CONFIG_ACPI_WMI is not set +# CONFIG_ACERHDF is not set +# CONFIG_ACER_WIRELESS is not set +# CONFIG_ASUS_WIRELESS is not set # CONFIG_DCDBAS is not set # CONFIG_DELL_SMBIOS is not set # CONFIG_DELL_RBU is not set +# CONFIG_DELL_SMO8800 is not set +# CONFIG_FUJITSU_TABLET is not set +# CONFIG_GPD_POCKET_FAN is not set +# CONFIG_HP_WIRELESS is not set # CONFIG_SENSORS_HDAPS is not set -# CONFIG_INTEL_PUNIT_IPC is not set +# CONFIG_INTEL_HID_EVENT is not set +# CONFIG_INTEL_MENLOW is not set +# CONFIG_INTEL_VBTN is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +# CONFIG_SAMSUNG_Q10 is not set +# CONFIG_TOSHIBA_BT_RFKILL is not set +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_ACPI_CMPC is not set +# CONFIG_SYSTEM76_ACPI is not set +# CONFIG_TOPSTAR_LAPTOP is not set +# CONFIG_INTEL_RST is not set +# CONFIG_INTEL_SMARTCONNECT is not set CONFIG_INTEL_TURBO_MAX_3=y +# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set +# CONFIG_INTEL_PUNIT_IPC is not set +# CONFIG_INTEL_SCU_PLATFORM is not set # CONFIG_CHROME_PLATFORMS is not set # CONFIG_MELLANOX_PLATFORM is not set +# CONFIG_COMMON_CLK is not set # CONFIG_HWSPINLOCK is not set # @@ -1744,25 +1969,33 @@ CONFIG_INTEL_TURBO_MAX_3=y CONFIG_CLKEVT_I8253=y CONFIG_I8253_LOCK=y CONFIG_CLKBLD_I8253=y +# end of Clock Source drivers + CONFIG_MAILBOX=y +CONFIG_PCC=y # CONFIG_ALTERA_MBOX is not set CONFIG_IOMMU_SUPPORT=y # # Generic IOMMU Pagetable Support # +# end of Generic IOMMU Pagetable Support + # CONFIG_IOMMU_DEBUGFS is not set # # Remoteproc drivers # # CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers # # Rpmsg drivers # # CONFIG_RPMSG_QCOM_GLINK_RPM is not set # CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + # CONFIG_SOUNDWIRE is not set # @@ -1772,28 +2005,42 @@ CONFIG_IOMMU_SUPPORT=y # # Amlogic SoC 
drivers # +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers # # Broadcom SoC drivers # +# end of Broadcom SoC drivers # # NXP/Freescale QorIQ SoC drivers # +# end of NXP/Freescale QorIQ SoC drivers # # i.MX SoC drivers # +# end of i.MX SoC drivers # # Qualcomm SoC drivers # +# end of Qualcomm SoC drivers + # CONFIG_SOC_TI is not set # # Xilinx SoC drivers # # CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + # CONFIG_PM_DEVFREQ is not set # CONFIG_EXTCON is not set # CONFIG_MEMORY is not set @@ -1803,10 +2050,10 @@ CONFIG_IOMMU_SUPPORT=y # # IRQ chip support # -CONFIG_ARM_GIC_MAX_NR=1 +# end of IRQ chip support + # CONFIG_IPACK_BUS is not set # CONFIG_RESET_CONTROLLER is not set -# CONFIG_FMC is not set # # PHY Subsystem @@ -1815,18 +2062,25 @@ CONFIG_ARM_GIC_MAX_NR=1 # CONFIG_BCM_KONA_USB2_PHY is not set # CONFIG_PHY_PXA_28NM_HSIC is not set # CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_INTEL_LGM_EMMC is not set +# end of PHY Subsystem + # CONFIG_POWERCAP is not set # CONFIG_MCB is not set # # Performance monitor support # +# end of Performance monitor support + CONFIG_RAS=y # # Android # # CONFIG_ANDROID is not set +# end of Android + # CONFIG_LIBNVDIMM is not set # CONFIG_DAX is not set # CONFIG_NVMEM is not set @@ -1836,14 +2090,22 @@ CONFIG_RAS=y # # CONFIG_STM is not set # CONFIG_INTEL_TH is not set +# end of HW tracing support + # CONFIG_FPGA is not set +# CONFIG_TEE is not set +# CONFIG_UNISYS_VISORBUS is not set # CONFIG_SIOX is not set # CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# end of Device Drivers # # File systems # CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set CONFIG_FS_IOMAP=y # CONFIG_EXT2_FS is not set # CONFIG_EXT3_FS is not set @@ -1851,8 +2113,6 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_USE_FOR_EXT2=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y 
-CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_DEBUG=y CONFIG_JBD2=y CONFIG_JBD2_DEBUG=y @@ -1861,7 +2121,13 @@ CONFIG_FS_MBCACHE=y # CONFIG_JFS_FS is not set # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set -# CONFIG_BTRFS_FS is not set +CONFIG_BTRFS_FS=y +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set # CONFIG_NILFS2_FS is not set # CONFIG_F2FS_FS is not set # CONFIG_FS_DAX is not set @@ -1871,6 +2137,8 @@ CONFIG_EXPORTFS=y CONFIG_FILE_LOCKING=y CONFIG_MANDATORY_FILE_LOCKING=y CONFIG_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_ALGS=y +# CONFIG_FS_VERITY is not set CONFIG_FSNOTIFY=y CONFIG_DNOTIFY=y CONFIG_INOTIFY_USER=y @@ -1883,7 +2151,6 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_QFMT_V1 is not set # CONFIG_QFMT_V2 is not set CONFIG_QUOTACTL=y -CONFIG_QUOTACTL_COMPAT=y # CONFIG_AUTOFS4_FS is not set # CONFIG_AUTOFS_FS is not set # CONFIG_FUSE_FS is not set @@ -1898,19 +2165,25 @@ CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y # Caches # # CONFIG_FSCACHE is not set +# end of Caches # # CD-ROM/DVD Filesystems # -# CONFIG_ISO9660_FS is not set +CONFIG_ISO9660_FS=y +# CONFIG_JOLIET is not set +# CONFIG_ZISOFS is not set # CONFIG_UDF_FS is not set +# end of CD-ROM/DVD Filesystems # -# DOS/FAT/NT Filesystems +# DOS/FAT/EXFAT/NT Filesystems # # CONFIG_MSDOS_FS is not set # CONFIG_VFAT_FS is not set +# CONFIG_EXFAT_FS is not set # CONFIG_NTFS_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems # # Pseudo filesystems @@ -1920,16 +2193,20 @@ CONFIG_PROC_KCORE=y CONFIG_PROC_SYSCTL=y CONFIG_PROC_PAGE_MONITOR=y CONFIG_PROC_CHILDREN=y +CONFIG_PROC_PID_ARCH_STATUS=y CONFIG_KERNFS=y CONFIG_SYSFS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y CONFIG_MEMFD_CREATE=y CONFIG_ARCH_HAS_GIGANTIC_PAGE=y # 
CONFIG_CONFIGFS_FS is not set +# end of Pseudo filesystems + CONFIG_MISC_FILESYSTEMS=y # CONFIG_ORANGEFS_FS is not set # CONFIG_ADFS_FS is not set @@ -1978,6 +2255,7 @@ CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" # CONFIG_PSTORE_RAM is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set +# CONFIG_EROFS_FS is not set CONFIG_NETWORK_FILESYSTEMS=y # CONFIG_NFS_FS is not set # CONFIG_NFSD is not set @@ -2036,14 +2314,16 @@ CONFIG_NLS_DEFAULT="utf8" # CONFIG_NLS_MAC_ROMANIAN is not set # CONFIG_NLS_MAC_TURKISH is not set # CONFIG_NLS_UTF8 is not set +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems # # Security options # CONFIG_KEYS=y -CONFIG_KEYS_COMPAT=y +# CONFIG_KEYS_REQUEST_CACHE is not set CONFIG_PERSISTENT_KEYRINGS=y -# CONFIG_BIG_KEYS is not set CONFIG_ENCRYPTED_KEYS=y # CONFIG_KEY_DH_OPERATIONS is not set # CONFIG_SECURITY_DMESG_RESTRICT is not set @@ -2051,7 +2331,6 @@ CONFIG_SECURITY=y CONFIG_SECURITY_WRITABLE_HOOKS=y CONFIG_SECURITYFS=y CONFIG_SECURITY_NETWORK=y -CONFIG_PAGE_TABLE_ISOLATION=y CONFIG_SECURITY_NETWORK_XFRM=y # CONFIG_SECURITY_PATH is not set CONFIG_LSM_MMAP_MIN_ADDR=65536 @@ -2061,16 +2340,19 @@ CONFIG_FORTIFY_SOURCE=y # CONFIG_STATIC_USERMODEHELPER is not set CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_SELINUX_DEVELOP=y CONFIG_SECURITY_SELINUX_AVC_STATS=y CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 # CONFIG_SECURITY_SMACK is not set # CONFIG_SECURITY_TOMOYO is not set # CONFIG_SECURITY_APPARMOR is not set # CONFIG_SECURITY_LOADPIN is not set # CONFIG_SECURITY_YAMA is not set +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set CONFIG_INTEGRITY=y # CONFIG_INTEGRITY_SIGNATURE is not set CONFIG_INTEGRITY_AUDIT=y @@ -2078,7 +2360,28 @@ CONFIG_INTEGRITY_AUDIT=y # CONFIG_EVM is not set 
CONFIG_DEFAULT_SECURITY_SELINUX=y # CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor,bpf" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=y CONFIG_CRYPTO=y # @@ -2088,8 +2391,8 @@ CONFIG_CRYPTO_ALGAPI=y CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y CONFIG_CRYPTO_HASH=y CONFIG_CRYPTO_HASH2=y CONFIG_CRYPTO_RNG=y @@ -2100,22 +2403,29 @@ CONFIG_CRYPTO_AKCIPHER=y CONFIG_CRYPTO_KPP2=y CONFIG_CRYPTO_KPP=y CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=y -CONFIG_CRYPTO_ECDH=y CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_USER is not set CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y -# CONFIG_CRYPTO_GF128MUL is not set CONFIG_CRYPTO_NULL=y CONFIG_CRYPTO_NULL2=y # CONFIG_CRYPTO_PCRYPT is not set -CONFIG_CRYPTO_WORKQUEUE=y # CONFIG_CRYPTO_CRYPTD is not set # CONFIG_CRYPTO_AUTHENC is not set # CONFIG_CRYPTO_TEST is not set +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +CONFIG_CRYPTO_ECC=y +CONFIG_CRYPTO_ECDH=y +# CONFIG_CRYPTO_ECRDSA is not set +# CONFIG_CRYPTO_SM2 is not set +# CONFIG_CRYPTO_CURVE25519 is not set +# CONFIG_CRYPTO_CURVE25519_X86 is not set + # # Authenticated Encryption with Associated Data # @@ -2123,16 +2433,7 @@ CONFIG_CRYPTO_WORKQUEUE=y # CONFIG_CRYPTO_GCM is not set # CONFIG_CRYPTO_CHACHA20POLY1305 is not set # 
CONFIG_CRYPTO_AEGIS128 is not set -# CONFIG_CRYPTO_AEGIS128L is not set -# CONFIG_CRYPTO_AEGIS256 is not set # CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set -# CONFIG_CRYPTO_AEGIS128L_AESNI_SSE2 is not set -# CONFIG_CRYPTO_AEGIS256_AESNI_SSE2 is not set -# CONFIG_CRYPTO_MORUS640 is not set -# CONFIG_CRYPTO_MORUS640_SSE2 is not set -# CONFIG_CRYPTO_MORUS1280 is not set -# CONFIG_CRYPTO_MORUS1280_SSE2 is not set -# CONFIG_CRYPTO_MORUS1280_AVX2 is not set CONFIG_CRYPTO_SEQIV=y # CONFIG_CRYPTO_ECHAINIV is not set @@ -2149,6 +2450,10 @@ CONFIG_CRYPTO_ECB=y # CONFIG_CRYPTO_PCBC is not set CONFIG_CRYPTO_XTS=y # CONFIG_CRYPTO_KEYWRAP is not set +# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set +# CONFIG_CRYPTO_ADIANTUM is not set +# CONFIG_CRYPTO_ESSIV is not set # # Hash modes @@ -2165,6 +2470,10 @@ CONFIG_CRYPTO_CRC32C=y # CONFIG_CRYPTO_CRC32C_INTEL is not set # CONFIG_CRYPTO_CRC32 is not set # CONFIG_CRYPTO_CRC32_PCLMUL is not set +CONFIG_CRYPTO_XXHASH=y +CONFIG_CRYPTO_BLAKE2B=y +# CONFIG_CRYPTO_BLAKE2S is not set +# CONFIG_CRYPTO_BLAKE2S_X86 is not set CONFIG_CRYPTO_CRCT10DIF=y CONFIG_CRYPTO_CRCT10DIF_PCLMUL=y # CONFIG_CRYPTO_GHASH is not set @@ -2185,6 +2494,7 @@ CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y # CONFIG_CRYPTO_SHA3 is not set # CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set # CONFIG_CRYPTO_TGR192 is not set # CONFIG_CRYPTO_WP512 is not set # CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set @@ -2194,10 +2504,7 @@ CONFIG_CRYPTO_SHA512=y # CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_AES_TI=y -# CONFIG_CRYPTO_AES_X86_64 is not set # CONFIG_CRYPTO_AES_NI_INTEL is not set -# CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_ARC4 is not set # CONFIG_CRYPTO_BLOWFISH is not set # CONFIG_CRYPTO_BLOWFISH_X86_64 is not set # CONFIG_CRYPTO_CAMELLIA is not set @@ -2211,17 +2518,14 @@ CONFIG_CRYPTO_AES_TI=y # CONFIG_CRYPTO_DES is not set # CONFIG_CRYPTO_DES3_EDE_X86_64 is not set # CONFIG_CRYPTO_FCRYPT is not set -# 
CONFIG_CRYPTO_KHAZAD is not set # CONFIG_CRYPTO_SALSA20 is not set # CONFIG_CRYPTO_CHACHA20 is not set # CONFIG_CRYPTO_CHACHA20_X86_64 is not set -# CONFIG_CRYPTO_SEED is not set # CONFIG_CRYPTO_SERPENT is not set # CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set # CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set # CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set # CONFIG_CRYPTO_SM4 is not set -# CONFIG_CRYPTO_TEA is not set # CONFIG_CRYPTO_TWOFISH is not set # CONFIG_CRYPTO_TWOFISH_X86_64 is not set # CONFIG_CRYPTO_TWOFISH_X86_64_3WAY is not set @@ -2271,19 +2575,42 @@ CONFIG_SYSTEM_TRUSTED_KEYS="" # CONFIG_SECONDARY_TRUSTED_KEYRING is not set CONFIG_SYSTEM_BLACKLIST_KEYRING=y CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# end of Certificates for signature checking # # Library routines # +CONFIG_RAID6_PQ=y +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set CONFIG_BITREVERSE=y CONFIG_GENERIC_STRNCPY_FROM_USER=y CONFIG_GENERIC_STRNLEN_USER=y CONFIG_GENERIC_NET_UTILS=y CONFIG_GENERIC_FIND_FIRST_BIT=y +# CONFIG_CORDIC is not set +# CONFIG_PRIME_NUMBERS is not set CONFIG_GENERIC_PCI_IOMAP=y CONFIG_GENERIC_IOMAP=y CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +# CONFIG_CRYPTO_LIB_CHACHA is not set +# CONFIG_CRYPTO_LIB_CURVE25519 is not set +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 +# CONFIG_CRYPTO_LIB_POLY1305 is not set +# CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_LIB_MEMNEQ=y CONFIG_CRC_CCITT=y CONFIG_CRC16=y CONFIG_CRC_T10DIF=y @@ -2306,6 +2633,7 @@ CONFIG_ZLIB_DEFLATE=y CONFIG_LZO_COMPRESS=y CONFIG_LZO_DECOMPRESS=y CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=y CONFIG_ZSTD_DECOMPRESS=y CONFIG_XZ_DEC=y CONFIG_XZ_DEC_X86=y @@ -2322,6 +2650,8 @@ CONFIG_DECOMPRESS_LZMA=y CONFIG_DECOMPRESS_XZ=y CONFIG_DECOMPRESS_LZO=y 
CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y CONFIG_XARRAY_MULTI=y CONFIG_ASSOCIATIVE_ARRAY=y CONFIG_HAS_IOMEM=y @@ -2330,24 +2660,29 @@ CONFIG_HAS_DMA=y CONFIG_NEED_SG_DMA_LENGTH=y CONFIG_NEED_DMA_MAP_STATE=y CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_DMA_DIRECT_OPS=y +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y CONFIG_SWIOTLB=y +CONFIG_DMA_COHERENT_POOL=y +# CONFIG_DMA_API_DEBUG is not set CONFIG_SGL_ALLOC=y CONFIG_CPU_RMAP=y CONFIG_DQL=y CONFIG_NLATTR=y CONFIG_CLZ_TAB=y -# CONFIG_CORDIC is not set -# CONFIG_DDR is not set CONFIG_IRQ_POLL=y CONFIG_MPILIB=y CONFIG_OID_REGISTRY=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_SG_CHAIN=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_COPY_MC=y +CONFIG_ARCH_STACKWALK=y CONFIG_SBITMAP=y # CONFIG_STRING_SELFTEST is not set +# end of Library routines # # Kernel hacking @@ -2357,95 +2692,234 @@ CONFIG_SBITMAP=y # printk and dmesg options # CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 CONFIG_CONSOLE_LOGLEVEL_QUIET=4 CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options # # Compile-time checks and compiler options # +# CONFIG_DEBUG_INFO is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_FRAME_WARN=2048 CONFIG_STRIP_ASM_SYMS=y -CONFIG_UNUSED_SYMBOLS=y -CONFIG_DEBUG_FS=y -# CONFIG_HEADERS_CHECK is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_SECTION_MISMATCH_WARN_ONLY=y CONFIG_FRAME_POINTER=y CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# CONFIG_MAGIC_SYSRQ=y 
CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 CONFIG_MAGIC_SYSRQ_SERIAL=y -# CONFIG_DEBUG_KERNEL is not set +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_HAVE_ARCH_KCSAN=y +CONFIG_HAVE_KCSAN_COMPILER=y +# CONFIG_KCSAN is not set +# end of Generic Kernel Debugging Instruments + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y # # Memory Debugging # # CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set # CONFIG_PAGE_POISONING is not set # CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +# CONFIG_DEBUG_OBJECTS is not set # CONFIG_SLUB_DEBUG_ON is not set # CONFIG_SLUB_STATS is not set CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VM_PGTABLE is not set CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y # CONFIG_KASAN is not set -CONFIG_ARCH_HAS_KCOV=y -CONFIG_CC_HAS_SANCOV_TRACE_PC=y -# CONFIG_KCOV is not set +# end of Memory Debugging + +# CONFIG_DEBUG_SHIRQ is not set # -# Debug Lockups and Hangs +# Debug Oops, Lockups and Hangs # -CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y # CONFIG_PANIC_ON_OOPS is not set CONFIG_PANIC_ON_OOPS_VALUE=0 CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +# 
CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +# CONFIG_SCHED_DEBUG is not set CONFIG_SCHED_INFO=y +# CONFIG_SCHEDSTATS is not set +# end of Scheduler Debugging + # CONFIG_DEBUG_TIMEKEEPING is not set # # Lock Debugging (spinlocks, mutexes, etc...) # CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set # CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ CONFIG_STACKTRACE=y # CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set # # RCU Debugging # +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=59 -CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set CONFIG_USER_STACKTRACE_SUPPORT=y CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y CONFIG_HAVE_DYNAMIC_FTRACE=y CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y CONFIG_HAVE_SYSCALL_TRACEPOINTS=y CONFIG_HAVE_FENTRY=y CONFIG_HAVE_C_RECORDMCOUNT=y CONFIG_TRACING_SUPPORT=y # CONFIG_FTRACE is not set -# CONFIG_DMA_API_DEBUG is not set +# CONFIG_SAMPLES is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# x86 Debugging +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +# CONFIG_X86_DECODER_SELFTEST is not set +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +# CONFIG_DEBUG_BOOT_PARAMS is not set +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_FPU is not set +# CONFIG_UNWINDER_ORC 
is not set +CONFIG_UNWINDER_FRAME_POINTER=y +# end of x86 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set CONFIG_RUNTIME_TESTING_MENU=y # CONFIG_LKDTM is not set # CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_MIN_HEAP is not set # CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set # CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_TEST_HEXDUMP is not set # CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set # CONFIG_TEST_KSTRTOX is not set # CONFIG_TEST_PRINTF is not set # CONFIG_TEST_BITMAP is not set -# CONFIG_TEST_BITFIELD is not set # CONFIG_TEST_UUID is not set # CONFIG_TEST_XARRAY is not set # CONFIG_TEST_OVERFLOW is not set @@ -2453,8 +2927,11 @@ CONFIG_RUNTIME_TESTING_MENU=y # CONFIG_TEST_HASH is not set # CONFIG_TEST_IDA is not set # CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set # CONFIG_TEST_USER_COPY is not set # CONFIG_TEST_BPF is not set +# CONFIG_TEST_BLACKHOLE_DEV is not set # CONFIG_FIND_BIT_BENCHMARK is not set # CONFIG_TEST_FIRMWARE is not set # CONFIG_TEST_SYSCTL is not set @@ -2462,30 +2939,10 @@ CONFIG_RUNTIME_TESTING_MENU=y # CONFIG_TEST_STATIC_KEYS is not set # CONFIG_TEST_KMOD is not set # CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_TEST_FPU is not set # CONFIG_MEMTEST is not set -CONFIG_BUG_ON_DATA_CORRUPTION=y -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -# CONFIG_UBSAN is not set 
-CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y -CONFIG_STRICT_DEVMEM=y -# CONFIG_IO_STRICT_DEVMEM is not set -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_X86_VERBOSE_BOOTUP=y -CONFIG_EARLY_PRINTK=y -# CONFIG_DEBUG_WX is not set -CONFIG_DOUBLEFAULT=y -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -CONFIG_IO_DELAY_TYPE_0X80=0 -CONFIG_IO_DELAY_TYPE_0XED=1 -CONFIG_IO_DELAY_TYPE_UDELAY=2 -CONFIG_IO_DELAY_TYPE_NONE=3 -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEFAULT_IO_DELAY_TYPE=0 -CONFIG_OPTIMIZE_INLINING=y -# CONFIG_UNWINDER_ORC is not set -CONFIG_UNWINDER_FRAME_POINTER=y +# end of Kernel Testing and Coverage +# end of Kernel hacking diff --git a/kernels/microvm-kernel-x86_64-5.10.config b/kernels/microvm-kernel-x86_64-5.10.config new file mode 100644 index 000000000..14606258f --- /dev/null +++ b/kernels/microvm-kernel-x86_64-5.10.config @@ -0,0 +1,2932 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/x86 5.10.124 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.1.0" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=120100 +CONFIG_LD_VERSION=238000000 +CONFIG_CLANG_VERSION=0 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# 
CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# 
CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_BUILD_BIN2C=y +# CONFIG_IKCONFIG is not set +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=21 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y 
+CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +# CONFIG_BPF_PRELOAD is not set +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +CONFIG_SLAB_FREELIST_HARDENED=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y 
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_RETPOLINE=y +# CONFIG_X86_CPU_RESCTRL is not set +# CONFIG_X86_EXTENDED_PLATFORM is not set +# CONFIG_X86_AMD_PLATFORM_DEVICE is not set +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +CONFIG_X86_HV_CALLBACK_VECTOR=y +# CONFIG_XEN is not set +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +# CONFIG_PVH is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_ACRN_GUEST is not set +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_X86_VMX_FEATURE_NAMES=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_DMI=y +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS_RANGE_BEGIN=2 +CONFIG_NR_CPUS_RANGE_END=512 +CONFIG_NR_CPUS_DEFAULT=64 +CONFIG_NR_CPUS=128 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y 
+CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +# CONFIG_X86_MCE is not set + +# +# Performance monitoring +# +# CONFIG_PERF_EVENTS_AMD_POWER is not set +# end of Performance monitoring + +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_X86_IOPL_IOPERM=y +# CONFIG_MICROCODE is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +# CONFIG_X86_CPA_STATISTICS is not set +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +# CONFIG_NUMA_EMU is not set +CONFIG_NODES_SHIFT=10 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +# CONFIG_X86_PMEM_LEGACY is not set +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +CONFIG_X86_UMIP=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +CONFIG_X86_INTEL_TSX_MODE_OFF=y +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +# CONFIG_X86_INTEL_TSX_MODE_AUTO is not set +# CONFIG_EFI is not set +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +# CONFIG_KEXEC is not set +CONFIG_KEXEC_FILE=y +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y +# CONFIG_KEXEC_SIG is not set +# CONFIG_CRASH_DUMP is not set +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +# CONFIG_RANDOMIZE_BASE is not set +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_HOTPLUG_CPU=y +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +# CONFIG_COMPAT_VDSO is not set +CONFIG_LEGACY_VSYSCALL_EMULATE=y +# 
CONFIG_LEGACY_VSYSCALL_XONLY is not set +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +CONFIG_HAVE_LIVEPATCH=y +# end of Processor type and features + +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +# CONFIG_SUSPEND is not set +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_HOTPLUG_CPU=y +# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_CONTAINER=y +# CONFIG_ACPI_HOTPLUG_MEMORY is not set +# CONFIG_ACPI_SBS is not set +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_NFIT is not set +# CONFIG_ACPI_NUMA is not set +CONFIG_HAVE_ACPI_APEI=y 
+CONFIG_HAVE_ACPI_APEI_NMI=y +# CONFIG_ACPI_APEI is not set +# CONFIG_ACPI_DPTF is not set +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_X86_PM_TIMER=y +# CONFIG_SFI is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +# CONFIG_X86_PCC_CPUFREQ is not set +# CONFIG_X86_ACPI_CPUFREQ is not set +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +# CONFIG_X86_P4_CLOCKMOD is not set + +# +# shared options +# +# end of CPU Frequency scaling + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set +# CONFIG_CPU_IDLE_GOV_HALTPOLL is not set +CONFIG_HALTPOLL_CPUIDLE=y +# end of CPU Idle + +CONFIG_INTEL_IDLE=y +# end of Power management and ACPI options + +# +# Bus options (PCI etc.) +# +CONFIG_ISA_DMA_API=y +# CONFIG_X86_SYSFB is not set +# end of Bus options (PCI etc.) 
+ +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +# end of Binary Emulations + +# +# Firmware Drivers +# +# CONFIG_EDD is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +# CONFIG_DMI_SYSFS is not set +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +# CONFIG_ISCSI_IBFT is not set +# CONFIG_FW_CFG_SYSFS is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +CONFIG_HAVE_KVM=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_AS_AVX512=y +CONFIG_AS_SHA1_NI=y +CONFIG_AS_SHA256_NI=y +CONFIG_AS_TPAUSE=y + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HOTPLUG_SMT=y +CONFIG_GENERIC_ENTRY=y +# CONFIG_OPROFILE is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_STATIC_CALL_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y 
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +CONFIG_ARCH_HAS_CC_PLATFORM=y +CONFIG_HAVE_STATIC_CALL=y +CONFIG_HAVE_STATIC_CALL_INLINE=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based 
kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +# CONFIG_GCC_PLUGIN_RANDSTRUCT is not set +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +# CONFIG_MODULE_SIG_SHA256 is not set +# CONFIG_MODULE_SIG_SHA384 is not set +CONFIG_MODULE_SIG_SHA512=y +CONFIG_MODULE_SIG_HASH="sha512" +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_UNUSED_SYMBOLS=y +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +CONFIG_BLK_CMDLINE_PARSER=y +CONFIG_BLK_WBT=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# 
CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +# CONFIG_EFI_PARTITION is not set +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +# CONFIG_MQ_IOSCHED_DEADLINE is not set +# CONFIG_MQ_IOSCHED_KYBER is not set +# CONFIG_IOSCHED_BFQ is not set +# end of IO Schedulers + +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=y +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +# CONFIG_BALLOON_COMPACTION is not set +CONFIG_COMPACTION=y +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y 
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +# CONFIG_CMA is not set +# CONFIG_ZSWAP is not set +CONFIG_ZPOOL=y +# CONFIG_ZBUD is not set +# CONFIG_Z3FOLD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_PTE_DEVMAP=y +# CONFIG_ZONE_DEVICE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +CONFIG_PERCPU_STATS=y +# CONFIG_GUP_BENCHMARK is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_DIAG is not set +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +# CONFIG_UNIX_DIAG is not set +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_USER_COMPAT is not set +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +# CONFIG_NET_KEY is not set +# CONFIG_XDP_SOCKETS is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +# CONFIG_IP_FIB_TRIE_STATS is not set +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +# CONFIG_NET_IPVTI is not set +# CONFIG_NET_FOU is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_DIAG is not set 
+CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +CONFIG_TCP_CONG_CUBIC=y +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_TCP_CONG_HSTCP is not set +# CONFIG_TCP_CONG_HYBLA is not set +# CONFIG_TCP_CONG_VEGAS is not set +# CONFIG_TCP_CONG_NV is not set +# CONFIG_TCP_CONG_SCALABLE is not set +# CONFIG_TCP_CONG_LP is not set +# CONFIG_TCP_CONG_VENO is not set +# CONFIG_TCP_CONG_YEAH is not set +# CONFIG_TCP_CONG_ILLINOIS is not set +# CONFIG_TCP_CONG_DCTCP is not set +# CONFIG_TCP_CONG_CDG is not set +# CONFIG_TCP_CONG_BBR is not set +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +# CONFIG_INET6_AH is not set +# CONFIG_INET6_ESP is not set +# CONFIG_INET6_IPCOMP is not set +# CONFIG_IPV6_MIP6 is not set +# CONFIG_IPV6_ILA is not set +# CONFIG_IPV6_VTI is not set +# CONFIG_IPV6_SIT is not set +# CONFIG_IPV6_TUNNEL is not set +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_SEG6_BPF=y +# CONFIG_IPV6_RPL_LWTUNNEL is not set +CONFIG_NETLABEL=y +# CONFIG_MPTCP is not set +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_FAMILY_BRIDGE=y +# CONFIG_NETFILTER_NETLINK_ACCT is not set +# CONFIG_NETFILTER_NETLINK_QUEUE is not set +# CONFIG_NETFILTER_NETLINK_LOG is not set +# CONFIG_NETFILTER_NETLINK_OSF is not set +CONFIG_NF_CONNTRACK=y +CONFIG_NF_LOG_COMMON=y +# CONFIG_NF_LOG_NETDEV is not set +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +# CONFIG_NF_CONNTRACK_ZONES is not set +CONFIG_NF_CONNTRACK_PROCFS=y 
+CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +# CONFIG_NF_CONNTRACK_LABELS is not set +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +# CONFIG_NF_CONNTRACK_AMANDA is not set +# CONFIG_NF_CONNTRACK_FTP is not set +# CONFIG_NF_CONNTRACK_H323 is not set +# CONFIG_NF_CONNTRACK_IRC is not set +# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set +# CONFIG_NF_CONNTRACK_SNMP is not set +# CONFIG_NF_CONNTRACK_PPTP is not set +# CONFIG_NF_CONNTRACK_SANE is not set +# CONFIG_NF_CONNTRACK_SIP is not set +# CONFIG_NF_CONNTRACK_TFTP is not set +# CONFIG_NF_CT_NETLINK is not set +# CONFIG_NF_CT_NETLINK_TIMEOUT is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=y +# CONFIG_NF_TABLES is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +# CONFIG_NETFILTER_XT_MARK is not set +# CONFIG_NETFILTER_XT_CONNMARK is not set + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set +# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set +# CONFIG_NETFILTER_XT_TARGET_CONNSECMARK is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +# CONFIG_NETFILTER_XT_TARGET_HL is not set +# CONFIG_NETFILTER_XT_TARGET_HMARK is not set +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +# CONFIG_NETFILTER_XT_TARGET_LOG is not set +# CONFIG_NETFILTER_XT_TARGET_MARK is not set +CONFIG_NETFILTER_XT_NAT=y +CONFIG_NETFILTER_XT_TARGET_NETMAP=y +# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set +# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +CONFIG_NETFILTER_XT_TARGET_REDIRECT=y +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not 
set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y +# CONFIG_NETFILTER_XT_MATCH_BPF is not set +# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set +# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set +# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set +# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set +# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ECN is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set +# CONFIG_NETFILTER_XT_MATCH_HELPER is not set +# CONFIG_NETFILTER_XT_MATCH_HL is not set +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set +# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set +# CONFIG_NETFILTER_XT_MATCH_MAC is not set +# CONFIG_NETFILTER_XT_MATCH_MARK is not set +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +# CONFIG_NETFILTER_XT_MATCH_POLICY is not set +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set +# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set +# CONFIG_NETFILTER_XT_MATCH_STATE is not set +# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set +# 
CONFIG_NETFILTER_XT_MATCH_STRING is not set +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +# CONFIG_NETFILTER_XT_MATCH_TIME is not set +# CONFIG_NETFILTER_XT_MATCH_U32 is not set +# end of Core Netfilter Configuration + +# CONFIG_IP_SET is not set +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +# CONFIG_NF_SOCKET_IPV4 is not set +# CONFIG_NF_TPROXY_IPV4 is not set +# CONFIG_NF_DUP_IPV4 is not set +CONFIG_NF_LOG_ARP=y +CONFIG_NF_LOG_IPV4=y +CONFIG_NF_REJECT_IPV4=y +CONFIG_IP_NF_IPTABLES=y +# CONFIG_IP_NF_MATCH_AH is not set +# CONFIG_IP_NF_MATCH_ECN is not set +# CONFIG_IP_NF_MATCH_RPFILTER is not set +# CONFIG_IP_NF_MATCH_TTL is not set +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_SYNPROXY=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +# CONFIG_IP_NF_RAW is not set +# CONFIG_IP_NF_SECURITY is not set +# CONFIG_IP_NF_ARPTABLES is not set +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +# CONFIG_NF_SOCKET_IPV6 is not set +# CONFIG_NF_TPROXY_IPV6 is not set +# CONFIG_NF_DUP_IPV6 is not set +# CONFIG_NF_REJECT_IPV6 is not set +# CONFIG_NF_LOG_IPV6 is not set +# CONFIG_IP6_NF_IPTABLES is not set +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=y +# CONFIG_NF_CONNTRACK_BRIDGE is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_BRIDGE_MRP is not set +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# 
CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +# CONFIG_NET_SCH_HTB is not set +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFB is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +# CONFIG_NET_SCH_TAPRIO is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +# CONFIG_NET_SCH_MQPRIO is not set +# CONFIG_NET_SCH_SKBPRIO is not set +# CONFIG_NET_SCH_CHOKE is not set +# CONFIG_NET_SCH_QFQ is not set +# CONFIG_NET_SCH_CODEL is not set +# CONFIG_NET_SCH_FQ_CODEL is not set +# CONFIG_NET_SCH_CAKE is not set +# CONFIG_NET_SCH_FQ is not set +# CONFIG_NET_SCH_HHF is not set +# CONFIG_NET_SCH_PIE is not set +# CONFIG_NET_SCH_INGRESS is not set +# CONFIG_NET_SCH_PLUG is not set +# CONFIG_NET_SCH_ETS is not set +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +# CONFIG_NET_CLS_U32 is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +# CONFIG_NET_CLS_BPF is not set +# CONFIG_NET_CLS_FLOWER is not set +# CONFIG_NET_CLS_MATCHALL is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +# CONFIG_NET_EMATCH_U32 is not set +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set 
+# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +# CONFIG_NET_ACT_POLICE is not set +# CONFIG_NET_ACT_GACT is not set +# CONFIG_NET_ACT_MIRRED is not set +# CONFIG_NET_ACT_SAMPLE is not set +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_ACT_MPLS is not set +# CONFIG_NET_ACT_VLAN is not set +# CONFIG_NET_ACT_BPF is not set +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_ACT_GATE is not set +# CONFIG_NET_TC_SKB_EXT is not set +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +# CONFIG_DNS_RESOLVER is not set +# CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_DIAG=y +CONFIG_VSOCKETS_LOOPBACK=y +CONFIG_VIRTIO_VSOCKETS=y +CONFIG_VIRTIO_VSOCKETS_COMMON=y +# CONFIG_NETLINK_DIAG is not set +CONFIG_MPLS=y +# CONFIG_NET_MPLS_GSO is not set +# CONFIG_MPLS_ROUTING is not set +# CONFIG_NET_NSH is not set +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +CONFIG_FIB_RULES=y +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is 
not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_FAILOVER=y +CONFIG_ETHTOOL_NETLINK=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +# CONFIG_PCI is not set +# CONFIG_PCCARD is not set + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_MHI_BUS is not set +# end of Bus devices + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +# CONFIG_MTD is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +# CONFIG_BLK_DEV_RBD is not set + +# +# NVME Support +# +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TCP is not set 
+# end of NVME Support + +# +# Misc devices +# +# CONFIG_DUMMY_IRQ is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_SRAM is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_PVPANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_93CX6 is not set +# end of EEPROM support + +# +# Texas Instruments shared transport line discipline +# +# end of Texas Instruments shared transport line discipline + +# +# Altera FPGA firmware download module (requires I2C) +# +# CONFIG_ECHO is not set +# end of Misc devices + +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +# CONFIG_BLK_DEV_SD is not set +# CONFIG_CHR_DEV_ST is not set +# CONFIG_BLK_DEV_SR is not set +# CONFIG_CHR_DEV_SG is not set +# CONFIG_CHR_DEV_SCH is not set +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_SAS_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=y +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_VIRTIO is not set +# CONFIG_SCSI_DH is not set +# end of SCSI device support + +# CONFIG_ATA is not set +# CONFIG_MD is not set +# CONFIG_TARGET_CORE is not set +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_NETDEVICES=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is not set +# CONFIG_DUMMY is not set +# CONFIG_WIREGUARD is not set +# CONFIG_EQUALIZER is not set +# CONFIG_NET_TEAM is not set +# CONFIG_MACVLAN is not set +# CONFIG_IPVLAN is not set +# CONFIG_VXLAN is not set +# CONFIG_GENEVE is not set +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not 
set +# CONFIG_MACSEC is not set +# CONFIG_NETCONSOLE is not set +CONFIG_TUN=y +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=y +CONFIG_VIRTIO_NET=y +# CONFIG_NLMON is not set + +# +# Distributed Switch Architecture drivers +# +# end of Distributed Switch Architecture drivers + +# CONFIG_ETHERNET is not set +# CONFIG_NET_SB1000 is not set +# CONFIG_PHYLIB is not set +# CONFIG_MDIO_DEVICE is not set + +# +# PCS device drivers +# +# end of PCS device drivers + +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# Host-side USB support is needed for USB Network Adapter support +# +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# 
CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_16550A_VARIANTS is not set +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_NR_UARTS=1 +CONFIG_SERIAL_8250_RUNTIME_UARTS=1 +# CONFIG_SERIAL_8250_EXTENDED is not set +# CONFIG_SERIAL_8250_DW is not set +# CONFIG_SERIAL_8250_RT288X is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_LANTIQ is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# end of Serial drivers + +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set +# CONFIG_NULL_TTY is not set +# CONFIG_TRACE_SINK is not set +CONFIG_HVC_DRIVER=y +CONFIG_SERIAL_DEV_BUS=y +CONFIG_SERIAL_DEV_CTRL_TTYPORT=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_MWAVE is not set +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set +# CONFIG_NVRAM is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_HPET is not set +# CONFIG_HANGCHECK_TIMER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_TELCLOCK is not set +# CONFIG_RANDOM_TRUST_CPU is not set +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set +# end of Character devices + +# +# I2C support +# +# CONFIG_I2C is not set +# end of I2C support + +# CONFIG_I3C is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to 
see the additional clocks. +# +CONFIG_PTP_1588_CLOCK_KVM=y +# CONFIG_PTP_1588_CLOCK_VMW is not set +# end of PTP clock support + +# CONFIG_PINCTRL is not set +# CONFIG_GPIOLIB is not set +# CONFIG_W1 is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_HWMON is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# +# CONFIG_INTEL_POWERCLAMP is not set + +# +# ACPI INT340X thermal drivers +# +# end of ACPI INT340X thermal drivers +# end of Intel thermal drivers + +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_MADERA is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_INTEL_LPSS_ACPI is not set +# CONFIG_MFD_INTEL_PMC_BXT is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_RAVE_SP_CORE is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +# CONFIG_MEDIA_SUPPORT 
is not set + +# +# Graphics support +# +# CONFIG_DRM is not set + +# +# ARM devices +# +# end of ARM devices + +# +# Frame buffer Devices +# +# CONFIG_FB is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +# CONFIG_LCD_CLASS_DEVICE is not set +# CONFIG_BACKLIGHT_CLASS_DEVICE is not set +# end of Backlight & LCD device support + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +# end of Console display driver support +# end of Graphics support + +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +CONFIG_HIDRAW=y +# CONFIG_UHID is not set +# CONFIG_HID_GENERIC is not set + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_GLORIOUS is not set +# CONFIG_HID_VIVALDI is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +CONFIG_HID_REDRAGON=y +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_MULTITOUCH is not set +# 
CONFIG_HID_NTI is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_SUPPORT is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +# CONFIG_EDAC is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +# CONFIG_RTC_CLASS is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +# CONFIG_DW_DMAC is not set +# CONFIG_SF_PDMA is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# end of DMABUF options + +CONFIG_AUXDISPLAY=y +# CONFIG_IMG_ASCII_LCD is not set +# CONFIG_CHARLCD_BL_OFF is not set +# CONFIG_CHARLCD_BL_ON is not set +CONFIG_CHARLCD_BL_FLASH=y +# CONFIG_UIO is not set +# CONFIG_VFIO is not set +CONFIG_VIRT_DRIVERS=y 
+CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_MEM=m +# CONFIG_VIRTIO_INPUT is not set +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +# CONFIG_VDPA is not set +CONFIG_VHOST_MENU=y +# CONFIG_VHOST_NET is not set +# CONFIG_VHOST_VSOCK is not set +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +CONFIG_STAGING=y +# CONFIG_COMEDI is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +# end of Android + +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set + +# +# Gasket devices +# +# end of Gasket devices + +# CONFIG_FIELDBUS_DEV is not set +CONFIG_X86_PLATFORM_DEVICES=y +# CONFIG_ACPI_WMI is not set +# CONFIG_ACERHDF is not set +# CONFIG_ACER_WIRELESS is not set +# CONFIG_ASUS_WIRELESS is not set +# CONFIG_DCDBAS is not set +# CONFIG_DELL_SMBIOS is not set +# CONFIG_DELL_RBU is not set +# CONFIG_DELL_SMO8800 is not set +# CONFIG_FUJITSU_TABLET is not set +# CONFIG_GPD_POCKET_FAN is not set +# CONFIG_HP_WIRELESS is not set +# CONFIG_SENSORS_HDAPS is not set +# CONFIG_INTEL_HID_EVENT is not set +# CONFIG_INTEL_MENLOW is not set +# CONFIG_INTEL_VBTN is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +# CONFIG_SAMSUNG_Q10 is not set +# CONFIG_TOSHIBA_BT_RFKILL is not set +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_ACPI_CMPC is not set +# CONFIG_SYSTEM76_ACPI is not set +# CONFIG_TOPSTAR_LAPTOP is not set +# CONFIG_INTEL_RST is not set +# CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_INTEL_TURBO_MAX_3=y +# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set +# CONFIG_INTEL_PUNIT_IPC is not set +# CONFIG_INTEL_SCU_PLATFORM is not set +# CONFIG_CHROME_PLATFORMS is not set +# CONFIG_MELLANOX_PLATFORM is not set +# CONFIG_COMMON_CLK is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# end 
of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_INTEL_LGM_EMMC is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +CONFIG_RAS=y + +# +# Android +# +# CONFIG_ANDROID is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set +# CONFIG_DAX is not set +# CONFIG_NVMEM is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW 
tracing support + +# CONFIG_FPGA is not set +# CONFIG_TEE is not set +# CONFIG_UNISYS_VISORBUS is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=y +CONFIG_JBD2_DEBUG=y +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +CONFIG_BTRFS_FS=y +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_ALGS=y +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +# CONFIG_QUOTA_DEBUG is not set +# CONFIG_QFMT_V1 is not set +# CONFIG_QFMT_V2 is not set +CONFIG_QUOTACTL=y +# CONFIG_AUTOFS4_FS is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_FUSE_FS is not set +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +# CONFIG_UDF_FS is not set +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_EXFAT_FS is not set +# 
CONFIG_NTFS_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_PROC_PID_ARCH_STATUS=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +# CONFIG_CONFIGFS_FS is not set +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZSTD=y +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# 
CONFIG_PSTORE_RAM is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_EROFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +# CONFIG_CIFS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not 
set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +# CONFIG_NLS_UTF8 is not set +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITY_WRITABLE_HOOKS=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_LSM_MMAP_MIN_ADDR=65536 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +CONFIG_INTEGRITY_AUDIT=y +# CONFIG_IMA is not set +# CONFIG_EVM is not set +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor,bpf" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set +# 
CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL is not set +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +CONFIG_CRYPTO_ECC=y +CONFIG_CRYPTO_ECDH=y +# CONFIG_CRYPTO_ECRDSA is not set +# CONFIG_CRYPTO_SM2 is not set +# CONFIG_CRYPTO_CURVE25519 is not set +# CONFIG_CRYPTO_CURVE25519_X86 is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +CONFIG_CRYPTO_SEQIV=y +# CONFIG_CRYPTO_ECHAINIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_OFB is not set +# CONFIG_CRYPTO_PCBC is not set +CONFIG_CRYPTO_XTS=y +# CONFIG_CRYPTO_KEYWRAP is not set +# 
CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set +# CONFIG_CRYPTO_ADIANTUM is not set +# CONFIG_CRYPTO_ESSIV is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_CMAC is not set +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32C_INTEL is not set +# CONFIG_CRYPTO_CRC32 is not set +# CONFIG_CRYPTO_CRC32_PCLMUL is not set +# CONFIG_CRYPTO_XXHASH is not set +# CONFIG_CRYPTO_BLAKE2B is not set +# CONFIG_CRYPTO_BLAKE2S is not set +# CONFIG_CRYPTO_BLAKE2S_X86 is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=y +# CONFIG_CRYPTO_GHASH is not set +# CONFIG_CRYPTO_POLY1305 is not set +# CONFIG_CRYPTO_POLY1305_X86_64 is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA1_SSSE3 is not set +# CONFIG_CRYPTO_SHA256_SSSE3 is not set +# CONFIG_CRYPTO_SHA512_SSSE3 is not set +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set +# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AES_TI=y +# CONFIG_CRYPTO_AES_NI_INTEL is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_BLOWFISH_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAMELLIA_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 is not set +# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST5_AVX_X86_64 is not set +# CONFIG_CRYPTO_CAST6 is not set +# CONFIG_CRYPTO_CAST6_AVX_X86_64 is not set +# CONFIG_CRYPTO_DES is not set +# 
CONFIG_CRYPTO_DES3_EDE_X86_64 is not set +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +# CONFIG_CRYPTO_CHACHA20_X86_64 is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set +# CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set +# CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set +# CONFIG_CRYPTO_SM4 is not set +# CONFIG_CRYPTO_TWOFISH is not set +# CONFIG_CRYPTO_TWOFISH_X86_64 is not set +# CONFIG_CRYPTO_TWOFISH_X86_64_3WAY is not set +# CONFIG_CRYPTO_TWOFISH_AVX_X86_64 is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set +CONFIG_CRYPTO_HASH_INFO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +# CONFIG_SECONDARY_TRUSTED_KEYRING is not set +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# end of Certificates for signature checking + +# +# Library routines +# +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y 
+CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +# CONFIG_CORDIC is not set +# CONFIG_PRIME_NUMBERS is not set +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +# CONFIG_CRYPTO_LIB_CHACHA is not set +# CONFIG_CRYPTO_LIB_CURVE25519 is not set +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 +# CONFIG_CRYPTO_LIB_POLY1305 is not set +# CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_LIB_MEMNEQ=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y 
+CONFIG_SWIOTLB=y +CONFIG_DMA_COHERENT_POOL=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_OID_REGISTRY=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_COPY_MC=y +CONFIG_ARCH_STACKWALK=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_FRAME_POINTER=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_HAVE_ARCH_KCSAN=y +CONFIG_HAVE_KCSAN_COMPILER=y +# CONFIG_KCSAN is not set +# end of Generic Kernel Debugging Instruments + +CONFIG_DEBUG_KERNEL=y 
+CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VM_PGTABLE is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +# end of Memory Debugging + +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Oops, Lockups and Hangs +# +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHED_INFO=y +# CONFIG_SCHEDSTATS is not set +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=59 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_SAMPLES is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# x86 Debugging +# 
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +# CONFIG_X86_DECODER_SELFTEST is not set +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +# CONFIG_DEBUG_BOOT_PARAMS is not set +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_FPU is not set +# CONFIG_UNWINDER_ORC is not set +CONFIG_UNWINDER_FRAME_POINTER=y +# end of x86 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE 
is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_TEST_FPU is not set +# CONFIG_MEMTEST is not set +# end of Kernel Testing and Coverage +# end of Kernel hacking diff --git a/packaging/Makefile b/packaging/Makefile new file mode 100644 index 000000000..c797cf267 --- /dev/null +++ b/packaging/Makefile @@ -0,0 +1,153 @@ +all: debian-package + +debian-package: debian-package-resources debian-package-code + sudo dpkg-deb --build aleph-vm target/aleph-vm.deb + +debian-package-code: + rm -fr ./aleph-vm/opt/aleph-vm + mkdir -p ./aleph-vm/opt/aleph-vm + cp -r ../src/aleph ./aleph-vm/opt/aleph-vm/ + + # Fake data for diagnostic and benchmarks + mkdir -p ./aleph-vm/opt/aleph-vm/examples/ + cp -r ../examples/example_fastapi ./aleph-vm/opt/aleph-vm/examples/example_fastapi + cp ../examples/program_message_from_aleph.json ./aleph-vm/opt/aleph-vm/examples/program_message_from_aleph.json + cp ../examples/instance_message_from_aleph.json ./aleph-vm/opt/aleph-vm/examples/instance_message_from_aleph.json + cp -r ../examples/data ./aleph-vm/opt/aleph-vm/examples/data + mkdir -p ./aleph-vm/opt/aleph-vm/examples/volumes + # Making a venv to build the wheel. 
to work around a strange problem while building the wheel
+	python3 -m venv build_venv
+	build_venv/bin/pip install --progress-bar off --upgrade pip setuptools wheel
+	# Fixing this protobuf dependency version to avoid getting CI errors, as version 5.29.0 has this compilation issue
+	build_venv/bin/pip install --no-cache-dir --progress-bar off --target ./aleph-vm/opt/aleph-vm/ 'aleph-message==0.6.1' 'eth-account==0.10' 'sentry-sdk==1.31.0' 'qmp==1.1.0' 'aleph-superfluid~=0.2.1' 'sqlalchemy[asyncio]>=2.0' 'aiosqlite==0.19.0' 'alembic==1.13.1' 'aiohttp_cors==0.7.0' 'pyroute2==0.7.12' 'python-cpuid==0.1.0' 'solathon==1.0.2' 'protobuf==5.28.3'
+	build_venv/bin/python3 -m compileall ./aleph-vm/opt/aleph-vm/
+
+debian-package-resources: firecracker-bins vmlinux download-ipfs-kubo target/bin/sevctl
+	rm -fr ./aleph-vm/opt/firecracker
+	mkdir -p ./aleph-vm/opt/firecracker
+	cp -pr ./target/vmlinux.bin ./aleph-vm/opt/firecracker/
+	cp -pr ./target/firecracker ./aleph-vm/opt/firecracker/
+	cp -pr ./target/jailer ./aleph-vm/opt/firecracker/
+	cp -pr ./target/kubo/kubo ./aleph-vm/opt/kubo
+	cp -pr ./target/bin/sevctl ./aleph-vm/opt/sevctl
+
+firecracker-bins: target-dir build-dir
+	mkdir -p ./build/firecracker-release
+	# Download latest release
+	curl -fsSL https://github.com/firecracker-microvm/firecracker/releases/download/v1.5.0/firecracker-v1.5.0-x86_64.tgz | tar -xz --no-same-owner --directory ./build/firecracker-release
+	# Copy binaries:
+	cp ./build/firecracker-release/release-v*/firecracker-v*[!.debug] ./target/firecracker
+	cp ./build/firecracker-release/release-v*/jailer-v*[!.debug] ./target/jailer
+	chmod +x ./target/firecracker
+	chmod +x ./target/jailer
+
+vmlinux:
+	#curl -fsSL -o ./target/vmlinux.bin https://s3.amazonaws.com/spec.ccfc.min/img/quickstart_guide/x86_64/kernels/vmlinux.bin
+	curl -fsSL -o ./target/vmlinux.bin https://ipfs.aleph.cloud/ipfs/bafybeiaj2lf6g573jiulzacvkyw4zzav7dwbo5qbeiohoduopwxs2c6vvy
+	#cp ../kernels/vmlinux.bin ./target/vmlinux.bin
+ +download-ipfs-kubo: target-dir build-dir + mkdir -p ./target/kubo + curl -fsSL https://github.com/ipfs/kubo/releases/download/v0.33.2/kubo_v0.33.2_linux-amd64.tar.gz | tar -xz --directory ./target/kubo + +target/bin/sevctl: + cargo install --locked --git https://github.com/virtee/sevctl.git --rev v0.6.0 --target x86_64-unknown-linux-gnu --root ./target + ./target/bin/sevctl -V + +version: + python3 ./version_from_git.py --inplace deb aleph-vm/DEBIAN/control + python3 ./version_from_git.py --inplace __version__ ../src/aleph/vm/version.py + +build-dir: + mkdir -p target + +target-dir: + mkdir -p target + +clean: + rm -fr ./target/* + rm -fr ./build/* + rm -fr ./aleph-vm/opt/aleph-vm/ + rm -fr ./aleph-vm/opt/firecracker/ + rm -fr ./aleph-vm/opt/kubo/ + rm -fr ./aleph-vm/opt/aleph-vm/ + rm -fr ./sevctl/target/ + +all-podman-debian-12: version + cd .. && podman build -t localhost/aleph-vm-packaging-debian-12:latest -f ./packaging/debian-12.dockerfile . + mkdir -p ./target + podman run --rm -ti \ + -w /opt/packaging \ + -v ./target:/opt/packaging/target \ + localhost/aleph-vm-packaging-debian-12:latest \ + make + file target/aleph-vm.deb + mv target/aleph-vm.deb target/aleph-vm.debian-12.deb + +all-podman-ubuntu-2204: version + # Ensure the sevctl submodule is checked out first. + git submodule init + cd .. && podman build -t localhost/aleph-vm-packaging-ubuntu-2204:latest -f ./packaging/ubuntu-22.04.dockerfile . + mkdir -p ./target + podman run --rm -ti \ + -w /opt/packaging \ + -v ./target:/opt/packaging/target \ + localhost/aleph-vm-packaging-ubuntu-2204:latest \ + make + file target/aleph-vm.deb + mv target/aleph-vm.deb target/aleph-vm.ubuntu-22.04.deb + +all-podman-ubuntu-2404: version + cd .. && podman build -t localhost/aleph-vm-packaging-ubuntu-2404:latest -f ./packaging/ubuntu-24.04.dockerfile . 
+ mkdir -p ./target + podman run --rm -ti \ + -w /opt/packaging \ + -v ./target:/opt/packaging/target \ + localhost/aleph-vm-packaging-ubuntu-2404:latest \ + make + file target/aleph-vm.deb + mv target/aleph-vm.deb target/aleph-vm.ubuntu-24.04.deb + +# extract Python requirements from Debian 12 container +requirements-debian-12: all-podman-debian-12 + podman run --rm -ti \ + -v ./target/aleph-vm.debian-12.deb:/opt/packaging/target/aleph-vm.deb:ro \ + -v ./extract_requirements.sh:/opt/extract_requirements.sh:ro \ + -v ./requirements-debian-12.txt:/mnt/requirements-debian-12.txt \ + debian:bookworm \ + bash -c "/opt/extract_requirements.sh /mnt/requirements-debian-12.txt" + +# extract Python requirements from Ubuntu 22.04 container +requirements-ubuntu-22.04: all-podman-ubuntu-2204 + podman run --rm -ti \ + -v ./target/aleph-vm.ubuntu-22.04.deb:/opt/packaging/target/aleph-vm.deb:ro \ + -v ./extract_requirements.sh:/opt/extract_requirements.sh:ro \ + -v ./requirements-ubuntu-22.04.txt:/mnt/requirements-ubuntu-22.04.txt \ + ubuntu:jammy \ + bash -c "/opt/extract_requirements.sh /mnt/requirements-ubuntu-22.04.txt" + +# extract Python requirements from Ubuntu 24.04 container +requirements-ubuntu-24.04: all-podman-ubuntu-2404 + podman run --rm -ti \ + -v ./target/aleph-vm.ubuntu-24.04.deb:/opt/packaging/target/aleph-vm.deb:ro \ + -v ./extract_requirements.sh:/opt/extract_requirements.sh:ro \ + -v ./requirements-ubuntu-24.04.txt:/mnt/requirements-ubuntu-24.04.txt \ + ubuntu:noble \ + bash -c "/opt/extract_requirements.sh /mnt/requirements-ubuntu-24.04.txt" + +# run on host in order to sign with GPG +repository-bookworm: + cd ./repositories/bookworm && reprepro -Vb . includedeb bookworm ../../target/aleph-vm.debian-12.deb && cd .. + +# run on host in order to sign with GPG +repository-jammy: + cd ./repositories/jammy && reprepro -Vb . includedeb jammy ../../target/aleph-vm.ubuntu-22.04.deb && cd .. 
+ +# run on host in order to sign with GPG +repository-noble: + cd ./repositories/noble && reprepro -Vb . includedeb noble ../../target/aleph-vm.ubuntu-24.04.deb && cd .. + +repositories: repository-bookworm repository-jammy repository-noble + +all-podman: all-podman-debian-12 all-podman-ubuntu-2204 all-podman-ubuntu-2404 repositories + diff --git a/packaging/aleph-vm/DEBIAN/conffiles b/packaging/aleph-vm/DEBIAN/conffiles new file mode 100644 index 000000000..e1994d290 --- /dev/null +++ b/packaging/aleph-vm/DEBIAN/conffiles @@ -0,0 +1 @@ +/etc/aleph-vm/supervisor.env diff --git a/packaging/aleph-vm/DEBIAN/control b/packaging/aleph-vm/DEBIAN/control new file mode 100644 index 000000000..6b42eea41 --- /dev/null +++ b/packaging/aleph-vm/DEBIAN/control @@ -0,0 +1,8 @@ +Package: aleph-vm +Version: 0.1.8 +Architecture: all +Maintainer: Aleph.im +Description: Aleph.im VM execution engine +Depends: python3,python3-pip,python3-aiohttp,python3-msgpack,python3-aiodns,python3-alembic,python3-sqlalchemy,python3-setproctitle,redis,python3-aioredis,python3-psutil,sudo,acl,curl,systemd-container,squashfs-tools,debootstrap,python3-packaging,python3-cpuinfo,python3-nftables,python3-jsonschema,cloud-image-utils,ndppd,python3-yaml,python3-dotenv,python3-schedule,qemu-system-x86,qemu-utils,python3-systemd,python3-dbus,btrfs-progs,nftables,python3-jwcrypto +Section: aleph-im +Priority: Extra diff --git a/packaging/aleph-vm/DEBIAN/postinst b/packaging/aleph-vm/DEBIAN/postinst new file mode 100755 index 000000000..913e8c411 --- /dev/null +++ b/packaging/aleph-vm/DEBIAN/postinst @@ -0,0 +1,26 @@ +#!/bin/bash +set -euf -o pipefail + +if ! id -u jailman > /dev/null 2>&1; then + useradd jailman +fi + +rm -fr /srv/jailer # Upgrade from < 0.1.11 +rm -fr /tmp/aleph # Upgrade from < 0.1.11 +mkdir -p /var/lib/aleph/vm/jailer + +# Create the IPFS directory if it does not exist +if [ ! 
-d "/var/lib/ipfs" ]; then + mkdir -p /var/lib/ipfs + # Set appropriate permissions if needed + chown ipfs:ipfs /var/lib/ipfs +fi + +# Systemd is absent from containers +if ! [[ -v container ]]; then + systemctl daemon-reload + systemctl enable ipfs.service + systemctl restart ipfs.service + systemctl enable aleph-vm-supervisor.service + systemctl restart aleph-vm-supervisor.service +fi diff --git a/packaging/aleph-vm/DEBIAN/postrm b/packaging/aleph-vm/DEBIAN/postrm new file mode 100755 index 000000000..41f9b7ffe --- /dev/null +++ b/packaging/aleph-vm/DEBIAN/postrm @@ -0,0 +1,13 @@ +#!/bin/bash +set -euf -o pipefail + +rm -fr /srv/jailer # Upgrade from < 0.1.11 +rm -fr /tmp/aleph/ # Upgrade from < 0.1.11 +rm -fr /var/lib/aleph/vm/jailer + +if [ "$1" = "purge" ]; then + # Remove the directory when the package is purged + rm -rf /var/lib/ipfs +fi + +systemctl daemon-reload diff --git a/packaging/aleph-vm/DEBIAN/preinst b/packaging/aleph-vm/DEBIAN/preinst new file mode 100755 index 000000000..4fb97a8be --- /dev/null +++ b/packaging/aleph-vm/DEBIAN/preinst @@ -0,0 +1,17 @@ +#!/bin/bash +set -uf -o pipefail + +# Documentation: https://www.debian.org/doc/debian-policy/ch-maintainerscripts.html + +# Systemd is absent from containers +if ! [[ -v container ]]; then + # Stop the service during an upgrade. 
+ # The service does not exist during a new install and will fail, this is okay + systemctl stop aleph-vm-supervisor.service +fi + +set -e + +# We will not delete this user on uninstall since there may be files owned by that user in /var/lib/ipfs +addgroup --system ipfs +adduser --system --ingroup ipfs ipfs diff --git a/packaging/aleph-vm/DEBIAN/prerm b/packaging/aleph-vm/DEBIAN/prerm new file mode 100755 index 000000000..4e88fe7fc --- /dev/null +++ b/packaging/aleph-vm/DEBIAN/prerm @@ -0,0 +1,5 @@ +#!/bin/bash +set -euf -o pipefail + +systemctl disable aleph-vm-supervisor.service +systemctl stop aleph-vm-supervisor.service diff --git a/packaging/aleph-vm/etc/aleph-vm/supervisor.env b/packaging/aleph-vm/etc/aleph-vm/supervisor.env new file mode 100644 index 000000000..b661ee90f --- /dev/null +++ b/packaging/aleph-vm/etc/aleph-vm/supervisor.env @@ -0,0 +1,4 @@ +# System logs make boot ~2x slower +ALEPH_VM_PRINT_SYSTEM_LOGS=False +ALEPH_VM_DOMAIN_NAME=vm.example.org +ALEPH_VM_PAYMENT_RECEIVER_ADDRESS= diff --git a/packaging/aleph-vm/etc/ipfs/KUBO.md b/packaging/aleph-vm/etc/ipfs/KUBO.md new file mode 100644 index 000000000..a21a9fa4f --- /dev/null +++ b/packaging/aleph-vm/etc/ipfs/KUBO.md @@ -0,0 +1,4 @@ +The IP range `86.84.0.0/16` is managed by `KPN Internet` and is filtered out due to +an abuse letter sent to a node operator by Hetzner regarding "an attack" from the node. +The content of this "attack" appears as legit IPFS traffic +(TCP packets from port 4001 to port 4001 and UDP packets from port 4001 to port 46024). 
diff --git a/packaging/aleph-vm/etc/needrestart/conf.d/aleph-vm.conf b/packaging/aleph-vm/etc/needrestart/conf.d/aleph-vm.conf new file mode 100644 index 000000000..74963eebf --- /dev/null +++ b/packaging/aleph-vm/etc/needrestart/conf.d/aleph-vm.conf @@ -0,0 +1,3 @@ +# Do not restart Aleph Network Services +$nrconf{override_rc}{qr(^aleph-vm-supervisor)} = 0; +$nrconf{override_rc}{qr(^aleph-vm-controller@.*\.service$)} = 0; diff --git a/packaging/aleph-vm/etc/systemd/system/aleph-vm-controller@.service b/packaging/aleph-vm/etc/systemd/system/aleph-vm-controller@.service new file mode 100644 index 000000000..7bbfc67d8 --- /dev/null +++ b/packaging/aleph-vm/etc/systemd/system/aleph-vm-controller@.service @@ -0,0 +1,22 @@ +[Unit] +Description=Aleph VM %i Controller +After=network.target + +[Service] +Type=simple +RestartSec=5s +PrivateTmp=yes +NoNewPrivileges=true +WorkingDirectory=/opt/aleph-vm +Environment=PYTHONPATH=/opt/aleph-vm/:$PYTHONPATH +ExecStart=/usr/bin/python3 -m aleph.vm.controllers --config=/var/lib/aleph/vm/%i-controller.json +Restart=on-failure +# KillMode=Mixed is used so initially only the Python controller process receives the SIGTERM signal. +# The controller catches it and sends a QEMU command to shut down the Guest VM, allowing it to clean up +# properly and avoid disk corruption. +# After 30s (TimeoutStopSec), if the process is still running, both the controller and subprocesses receive SIGKILL. 
+KillMode=mixed +TimeoutStopSec=30 + +[Install] +WantedBy=multi-user.target diff --git a/packaging/aleph-vm/etc/systemd/system/aleph-vm-supervisor.service b/packaging/aleph-vm/etc/systemd/system/aleph-vm-supervisor.service new file mode 100644 index 000000000..755d971b1 --- /dev/null +++ b/packaging/aleph-vm/etc/systemd/system/aleph-vm-supervisor.service @@ -0,0 +1,18 @@ +[Unit] +Description=Aleph.im VM execution engine +After=network.target ipfs.service +Wants=ipfs.service + +[Service] +User=0 +Group=0 +WorkingDirectory=/opt/aleph-vm +Environment=PYTHONPATH=/opt/aleph-vm/:$PYTHONPATH +Environment=PYTHONDONTWRITEBYTECODE="enabled" +EnvironmentFile=/etc/aleph-vm/supervisor.env +ExecStart=python3 -m aleph.vm.orchestrator --print-settings +Restart=always +RestartSec=10s + +[Install] +WantedBy=multi-user.target diff --git a/packaging/aleph-vm/etc/systemd/system/ipfs.service b/packaging/aleph-vm/etc/systemd/system/ipfs.service new file mode 100644 index 000000000..0708ae8f1 --- /dev/null +++ b/packaging/aleph-vm/etc/systemd/system/ipfs.service @@ -0,0 +1,93 @@ +# Source: https://github.com/ipfs/kubo/blob/master/misc/systemd/ipfs-hardened.service + +# This file will be overwritten on package upgrades, avoid customizations here. +# +# To make persistent changes, create file in +# "/etc/systemd/system/ipfs.service.d/overwrite.conf" with +# `systemctl edit ipfs.service`. This file will be parsed after this +# file has been parsed. +# +# To overwrite a variable, like ExecStart you have to specify it once +# blank and a second time with a new value, like: +# ExecStart= +# ExecStart=/usr/bin/ipfs daemon --flag1 --flag2 +# +# For more info about custom unit files see systemd.unit(5). + +# This service file enables systemd-hardening features compatible with IPFS, +# while breaking compatibility with the fuse-mount function. Use this one only +# if you don't need the fuse-mount functionality. 
+ +[Unit] +Description=InterPlanetary File System (IPFS) daemon +Documentation=https://docs.ipfs.tech/ +After=network.target + +[Service] +# hardening +ReadOnlyPaths="/opt/kubo/" "/etc/ipfs" +ReadWritePaths="/var/lib/ipfs/" +NoNewPrivileges=true +ProtectSystem=strict +ProtectKernelTunables=true +ProtectKernelModules=true +ProtectKernelLogs=true +PrivateDevices=true +DevicePolicy=closed +ProtectControlGroups=true +RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 AF_NETLINK +ProtectHostname=true +PrivateTmp=true +ProtectClock=true +LockPersonality=true +RestrictNamespaces=true +RestrictRealtime=true +MemoryDenyWriteExecute=true +SystemCallArchitectures=native +SystemCallFilter=@system-service +SystemCallFilter=~@privileged +ProtectHome=true +RemoveIPC=true +RestrictSUIDSGID=true +CapabilityBoundingSet=CAP_NET_BIND_SERVICE +# set memory limit to avoid taking all the CRN resources and getting OOM +# https://github.com/ipfs/kubo/blob/master/docs/config.md#swarmresourcemgrmaxmemory +Environment=GOMEMLIMIT=1900m +MemoryHigh=2G +MemoryMax=4G + +# enable for 1-1024 port listening +#AmbientCapabilities=CAP_NET_BIND_SERVICE +# enable to specify a custom path see docs/environment-variables.md for further documentation +#Environment=IPFS_PATH=/custom/ipfs/path +# enable to specify a higher limit for open files/connections +#LimitNOFILE=1000000 + +# Avoid a permission denied error when running `lstat /home/ipfs/.config/ipfs/denylists` +# due to checking $XDG_CONFIG_HOME/ipfs/denylists/ +Environment=XDG_CONFIG_HOME=/etc + +#don't use swap +MemorySwapMax=0 + +# Don't timeout on startup. Opening the IPFS repo can take a long time in some cases (e.g., when +# badger is recovering) and migrations can delay startup. +# +# Ideally, we'd be a bit smarter about this but there's no good way to do that without hooking +# systemd dependencies deeper into go-ipfs. 
+TimeoutStartSec=infinity + +Type=notify +User=ipfs +Group=ipfs +Environment=IPFS_PATH="/var/lib/ipfs" +ExecStartPre=/opt/kubo/ipfs init +ExecStartPre=/opt/kubo/ipfs config --json Gateway.PublicGateways '{"localhost": {"UseSubdomains": false, "Paths": ["/ipfs", "/ipns"]}}' +ExecStartPre=/opt/kubo/ipfs config --json Reprovider.Strategy '"roots"' +ExecStartPre=/opt/kubo/ipfs config --json Swarm.ResourceMgr '{"MaxMemory" : "1GB"}' +ExecStart=/opt/kubo/ipfs daemon --migrate=true --init-profile=server +Restart=on-failure +KillSignal=SIGINT + +[Install] +WantedBy=default.target diff --git a/packaging/debian-12.dockerfile b/packaging/debian-12.dockerfile new file mode 100644 index 000000000..b0a383797 --- /dev/null +++ b/packaging/debian-12.dockerfile @@ -0,0 +1,17 @@ +FROM rust:1.79.0-bookworm + +RUN apt-get update && apt-get -y upgrade && apt-get install -y \ + make \ + git \ + curl \ + sudo \ + python3-pip \ + python3-venv \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /opt +COPY ../src/aleph ./src/aleph +COPY ../packaging ./packaging +COPY ../kernels ./kernels + +COPY ../examples/ ./examples diff --git a/packaging/extract_requirements.sh b/packaging/extract_requirements.sh new file mode 100755 index 000000000..d443a0099 --- /dev/null +++ b/packaging/extract_requirements.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -euf -o pipefail + +export DEBIAN_FRONTEND=noninteractive + +apt update +apt --yes install /opt/packaging/target/aleph-vm.deb +pip freeze > "$1" diff --git a/packaging/repositories/bookworm/conf/distributions b/packaging/repositories/bookworm/conf/distributions new file mode 100644 index 000000000..3891c6001 --- /dev/null +++ b/packaging/repositories/bookworm/conf/distributions @@ -0,0 +1,13 @@ +Origin: Aleph-IM +Label: aleph-im +Suite: stable +Codename: bookworm +Version: 3.0 +Architectures: amd64 source +Components: contrib +#UDebComponents: main +Description: Aleph-im packages +SignWith: yes +#DebOverride: override +#UDebOverride: override +#DscOverride: 
srcoverride diff --git a/packaging/repositories/jammy/conf/distributions b/packaging/repositories/jammy/conf/distributions new file mode 100644 index 000000000..2d5872786 --- /dev/null +++ b/packaging/repositories/jammy/conf/distributions @@ -0,0 +1,13 @@ +Origin: Aleph-IM +Label: aleph-im +Suite: stable +Codename: jammy +Version: 3.0 +Architectures: amd64 source +Components: contrib +#UDebComponents: main +Description: Aleph-im packages +SignWith: yes +#DebOverride: override +#UDebOverride: override +#DscOverride: srcoverride diff --git a/packaging/ubuntu-22.04.dockerfile b/packaging/ubuntu-22.04.dockerfile new file mode 100644 index 000000000..aae40df6e --- /dev/null +++ b/packaging/ubuntu-22.04.dockerfile @@ -0,0 +1,18 @@ +FROM ubuntu:22.04 + +RUN apt-get update && apt-get -y upgrade && apt-get install -y \ + make \ + git \ + curl \ + sudo \ + python3-pip \ + python3-venv \ + cargo \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /opt +COPY ../src/aleph ./src/aleph +COPY ../packaging ./packaging +COPY ../kernels ./kernels + +COPY ../examples/ ./examples diff --git a/packaging/ubuntu-24.04.dockerfile b/packaging/ubuntu-24.04.dockerfile new file mode 100644 index 000000000..1be20519f --- /dev/null +++ b/packaging/ubuntu-24.04.dockerfile @@ -0,0 +1,18 @@ +FROM ubuntu:24.04 + +RUN apt-get update && apt-get -y upgrade && apt-get install -y \ + make \ + git \ + curl \ + sudo \ + python3-pip \ + python3-venv \ + cargo \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /opt +COPY ../src/aleph ./src/aleph +COPY ../packaging ./packaging +COPY ../kernels ./kernels + +COPY ../examples/ ./examples diff --git a/packaging/version_from_git.py b/packaging/version_from_git.py new file mode 100755 index 000000000..10252dc2a --- /dev/null +++ b/packaging/version_from_git.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 + +""" +Set the version number of a package based on the current repository: + +Use the tag if one is available for the current commit. 
+Else default to the short commit id, prefixed by the name of the current branch. + +Pass the path to the target file to edit as an argument. +""" + +import os.path +import re +import subprocess +import sys + +script_path, *args, format_, target_file_path = sys.argv + +for arg in args: + if arg not in ("--inplace", "--stdout"): + print( + "Usage: version_from_git.py [target FILE PATH] [FORMAT] [OPTION...]\n\n" + "set the version number of a Debian package based on the current git commit\n\n" + "supported formats are 'deb' and 'setup.py'\n\n" + " --help print this message\n" + " --inplace edit file in place\n" + " --stdout print the result on stdout\n" + ) + sys.exit(1) + +if not os.path.isfile(target_file_path): + print(f"No such file: '{target_file_path}'") + sys.exit(2) + + +def get_git_version(): + output = subprocess.check_output(("git", "describe", "--tags")) + return output.decode().strip() + + +version = get_git_version() + +with open(target_file_path) as target_file: + target_content = target_file.read() + +if format_ == "deb": + updated_content = re.sub(r"(Version:)\w*(.*)", f"\\1 {version}", target_content) +elif format_ == "setup.py": + updated_content = re.sub(r"(version)\w*=(.*)'", f"\\1='{version}'", target_content) +elif format_ == "__version__": + updated_content = re.sub(r"(__version__)\w*(.*)", f"\\1 = '{version}'", target_content) +else: + print(f"Format must be 'deb', 'setup.py' or '__version__', not '{format_}'") + +if "--inplace" in args: + with open(target_file_path, "w") as target_file: + target_file.write(updated_content) + +if "--stdout" in args: + print(updated_content) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..3a12314d9 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,255 @@ +[build-system] +build-backend = "hatchling.build" + +requires = [ "hatch-vcs", "hatchling" ] + +[project] +name = "aleph-vm" +description = "Aleph.im VM execution engine" +readme = "README.md" 
+keywords = [ ] +license = { file = "LICENSE" } +authors = [ + { name = "Hugo Herter", email = "git@hugoherter.com" }, +] +requires-python = ">=3.10" +classifiers = [ + "Development Status :: 4 - Beta", + "Environment :: Console", + "Framework :: aiohttp", + "Intended Audience :: Information Technology", + "License :: OSI Approved :: MIT License", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: System :: Distributed Computing", +] +dynamic = [ "version" ] + +# Upon adding or updating dependencies, update `packaging/Makefile` for the Debian package +dependencies = [ + "aiodns==3.1", + "aiohttp==3.10.11", + "aiohttp-cors~=0.7.0", + "aioredis==1.3.1", + "aiosqlite==0.19", + "alembic==1.13.1", + "aleph-message==0.6.1", + "aleph-superfluid~=0.2.1", + "dbus-python==1.3.2", + "eth-account~=0.10", + "jsonschema==4.19.1", + "jwcrypto==1.5.6", + "msgpack==1.0.7", + "nftables @ git+https://salsa.debian.org/pkg-netfilter-team/pkg-nftables#egg=nftables&subdirectory=py", + "packaging==23.2", + # Fixing this protobuf dependency version to avoid getting CI errors as version 5.29.0 have this compilation issue + "protobuf==5.28.3", + "psutil==5.9.5", + "py-cpuinfo==9", + "pydantic[dotenv]~=1.10.13", + "pyroute2==0.7.12", + "python-cpuid==0.1.1", + "pyyaml==6.0.1", + "qmp==1.1", + "schedule==1.2.1", + "sentry-sdk==2.8", + "setproctitle==1.3.3", + "solathon==1.0.2", + "sqlalchemy[asyncio]>=2", + "systemd-python==235", +] + +urls.Discussions = "https://community.aleph.im/" +urls.Documentation = "https://docs.aleph.im/nodes/compute/" +urls.Issues = "https://github.com/aleph-im/aleph-vm/issues" +urls.Source = "https://github.com/aleph-im/aleph-vm" +scripts.aleph-vm = "aleph.vm.orchestrator.cli:main" + +[tool.hatch.version] +source = "vcs" + +[tool.hatch.build.targets.wheel] +packages = [ "src/aleph" ] + 
+[tool.hatch.metadata] +allow-direct-references = true + +[tool.hatch.envs.default] +platforms = [ "linux" ] +dependencies = [ + # "git+https://salsa.debian.org/pkg-netfilter-team/pkg-nftables#egg=nftables&subdirectory=py", +] + +[tool.hatch.envs.default.scripts] +orchestrator = "aleph-vm orchestrator run {args:--help}" +config = "aleph-vm orchestrator config {args:--help}" +check = "aleph-vm controller run {args:--help}" + +[tool.hatch.envs.testing] +type = "virtual" +system-packages = true +dependencies = [ + "eth_typing==4.3.1", # Temp fix for bug in CI with 5.0.0 + "pytest==8.2.1", + "pytest-cov==5.0.0", + "pytest-mock==3.14.0", + "pytest-asyncio==0.23.7", + "pytest-aiohttp==1.0.5", +] +[tool.hatch.envs.testing.scripts] +test = "pytest {args:tests}" +test-cov = "pytest --durations=10 --cov {args:tests}" +cov-report = [ + "- coverage combine", + "coverage report", +] +cov = [ + "test-cov", + "cov-report", +] + +[[tool.hatch.envs.all.matrix]] +python = [ "3.10", "3.11", "3.12" ] + +[tool.hatch.envs.linting] +detached = true +dependencies = [ + "mypy==1.8.0", + "ruff==0.4.6", + "isort==5.13.2", + "yamlfix==1.16.1", + "pyproject-fmt==2.2.1", +] +[tool.hatch.envs.linting.scripts] +typing = "mypy {args:src/aleph/vm/ tests/ examples/example_fastapi runtimes/aleph-debian-12-python}" +# Check +style = [ + "ruff format --diff {args:.}", + "isort --check-only --profile black {args:.}", + "yamlfix --check .", + "pyproject-fmt --check pyproject.toml", +] +# Do modification +fmt = [ + "ruff format {args:.}", + "isort --profile black {args:.}", + "yamlfix .", + "pyproject-fmt pyproject.toml", + "style", +] +all = [ + "style", + "typing", +] + +[tool.black] +target-version = [ "py310" ] +line-length = 120 +#skip-string-normalization = true + +[tool.ruff] +target-version = "py310" +line-length = 120 +src = [ "src" ] +lint.select = [ + "A", + "ARG", + "B", + "C", + "DTZ", + "E", + "EM", + "F", + "FBT", + "I", + "ICN", + "ISC", + "N", + "PLC", + "PLE", + "PLR", + "PLW", + "Q", + 
"RUF", + "S", + "T", + "TID", + "UP", + "W", + "YTT", +] +lint.ignore = [ + "ISC001", + # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/#single-line-implicit-string-concatenation-isc001 + # # Allow non-abstract empty methods in abstract base classes + # "B027", + # # Allow boolean positional values in function calls, like `dict.get(... True)` + # "FBT003", + # # Ignore checks for possible passwords + # "S105", "S106", "S107", + # # Ignore complexity + # "C901", "PLR0911", "PLR0912", "PLR0913", "PLR0915", + # Allow the use of assert statements + "S101", +] +#[tool.ruff.flake8-tidy-imports] +#ban-relative-imports = "all" +#unfixable = [ +# # Don't touch unused imports +# "F401", +#] + +# Tests can use magic values, assertions, and relative imports +lint.per-file-ignores."tests/**/*" = [ "PLR2004", "S101", "TID252" ] + +[tool.isort] +profile = "black" +extra_standard_library = [ "packaging" ] + +[tool.pytest.ini_options] +pythonpath = [ + "src", +] +testpaths = [ + "tests", +] +norecursedirs = [ + "runtimes/aleph-debian-11-python/rootfs/", + "runtimes/aleph-debian-12-python/rootfs/", +] + +[tool.coverage.run] +source_pkgs = [ "aleph.vm", "tests" ] +branch = true +parallel = true +omit = [ + "src/aleph/vm/__about__.py", +] + +[tool.coverage.paths] +aleph_vm = [ "src/aleph/vm", "*/aleph-vm/src/aleph/vm" ] +tests = [ "tests", "*/aleph-vm/tests" ] + +[tool.coverage.report] +exclude_lines = [ + "no cov", + "if __name__ == .__main__.:", + "if TYPE_CHECKING:", +] + +[tool.mypy] +python_version = "3.10" +install_types = true +non_interactive = true +ignore_missing_imports = true +explicit_package_bases = true +check_untyped_defs = true + +[tool.yamlfix] +sequence_style = "keep_style" +preserve_quotes = true +whitelines = 1 +section_whitelines = 2 diff --git a/runtimes/aleph-alpine-3.13-python/create_disk_image.sh b/runtimes/aleph-alpine-3.13-python/create_disk_image.sh deleted file mode 100644 index 671565cae..000000000 --- 
a/runtimes/aleph-alpine-3.13-python/create_disk_image.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/sh - -set -euf - -curl -fsSL -o ./alpine-miniroot.tgz https://dl-cdn.alpinelinux.org/alpine/v3.13/releases/x86_64/alpine-minirootfs-3.13.3-x86_64.tar.gz - -dd if=/dev/zero of=./rootfs.ext4 bs=1M count=100 -mkfs.ext4 ./rootfs.ext4 -mkdir -p /mnt/rootfs -mount ./rootfs.ext4 /mnt/rootfs -tar --preserve-permissions --same-owner -xf ./alpine-miniroot.tgz --directory /mnt/rootfs - -cat /etc/resolv.conf > /mnt/rootfs/etc/resolv.conf - -chroot /mnt/rootfs /bin/sh < /etc/securetty -EOT - -echo "PermitRootLogin yes" >> /mnt/rootfs/etc/ssh/sshd_config - -# Generate SSH host keys -systemd-nspawn -D /mnt/rootfs/ ssh-keygen -q -N "" -t dsa -f /etc/ssh/ssh_host_dsa_key -systemd-nspawn -D /mnt/rootfs/ ssh-keygen -q -N "" -t rsa -b 4096 -f /etc/ssh/ssh_host_rsa_key -systemd-nspawn -D /mnt/rootfs/ ssh-keygen -q -N "" -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -systemd-nspawn -D /mnt/rootfs/ ssh-keygen -q -N "" -t ed25519 -f /etc/ssh/ssh_host_ed25519_key - -cat < /mnt/rootfs/etc/inittab -# /etc/inittab - -::sysinit:/sbin/init sysinit -::sysinit:/sbin/init boot -::wait:/sbin/init default - -# Set up a couple of getty's -tty1::respawn:/sbin/getty 38400 tty1 -tty2::respawn:/sbin/getty 38400 tty2 -tty3::respawn:/sbin/getty 38400 tty3 -tty4::respawn:/sbin/getty 38400 tty4 -tty5::respawn:/sbin/getty 38400 tty5 -tty6::respawn:/sbin/getty 38400 tty6 - -# Put a getty on the serial port -ttyS0::respawn:/sbin/getty -L ttyS0 115200 vt100 - -# Stuff to do for the 3-finger salute -::ctrlaltdel:/sbin/reboot - -# Stuff to do before rebooting -::shutdown:/sbin/init shutdown -EOT - -# Custom init -mv /mnt/rootfs/sbin/init /mnt/rootfs/sbin/init.copy -cp ./init0.sh /mnt/rootfs/sbin/init -cp ./init1.py /mnt/rootfs/root/init1.py -chmod +x /mnt/rootfs/sbin/init -chmod +x /mnt/rootfs/root/init1.py - -umount /mnt/rootfs diff --git a/runtimes/aleph-alpine-3.13-python/init1.py 
b/runtimes/aleph-alpine-3.13-python/init1.py deleted file mode 100644 index 038318484..000000000 --- a/runtimes/aleph-alpine-3.13-python/init1.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/python3 -import asyncio -import json -import os -import socket -import subprocess -import sys -import traceback -from base64 import b64decode -from os import system -from io import StringIO -from contextlib import redirect_stdout - -s = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) -s.bind((socket.VMADDR_CID_ANY, 52)) -s.listen() - -# Send we are ready -s0 = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) -s0.connect((2, 52)) -s0.close() - -print("INIT1 READY") - - -class Encoding: - plain = "plain" - zip = "zip" - - -async def run_python_code_http(code: str, entrypoint: str, encoding: str, scope: dict): - if encoding == Encoding.zip: - # Unzip in /opt and import the entrypoint from there - decoded: bytes = b64decode(code) - open("/opt/archive.zip", "wb").write(decoded) - os.system("unzip /opt/archive.zip -d /opt") - sys.path.append("/opt") - module_name, app_name = entrypoint.split(":", 1) - module = __import__(module_name) - app = getattr(module, app_name) - elif encoding == Encoding.plain: - # Execute the code and extract the entrypoint - locals = {} - exec(code, globals(), locals) - app = locals[entrypoint] - else: - raise ValueError(f"Unknown encoding '{encoding}'") - - with StringIO() as buf, redirect_stdout(buf): - # Execute in the same process, saves ~20ms than a subprocess - async def receive(): - pass - - send_queue = asyncio.Queue() - - async def send(dico): - await send_queue.put(dico) - - await app(scope, receive, send) - headers = await send_queue.get() - body = await send_queue.get() - output = buf.getvalue() - return headers, body, output - - -while True: - client, addr = s.accept() - data = client.recv(1000_1000) # Max 1 Mo - print("CID: {} port:{} data: {}".format(addr[0], addr[1], data)) - - msg = data.decode().strip() - - print("msg", [msg]) - if msg 
== "halt": - system("sync") - client.send(b"STOP\n") - sys.exit() - elif msg.startswith("!"): - # Shell - msg = msg[1:] - try: - output = subprocess.check_output(msg, stderr=subprocess.STDOUT, shell=True) - client.send(output) - except subprocess.CalledProcessError as error: - client.send(str(error).encode() + b"\n" + error.output) - else: - # Python - msg_ = json.loads(msg) - code = msg_["code"] - entrypoint = msg_["entrypoint"] - scope = msg_["scope"] - encoding = msg_["encoding"] - try: - headers, body, output = asyncio.get_event_loop().run_until_complete( - run_python_code_http( - code, entrypoint=entrypoint, encoding=encoding, scope=scope - ) - ) - client.send(body["body"]) - except Exception as error: - client.send(str(error).encode() + str(traceback.format_exc()).encode()) - - print("...DONE") - client.close() diff --git a/runtimes/aleph-debian-12-python/create_disk_image.sh b/runtimes/aleph-debian-12-python/create_disk_image.sh new file mode 100755 index 000000000..98fbb2766 --- /dev/null +++ b/runtimes/aleph-debian-12-python/create_disk_image.sh @@ -0,0 +1,100 @@ +#!/bin/sh + +rm ./rootfs.squashfs + +set -euf + +rm -fr ./rootfs +mkdir ./rootfs + +debootstrap --variant=minbase bookworm ./rootfs http://deb.debian.org/debian/ + +chroot ./rootfs /bin/sh < /etc/locale.gen +locale-gen en_US.UTF-8 + +echo "Pip installing aleph-sdk-python" +mkdir -p /opt/aleph/libs +# Fixing this protobuf dependency version to avoid getting CI errors as version 5.29.0 have this compilation issue. 
+pip3 install --target /opt/aleph/libs 'aleph-sdk-python==1.0.0' 'aleph-message==0.4.9' 'fastapi~=0.109.2' 'protobuf==5.28.3' + +# Compile Python code to bytecode for faster execution +# -o2 is needed to compile with optimization level 2 which is what we launch init1.py ("python -OO") +# otherwise they are not used +python3 -m compileall -o 2 -f /usr/local/lib/python3.11 +python3 -m compileall -o 2 -f /opt/aleph/libs + +echo "PubkeyAuthentication yes" >> /etc/ssh/sshd_config +echo "PasswordAuthentication no" >> /etc/ssh/sshd_config +echo "ChallengeResponseAuthentication no" >> /etc/ssh/sshd_config +echo "PermitRootLogin yes" >> /etc/ssh/sshd_config + +mkdir -p /overlay + +# Set up a login terminal on the serial console (ttyS0): +ln -s agetty /etc/init.d/agetty.ttyS0 +echo ttyS0 > /etc/securetty +EOT + +cat < ./rootfs/etc/inittab +# /etc/inittab + +::sysinit:/sbin/init sysinit +::sysinit:/sbin/init boot +::wait:/sbin/init default + +# Set up a couple of getty's +tty1::respawn:/sbin/getty 38400 tty1 +tty2::respawn:/sbin/getty 38400 tty2 +tty3::respawn:/sbin/getty 38400 tty3 +tty4::respawn:/sbin/getty 38400 tty4 +tty5::respawn:/sbin/getty 38400 tty5 +tty6::respawn:/sbin/getty 38400 tty6 + +# Put a getty on the serial port +ttyS0::respawn:/sbin/getty -L ttyS0 115200 vt100 + +# Stuff to do for the 3-finger salute +::ctrlaltdel:/sbin/reboot + +# Stuff to do before rebooting +::shutdown:/sbin/init shutdown +EOT + +# Reduce size +rm -fr ./rootfs/root/.cache +rm -fr ./rootfs/var/cache +mkdir -p ./rootfs/var/cache/apt/archives/partial +rm -fr ./rootfs/usr/share/doc +rm -fr ./rootfs/usr/share/man +rm -fr ./rootfs/var/lib/apt/lists/ + +# Custom init +cp ./init0.sh ./rootfs/sbin/init +cp ./init1.py ./rootfs/root/init1.py +cp ./loading.html ./rootfs/root/loading.html +chmod +x ./rootfs/sbin/init +chmod +x ./rootfs/root/init1.py + +mksquashfs ./rootfs/ ./rootfs.squashfs diff --git a/runtimes/aleph-alpine-3.13-python/init0.sh b/runtimes/aleph-debian-12-python/init0.sh similarity 
index 53% rename from runtimes/aleph-alpine-3.13-python/init0.sh rename to runtimes/aleph-debian-12-python/init0.sh index db66b5146..7acdca08f 100644 --- a/runtimes/aleph-alpine-3.13-python/init0.sh +++ b/runtimes/aleph-debian-12-python/init0.sh @@ -2,10 +2,13 @@ set -euf -echo "=== My Bash RC ===" - mount -t proc proc /proc -o nosuid,noexec,nodev +log() { + echo "$(awk '{print $1}' /proc/uptime)" '|S' "$@" +} +log "init0.sh is launching" + # Switch root from read-only ext4 to to read-write overlay mkdir -p /overlay /bin/mount -t tmpfs -o noatime,mode=0755 tmpfs /overlay @@ -17,11 +20,6 @@ pivot_root /mnt /mnt/rom mount --move /rom/proc /proc mount --move /rom/dev /dev -echo "Mounts" - -ls / -ls /dev - mkdir -p /dev/pts mkdir -p /dev/shm @@ -31,18 +29,30 @@ mount -t tmpfs run /run -o mode=0755,nosuid,nodev mount -t devpts devpts /dev/pts -o mode=0620,gid=5,nosuid,noexec mount -t tmpfs shm /dev/shm -omode=1777,nosuid,nodev -# TODO: Move in init1 -ip addr add 172.16.0.2/24 dev eth0 -ip link set eth0 up -ip route add default via 172.16.0.1 dev eth0 -ip addr +# Required by Docker +cgroupfs-mount +update-alternatives --set iptables /usr/sbin/iptables-legacy +update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy -echo "Net up" +# Enable the following to force the storage driver used by Docker. 
+# See https://docs.docker.com/storage/storagedriver/select-storage-driver/ +#echo '{\n"storage-driver": "overlay2"\n}\n' > /etc/docker/daemon.json + +# List block devices +lsblk + +#cat /proc/sys/kernel/random/entropy_avail # TODO: Move in init1 -#/usr/sbin/sshd -E /var/log/sshd & -# -#echo "SSH UP" +mkdir -p /run/sshd +/usr/sbin/sshd & +log "SSH UP" + +log "Setup socat" +socat UNIX-LISTEN:/tmp/socat-socket,fork,reuseaddr VSOCK-CONNECT:2:53 & +log "Socat ready" + +export PYTHONPATH=/opt/aleph/libs # Replace this script with the manager exec /root/init1.py diff --git a/runtimes/aleph-debian-12-python/init1.py b/runtimes/aleph-debian-12-python/init1.py new file mode 100644 index 000000000..80ab0fd84 --- /dev/null +++ b/runtimes/aleph-debian-12-python/init1.py @@ -0,0 +1,625 @@ +#!/usr/bin/python3 -OO +import base64 +import logging +from pathlib import Path + +logging.basicConfig( + level=logging.DEBUG, + format="%(relativeCreated)4f |V %(levelname)s | %(message)s", +) +logger = logging.getLogger(__name__) + +logger.debug("Imports starting") + +import asyncio +import ctypes +import os +import socket +import subprocess +import sys +import traceback +from collections.abc import AsyncIterable +from contextlib import redirect_stdout +from dataclasses import dataclass, field +from enum import Enum +from io import StringIO +from os import system +from shutil import make_archive +from typing import Any, Literal, NewType, Optional, Union, cast + +import aiohttp +import msgpack + +logger.debug("Imports finished") + +__version__ = "2.0.0" +ASGIApplication = NewType("ASGIApplication", Any) # type: ignore + + +class Encoding(str, Enum): + plain = "plain" + zip = "zip" + squashfs = "squashfs" + + +class Interface(str, Enum): + asgi = "asgi" + executable = "executable" + + +class ShutdownException(Exception): + pass + + +@dataclass +class Volume: + mount: str + device: str + read_only: bool + + +@dataclass +class ConfigurationPayload: + input_data: bytes + interface: Interface + 
vm_hash: str + code: bytes + encoding: Encoding + entrypoint: str + ip: Optional[str] = None + ipv6: Optional[str] = None + route: Optional[str] = None + ipv6_gateway: Optional[str] = None + dns_servers: list[str] = field(default_factory=list) + volumes: list[Volume] = field(default_factory=list) + variables: Optional[dict[str, str]] = None + authorized_keys: Optional[list[str]] = None + + +@dataclass +class RunCodePayload: + scope: dict + + +# Open a socket to receive instructions from the host +s = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) +s.bind((socket.VMADDR_CID_ANY, 52)) +s.listen() + +# Send the host that we are ready +s0 = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) +s0.connect((2, 52)) +s0.sendall(msgpack.dumps({"version": __version__})) +s0.close() + +# Configure aleph-client to use the guest API +os.environ["ALEPH_INIT_VERSION"] = __version__ +os.environ["ALEPH_API_HOST"] = "http://localhost" +os.environ["ALEPH_API_UNIX_SOCKET"] = "/tmp/socat-socket" +os.environ["ALEPH_REMOTE_CRYPTO_HOST"] = "http://localhost" +os.environ["ALEPH_REMOTE_CRYPTO_UNIX_SOCKET"] = "/tmp/socat-socket" + +logger.debug("init1.py is launching") + + +def setup_hostname(hostname: str): + os.environ["ALEPH_ADDRESS_TO_USE"] = hostname + system(f"hostname {hostname}") + + +def setup_variables(variables: Optional[dict[str, str]]): + if variables is None: + return + for key, value in variables.items(): + os.environ[key] = value + + +def setup_network( + ipv4: Optional[str], + ipv6: Optional[str], + ipv4_gateway: Optional[str], + ipv6_gateway: Optional[str], + dns_servers: Optional[list[str]] = None, +): + """Setup the system with info from the host.""" + dns_servers = dns_servers or [] + if not os.path.exists("/sys/class/net/eth0"): + logger.error("No network interface eth0") + return + + # Configure loopback networking + system("ip addr add 127.0.0.1/8 dev lo brd + scope host") + system("ip addr add ::1/128 dev lo") + system("ip link set lo up") + + # Forward 
compatibility with future supervisors that pass the mask with the IP. + if ipv4 and ("/" not in ipv4): + logger.warning("Not passing the mask with the IP is deprecated and will be unsupported") + ipv4 = f"{ipv4}/24" + + addresses = [ip for ip in [ipv4, ipv6] if ip] + gateways = [gateway for gateway in [ipv4_gateway, ipv6_gateway] if gateway] + + for address in addresses: + system(f"ip addr add {address} dev eth0") + + # Interface must be up before a route can use it + if addresses: + system("ip link set eth0 up") + else: + logger.debug("No ip address provided") + + for gateway in gateways: + system(f"ip route add default via {gateway} dev eth0") + + if not gateways: + logger.debug("No ip gateway provided") + + with open("/etc/resolv.conf", "wb") as resolvconf_fd: + for server in dns_servers: + resolvconf_fd.write(f"nameserver {server}\n".encode()) + + +def setup_input_data(input_data: bytes): + logger.debug("Extracting data") + if input_data: + # Unzip in /data + if not os.path.exists("/opt/input.zip"): + open("/opt/input.zip", "wb").write(input_data) + os.makedirs("/data", exist_ok=True) + os.system("unzip -q /opt/input.zip -d /data") + + +def setup_authorized_keys(authorized_keys: list[str]) -> None: + path = Path("/root/.ssh/authorized_keys") + path.parent.mkdir(exist_ok=True) + path.write_text("\n".join(key for key in authorized_keys)) + + +def setup_volumes(volumes: list[Volume]): + for volume in volumes: + logger.debug(f"Mounting /dev/{volume.device} on {volume.mount}") + os.makedirs(volume.mount, exist_ok=True) + if volume.read_only: + system(f"mount -t squashfs -o ro /dev/{volume.device} {volume.mount}") + else: + system(f"mount -o rw /dev/{volume.device} {volume.mount}") + + system("mount") + + +async def wait_for_lifespan_event_completion( + application: ASGIApplication, event: Union[Literal["startup", "shutdown"]] +): + """ + Send the startup lifespan signal to the ASGI app. 
+ Specification: https://asgi.readthedocs.io/en/latest/specs/lifespan.html + """ + + lifespan_completion = asyncio.Event() + + async def receive(): + return { + "type": f"lifespan.{event}", + } + + async def send(response: dict): + response_type = response.get("type") + if response_type == f"lifespan.{event}.complete": + lifespan_completion.set() + return + else: + logger.warning(f"Unexpected response to {event}: {response_type}") + + while not lifespan_completion.is_set(): + await application( + scope={ + "type": "lifespan", + }, + receive=receive, + send=send, + ) + + +async def setup_code_asgi(code: bytes, encoding: Encoding, entrypoint: str) -> ASGIApplication: + # Allow importing packages from /opt/packages, give it priority + sys.path.insert(0, "/opt/packages") + + logger.debug("Extracting code") + app: ASGIApplication + if encoding == Encoding.squashfs: + sys.path.insert(0, "/opt/code") + module_name, app_name = entrypoint.split(":", 1) + logger.debug("import module") + module = __import__(module_name) + for level in module_name.split(".")[1:]: + module = getattr(module, level) + app = getattr(module, app_name) + elif encoding == Encoding.zip: + # Unzip in /opt and import the entrypoint from there + if not os.path.exists("/opt/archive.zip"): + open("/opt/archive.zip", "wb").write(code) + logger.debug("Run unzip") + os.system("unzip -q /opt/archive.zip -d /opt") + sys.path.insert(0, "/opt") + module_name, app_name = entrypoint.split(":", 1) + logger.debug("import module") + module = __import__(module_name) + for level in module_name.split(".")[1:]: + module = getattr(module, level) + logger.debug("import done") + app = getattr(module, app_name) + elif encoding == Encoding.plain: + # Execute the code and extract the entrypoint + locals: dict[str, Any] = {} + exec(code, globals(), locals) + app = locals[entrypoint] + else: + raise ValueError(f"Unknown encoding '{encoding}'") + await wait_for_lifespan_event_completion(application=app, event="startup") + return 
ASGIApplication(app) + + +def setup_code_executable(code: bytes, encoding: Encoding, entrypoint: str) -> subprocess.Popen: + logger.debug("Extracting code") + if encoding == Encoding.squashfs: + path = f"/opt/code/{entrypoint}" + if not os.path.isfile(path): + os.system("find /opt/code/") + raise FileNotFoundError(f"No such file: {path}") + os.system(f"chmod +x {path}") + elif encoding == Encoding.zip: + open("/opt/archive.zip", "wb").write(code) + logger.debug("Run unzip") + os.makedirs("/opt/code", exist_ok=True) + os.system("unzip /opt/archive.zip -d /opt/code") + path = f"/opt/code/{entrypoint}" + if not os.path.isfile(path): + os.system("find /opt/code") + raise FileNotFoundError(f"No such file: {path}") + os.system(f"chmod +x {path}") + elif encoding == Encoding.plain: + os.makedirs("/opt/code", exist_ok=True) + path = f"/opt/code/executable {entrypoint}" + open(path, "wb").write(code) + os.system(f"chmod +x {path}") + else: + raise ValueError(f"Unknown encoding '{encoding}'. This should never happen.") + + process = subprocess.Popen(path) + return process + + +async def setup_code( + code: bytes, + encoding: Encoding, + entrypoint: str, + interface: Interface, +) -> Union[ASGIApplication, subprocess.Popen]: + if interface == Interface.asgi: + return await setup_code_asgi(code=code, encoding=encoding, entrypoint=entrypoint) + elif interface == Interface.executable: + return setup_code_executable(code=code, encoding=encoding, entrypoint=entrypoint) + else: + raise ValueError("Invalid interface. 
This should never happen.") + + +async def run_python_code_http(application: ASGIApplication, scope: dict) -> tuple[dict, dict, str, Optional[bytes]]: + logger.debug("Running code") + # Execute in the same process, saves ~20ms than a subprocess + + # The body should not be part of the ASGI scope itself + scope_body: bytes = scope.pop("body") + + async def receive(): + type_ = "http.request" if scope["type"] in ("http", "websocket") else "aleph.message" + return {"type": type_, "body": scope_body, "more_body": False} + + send_queue: asyncio.Queue = asyncio.Queue() + + async def send(dico): + await send_queue.put(dico) + + # TODO: Better error handling + logger.debug("Awaiting application...") + await application(scope, receive, send) + + logger.debug("Waiting for headers") + headers: dict + if scope["type"] == "http": + headers = await send_queue.get() + else: + headers = {} + + logger.debug("Waiting for body") + response_body: dict = await send_queue.get() + + logger.debug("Waiting for buffer") + + logger.debug(f"Headers {headers}") + logger.debug(f"Body {response_body}") + + # Since Python code runs asynchronously in the same process, sharing the global sys.stdout, prints from an + # individual call cannot be isolated from other calls. 
+ output = "" + + logger.debug("Getting output data") + output_data: bytes + if os.path.isdir("/data") and os.listdir("/data"): + make_archive("/opt/output", "zip", "/data") + with open("/opt/output.zip", "rb") as output_zipfile: + output_data = output_zipfile.read() + else: + output_data = b"" + + logger.debug("Returning result") + return headers, response_body, output, output_data + + +async def make_request(session, scope): + async with session.request( + scope["method"], + url="http://localhost:8080{}".format(scope["path"]), + params=scope["query_string"], + headers=[(a.decode("utf-8"), b.decode("utf-8")) for a, b in scope["headers"]], + data=scope.get("body", None), + ) as resp: + headers = { + "headers": [(a.encode("utf-8"), b.encode("utf-8")) for a, b in resp.headers.items()], + "status": resp.status, + } + body = {"body": await resp.content.read()} + return headers, body + + +def show_loading(): + body = {"body": Path("/root/loading.html").read_text()} + headers = { + "headers": [ + [b"Content-Type", b"text/html"], + [b"Connection", b"keep-alive"], + [b"Keep-Alive", b"timeout=5"], + [b"Transfer-Encoding", b"chunked"], + ], + "status": 503, + } + return headers, body + + +async def run_executable_http(scope: dict) -> tuple[dict, dict, str, Optional[bytes]]: + logger.debug("Calling localhost") + + tries = 0 + headers = None + body = None + + timeout = aiohttp.ClientTimeout(total=5) + async with aiohttp.ClientSession(timeout=timeout) as session: + while not body: + try: + tries += 1 + headers, body = await make_request(session, scope) + except aiohttp.ClientConnectorError: + if tries > 20: + headers, body = show_loading() + await asyncio.sleep(0.05) + + output = "" # Process stdout is not captured per request + output_data = None + logger.debug("Returning result") + return headers, body, output, output_data + + +async def process_instruction( + instruction: bytes, + interface: Interface, + application: Union[ASGIApplication, subprocess.Popen], +) -> 
AsyncIterable[bytes]: + if instruction == b"halt": + logger.info("Received halt command") + system("sync") + logger.debug("Filesystems synced") + if isinstance(application, subprocess.Popen): + application.terminate() + logger.debug("Application terminated") + # application.communicate() + else: + await wait_for_lifespan_event_completion(application=application, event="shutdown") + yield b"STOP\n" + logger.debug("Supervisor informed of halt") + raise ShutdownException + elif instruction.startswith(b"!"): + # Execute shell commands in the form `!ls /` + msg = instruction[1:].decode() + try: + process_output = subprocess.check_output(msg, stderr=subprocess.STDOUT, shell=True) + yield process_output + except subprocess.CalledProcessError as error: + yield str(error).encode() + b"\n" + error.output + else: + # Python + logger.debug("msgpack.loads (") + msg_ = msgpack.loads(instruction, raw=False) + logger.debug("msgpack.loads )") + payload = RunCodePayload(**msg_) + + output: Optional[str] = None + try: + headers: dict + body: dict + output_data: Optional[bytes] + + if interface == Interface.asgi: + application = cast(ASGIApplication, application) + headers, body, output, output_data = await run_python_code_http( + application=application, scope=payload.scope + ) + elif interface == Interface.executable: + headers, body, output, output_data = await run_executable_http(scope=payload.scope) + else: + raise ValueError("Unknown interface. 
This should never happen") + + result = { + "headers": headers, + "body": body, + "output": output, + "output_data": output_data, + } + yield msgpack.dumps(result, use_bin_type=True) + except Exception as error: + yield msgpack.dumps( + { + "error": str(error), + "traceback": str(traceback.format_exc()), + "output": output, + } + ) + + +def receive_data_length(client) -> int: + """Receive the length of the data to follow.""" + buffer = b"" + for _ in range(9): + byte = client.recv(1) + if byte == b"\n": + break + else: + buffer += byte + return int(buffer) + + +def load_configuration(data: bytes) -> ConfigurationPayload: + msg_ = msgpack.loads(data, raw=False) + msg_["volumes"] = [Volume(**volume_dict) for volume_dict in msg_.get("volumes")] + return ConfigurationPayload(**msg_) + + +def receive_config(client) -> ConfigurationPayload: + length = receive_data_length(client) + data = b"" + while len(data) < length: + data += client.recv(1024 * 1024) + return load_configuration(data) + + +def setup_system(config: ConfigurationPayload): + # Linux host names are limited to 63 characters. We therefore use the base32 representation + # of the item_hash instead of its common base16 representation. 
+ item_hash_binary: bytes = base64.b16decode(config.vm_hash.encode().upper()) + hostname = base64.b32encode(item_hash_binary).decode().strip("=").lower() + setup_hostname(hostname) + + setup_variables(config.variables) + setup_volumes(config.volumes) + setup_network( + ipv4=config.ip, + ipv6=config.ipv6, + ipv4_gateway=config.route, + ipv6_gateway=config.ipv6_gateway, + dns_servers=config.dns_servers, + ) + setup_input_data(config.input_data) + if authorized_keys := config.authorized_keys: + setup_authorized_keys(authorized_keys) + logger.debug("Setup finished") + + +def umount_volumes(volumes: list[Volume]): + "Umount user related filesystems" + system("sync") + for volume in volumes: + logger.debug(f"Umounting /dev/{volume.device} on {volume.mount}") + system(f"umount {volume.mount}") + + +async def main() -> None: + client, addr = s.accept() + + logger.debug("Receiving setup...") + config = receive_config(client) + setup_system(config) + + try: + app: Union[ASGIApplication, subprocess.Popen] = await setup_code( + config.code, config.encoding, config.entrypoint, config.interface + ) + client.send(msgpack.dumps({"success": True})) + except Exception as error: + client.send( + msgpack.dumps( + { + "success": False, + "error": str(error), + "traceback": str(traceback.format_exc()), + } + ) + ) + logger.exception("Program could not be started") + raise + + class ServerReference: + "Reference used to close the server from within `handle_instruction" + + server: asyncio.AbstractServer + + server_reference = ServerReference() + + async def handle_instruction(reader, writer): + data = await reader.read(1000_1000) # Max 1 Mo + + logger.debug("Init received msg") + if logger.level <= logging.DEBUG: + data_to_print = f"{data[:500]}..." 
if len(data) > 500 else data + logger.debug(f"<<<\n\n{data_to_print}\n\n>>>") + + try: + async for result in process_instruction(instruction=data, interface=config.interface, application=app): + writer.write(result) + await writer.drain() + + logger.debug("Instruction processed") + except ShutdownException: + logger.info("Initiating shutdown") + writer.write(b"STOPZ\n") + await writer.drain() + logger.debug("Shutdown confirmed to supervisor") + server_reference.server.close() + logger.debug("Supervisor socket server closed") + finally: + writer.close() + + server = await asyncio.start_server(handle_instruction, sock=s) + server_reference.server = server + + addr = server.sockets[0].getsockname() + print(f"Serving on {addr}") + + try: + async with server: + await server.serve_forever() + except asyncio.CancelledError: + logger.debug("Server was properly cancelled") + finally: + logger.warning("System shutdown") + server.close() + logger.debug("Server closed") + umount_volumes(config.volumes) + logger.debug("User volumes unmounted") + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + asyncio.run(main()) + + logger.info("Unmounting system filesystems") + system("umount /dev/shm") + system("umount /dev/pts") + system("umount -a") + + logger.info("Sending reboot syscall") + # Send reboot syscall, see man page + # https://man7.org/linux/man-pages/man2/reboot.2.html + libc = ctypes.CDLL(None) + libc.syscall(169, 0xFEE1DEAD, 672274793, 0x1234567, None) + # The exit should not happen due to system halt. + sys.exit(0) diff --git a/runtimes/aleph-debian-12-python/loading.html b/runtimes/aleph-debian-12-python/loading.html new file mode 100644 index 000000000..da9128c40 --- /dev/null +++ b/runtimes/aleph-debian-12-python/loading.html @@ -0,0 +1,346 @@ + + + VM Loading + + + + + +
    +
    + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    +
    +
    Whoops!
    +
    Seems like your VM is still loading, please wait...
    +
    + + Refresh! +
    + +
    + + diff --git a/runtimes/aleph-debian-12-python/update_inits.sh b/runtimes/aleph-debian-12-python/update_inits.sh new file mode 100755 index 000000000..55a1c99b1 --- /dev/null +++ b/runtimes/aleph-debian-12-python/update_inits.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +rm ./rootfs.squashfs + +set -euf + +cp ./init0.sh ./rootfs/sbin/init +cp ./init1.py ./rootfs/root/init1.py +chmod +x ./rootfs/sbin/init +chmod +x ./rootfs/root/init1.py + +mksquashfs ./rootfs/ ./rootfs.squashfs + +echo "OK" diff --git a/runtimes/instance-rootfs/create-debian-12-disk.sh b/runtimes/instance-rootfs/create-debian-12-disk.sh new file mode 100755 index 000000000..cfa0130a5 --- /dev/null +++ b/runtimes/instance-rootfs/create-debian-12-disk.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +set -euf + +# Variables +ROOTFS_FILE="./debian-12.btrfs" +MOUNT_ORIGIN_DIR="/mnt/debian" +MOUNT_DIR="/mnt/vm" +IMAGE_URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.tar.xz" +IMAGE_NAME="debian-12-genericcloud.tar.xz" +IMAGE_RAW_NAME="disk.raw" + +# Cleanup previous run +umount "$MOUNT_ORIGIN_DIR" || true +umount "$MOUNT_DIR" || true +rm -f "$ROOTFS_FILE" + +# Prepare directories +mkdir -p "$MOUNT_ORIGIN_DIR" +mkdir -p "$MOUNT_DIR" + +# Download Debian image +echo "Downloading Debian 12 image" +curl -L "$IMAGE_URL" -o "$IMAGE_NAME" + +# Allocate 1GB rootfs.btrfs file +echo "Allocate 1GB $ROOTFS_FILE file" +fallocate -l 1G "$ROOTFS_FILE" +mkfs.btrfs -m single --label root "$ROOTFS_FILE" +mount "$ROOTFS_FILE" "$MOUNT_DIR" + +# Extract Debian image +echo "Extracting Debian 12 image" +tar xvf "$IMAGE_NAME" + +# Mount first partition of Debian Image +LOOPDISK=$(losetup --find --show $IMAGE_RAW_NAME) +partx -u "$LOOPDISK" +mount "$LOOPDISK"p1 "$MOUNT_ORIGIN_DIR" + +# Fix boot partition missing +sed -i '$d' "$MOUNT_ORIGIN_DIR"/etc/fstab + +# Copy Debian image to rootfs +echo "Copying Debian 12 image to $ROOTFS_FILE file" +cp -vap "$MOUNT_ORIGIN_DIR/." 
"$MOUNT_DIR"
+
+# Cleanup and unmount
+umount "$MOUNT_ORIGIN_DIR"
+partx -d "$LOOPDISK"
+losetup -d "$LOOPDISK"
+umount "$MOUNT_DIR"
+rm "$IMAGE_RAW_NAME"
+rm "$IMAGE_NAME"
diff --git a/runtimes/instance-rootfs/create-debian-12-qemu-disk.sh b/runtimes/instance-rootfs/create-debian-12-qemu-disk.sh
new file mode 100755
index 000000000..1a4df4235
--- /dev/null
+++ b/runtimes/instance-rootfs/create-debian-12-qemu-disk.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -euf
+
+# Variables
+ROOTFS_FILENAME="./rootfs.img"
+IMAGE_URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2"
+IMAGE_NAME="debian-12-genericcloud-amd64.qcow2"
+
+# Cleanup previous run
+rm -f "$ROOTFS_FILENAME"
+
+# Download Debian image
+echo "Downloading Debian 12 image"
+curl -L "$IMAGE_URL" -o "$IMAGE_NAME"
+
+# Rename final file
+mv "$IMAGE_NAME" "$ROOTFS_FILENAME"
diff --git a/runtimes/instance-rootfs/create-ubuntu-22-04-disk.sh b/runtimes/instance-rootfs/create-ubuntu-22-04-disk.sh
new file mode 100755
index 000000000..e6a4589e9
--- /dev/null
+++ b/runtimes/instance-rootfs/create-ubuntu-22-04-disk.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+set -euf
+
+# Variables
+ROOTFS_FILE="./ubuntu-22-04.btrfs"
+ROOTFS_DIR="./rootfs"
+MOUNT_DIR="/mnt/vm"
+IMAGE_URL="https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64-root.tar.xz"
+IMAGE_NAME="jammy-server-cloudimg-root.tar.xz"
+
+# Cleanup previous run
+umount "$MOUNT_DIR" || true
+rm -f "$ROOTFS_FILE"
+rm -rf "$ROOTFS_DIR"
+
+# Prepare directories
+mkdir -p "$MOUNT_DIR"
+mkdir -p "$ROOTFS_DIR"
+
+# Download Ubuntu image
+echo "Downloading Ubuntu 22.04 image"
+curl -L "$IMAGE_URL" -o "$IMAGE_NAME"
+
+# Allocate 1,4 GB rootfs.btrfs file
+echo "Allocate 1,4 GB $ROOTFS_FILE file"
+fallocate -l 1400M "$ROOTFS_FILE"
+mkfs.btrfs "$ROOTFS_FILE"
+mount "$ROOTFS_FILE" "$MOUNT_DIR"
+
+# Extract Ubuntu image to rootfs
+echo "Extracting Ubuntu 22.04 image"
+tar xvf "$IMAGE_NAME" -C "$MOUNT_DIR"
+
+# Cleanup and unmount
+umount "$MOUNT_DIR"
+rm -rf "$ROOTFS_DIR"
+rm "$IMAGE_NAME"
diff --git a/runtimes/instance-rootfs/create-ubuntu-22-04-qemu-disk.sh b/runtimes/instance-rootfs/create-ubuntu-22-04-qemu-disk.sh
new file mode 100755
index 000000000..71738ba77
--- /dev/null
+++ b/runtimes/instance-rootfs/create-ubuntu-22-04-qemu-disk.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -euf
+
+# Variables
+ROOTFS_FILENAME="./rootfs.img"
+IMAGE_URL="https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64-disk-kvm.img"
+IMAGE_NAME="jammy-server-cloudimg-amd64-disk-kvm.img"
+
+# Cleanup previous run
+rm -f "$ROOTFS_FILENAME"
+
+# Download Ubuntu image
+echo "Downloading Ubuntu 22.04 image"
+curl -L "$IMAGE_URL" -o "$IMAGE_NAME"
+
+# Rename final file
+mv "$IMAGE_NAME" "$ROOTFS_FILENAME"
diff --git a/runtimes/instance-rootfs/create-ubuntu-24-04-qemu-disk.sh b/runtimes/instance-rootfs/create-ubuntu-24-04-qemu-disk.sh
new file mode 100755
index 000000000..1c2115e47
--- /dev/null
+++ b/runtimes/instance-rootfs/create-ubuntu-24-04-qemu-disk.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -euf
+
+# Variables
+ROOTFS_FILENAME="./rootfs.img"
+IMAGE_URL="https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
+IMAGE_NAME="noble-server-cloudimg-amd64.img"
+
+# Cleanup previous run
+rm -f "$ROOTFS_FILENAME"
+
+# Download Ubuntu image
+echo "Downloading Ubuntu 24.04 image"
+curl -L "$IMAGE_URL" -o "$IMAGE_NAME"
+
+# Rename final file
+mv "$IMAGE_NAME" "$ROOTFS_FILENAME"
diff --git a/runtimes/ovmf/README.md b/runtimes/ovmf/README.md
new file mode 100644
index 000000000..83d028779
--- /dev/null
+++ b/runtimes/ovmf/README.md
@@ -0,0 +1,24 @@
+# OVMF build for Confidential VMs
+
+The files in this directory build a version of OVMF able to store SEV secrets
+in a physical memory region that will then be accessible by Grub. The final OVMF image
+also includes Grub in order to measure OVMF+Grub before loading secrets inside
+the VM.
+
+This process relies on the patch sets produced by James Bottomley:
+https://listman.redhat.com/archives/edk2-devel-archive/2020-November/msg01247.html
+
+## Build instructions
+
+As this requires a patched version of Grub, it is advised to build both tools inside a container.
+
+
+e.g. using podman
+```
+# Clone grub and edk2, and apply the patches
+bash ./download_dependencies.sh
+podman run -v ./build_ovmf.sh:/opt/build_ovmf.sh -v ./downloads:/opt/downloads\
+ ubuntu:22.04 bash /opt/build_ovmf.sh
+# The OVMF.fd file will be in `downloads/edk2/Build/AmdSev/RELEASE_GCC5/FV/OVMF.fd`
+cp downloads/edk2/Build/AmdSev/RELEASE_GCC5/FV/OVMF.fd confidential-OVMF.fd
+```
diff --git a/runtimes/ovmf/build_ovmf.sh b/runtimes/ovmf/build_ovmf.sh
new file mode 100644
index 000000000..3b31bbf0f
--- /dev/null
+++ b/runtimes/ovmf/build_ovmf.sh
@@ -0,0 +1,35 @@
+#! /bin/bash
+# Script to build OVMF + Grub for confidential computing. The resulting image will be
+# a single firmware image containing OVMF and Grub so that the entirety of the unencrypted
+# boot code can be measured before feeding secrets to the VM.
+
+set -eo pipefail
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+GRUB_DIR="${SCRIPT_DIR}/downloads/grub"
+EDK2_DIR="${SCRIPT_DIR}/downloads/edk2"
+
+if [ ! -d "${GRUB_DIR}" ]; then
+    echo "Grub directory not found: ${GRUB_DIR}" >&2
+fi
+
+if [ !
-d "${EDK2_DIR}" ]; then + echo "EDK2 directory not found: ${EDK2_DIR}" >&2 +fi + +apt-get update +# Packages for Grub +apt-get install -y autoconf autopoint binutils bison flex gcc gettext git make pkg-config python3 python-is-python3 +# Packages for OVMF (there are some duplicates with Grub, kept for documentation) +apt-get install -y bison build-essential dosfstools flex iasl libgmp3-dev libmpfr-dev mtools nasm subversion texinfo uuid-dev + +cd $GRUB_DIR +./bootstrap +./configure --prefix /usr/ --with-platform=efi --target=x86_64 +make +make install + +# Build OVMF +cd $EDK2_DIR +OvmfPkg/build.sh -b RELEASE -p OvmfPkg/AmdSev/AmdSevX64.dsc diff --git a/runtimes/ovmf/download_dependencies.sh b/runtimes/ovmf/download_dependencies.sh new file mode 100644 index 000000000..178820d99 --- /dev/null +++ b/runtimes/ovmf/download_dependencies.sh @@ -0,0 +1,39 @@ +#! /bin/bash + +set -eo pipefail + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +DOWNLOAD_DIR="${SCRIPT_DIR}/downloads" +PATCH_DIR="${SCRIPT_DIR}/patches" + +GRUB_GIT_REPOSITORY="https://github.com/aleph-im/grub.git" +GRUB_COMMIT="aleph/efi-secrets" +GRUB_DIR="${DOWNLOAD_DIR}/grub" + +EDK2_GIT_REPOSITORY="https://github.com/tianocore/edk2.git" +EDK2_COMMIT="edk2-stable202205" +EDK2_DIR="${DOWNLOAD_DIR}/edk2" + +# Download Grub +git clone --depth 1 --branch "${GRUB_COMMIT}" ${GRUB_GIT_REPOSITORY} "${GRUB_DIR}" + +# Download EDK2 (=OVMF) +git clone --recurse-submodules "${EDK2_GIT_REPOSITORY}" "${EDK2_DIR}" + + + + +# Apply patches to EDK2 +EDK2_PATCH_DIR="${PATCH_DIR}/edk2" +pushd "${EDK2_DIR}" > /dev/null +git checkout "${EDK2_COMMIT}" +git submodule update +# Default user is needed by git am. only set it for the repo if not set already +if ! git config user.name > /dev/null; then + git config --local user.name "Your Name" +fi +if ! 
git config user.email > /dev/null; then + git config --local user.email "you@example.com" +fi +git am --ignore-space-change --ignore-whitespace "${EDK2_PATCH_DIR}/0001-Fix-invokation-of-cryptomount-s-for-AMD-SEV.patch" +popd > /dev/null diff --git a/runtimes/ovmf/patches/edk2/0001-Fix-invokation-of-cryptomount-s-for-AMD-SEV.patch b/runtimes/ovmf/patches/edk2/0001-Fix-invokation-of-cryptomount-s-for-AMD-SEV.patch new file mode 100644 index 000000000..5c4f5e290 --- /dev/null +++ b/runtimes/ovmf/patches/edk2/0001-Fix-invokation-of-cryptomount-s-for-AMD-SEV.patch @@ -0,0 +1,58 @@ +From b3f1d358cc4098fb59a778d5340018a4e73ff87f Mon Sep 17 00:00:00 2001 +From: Olivier Desenfans +Date: Thu, 30 Jun 2022 10:38:18 +0200 +Subject: [PATCH] Fix invokation of cryptomount -s for AMD SEV + +The current implementation targeted the first version of James +Bottomley's Grub patches. These patches have since been updated +to move the secret loading part from a dedicated command to +a secret-finding module that must be invoked with + +cryptomount -s MOD + +Fixed the name of the Grub module which was renamed from sevsecret +to efisecret. +--- + OvmfPkg/AmdSev/Grub/grub.cfg | 10 ++-------- + OvmfPkg/AmdSev/Grub/grub.sh | 2 +- + 2 files changed, 3 insertions(+), 9 deletions(-) + +diff --git a/OvmfPkg/AmdSev/Grub/grub.cfg b/OvmfPkg/AmdSev/Grub/grub.cfg +index 17be94277a..331baf798c 100644 +--- a/OvmfPkg/AmdSev/Grub/grub.cfg ++++ b/OvmfPkg/AmdSev/Grub/grub.cfg +@@ -10,16 +10,10 @@ + ## + + echo "Entering grub config" +-sevsecret ++cryptomount -s efisecret + if [ $? -ne 0 ]; then +- echo "Failed to locate anything in the SEV secret area, prompting for password" ++ echo "Failed to mount root securely, retrying with password prompt" + cryptomount -a +-else +- cryptomount -s +- if [ $? 
-ne 0 ]; then +- echo "Failed to mount root securely, retrying with password prompt" +- cryptomount -a +- fi + fi + set root= + for f in (crypto*); do +diff --git a/OvmfPkg/AmdSev/Grub/grub.sh b/OvmfPkg/AmdSev/Grub/grub.sh +index 99807d7291..abec80d7da 100644 +--- a/OvmfPkg/AmdSev/Grub/grub.sh ++++ b/OvmfPkg/AmdSev/Grub/grub.sh +@@ -44,7 +44,7 @@ GRUB_MODULES=" + linux + linuxefi + reboot +- sevsecret ++ efisecret + " + basedir=$(dirname -- "$0") + +-- +2.25.1 + diff --git a/src/aleph/__init__.py b/src/aleph/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/__init__.py b/src/aleph/vm/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/conf.py b/src/aleph/vm/conf.py new file mode 100644 index 000000000..5de7816c0 --- /dev/null +++ b/src/aleph/vm/conf.py @@ -0,0 +1,530 @@ +import ipaddress +import logging +import os +import re +import shutil +from collections.abc import Iterable +from decimal import Decimal +from enum import Enum +from os.path import abspath, exists, isdir, isfile, join +from pathlib import Path +from subprocess import CalledProcessError, check_output +from typing import Any, List, Literal, NewType, Optional + +from aleph_message.models import Chain +from aleph_message.models.execution.environment import HypervisorType +from pydantic import BaseSettings, Field, HttpUrl +from pydantic.env_settings import DotenvType, env_file_sentinel + +from aleph.vm.orchestrator.chain import STREAM_CHAINS +from aleph.vm.utils import ( + check_amd_sev_es_supported, + check_amd_sev_supported, + file_hashes_differ, + is_command_available, +) + +logger = logging.getLogger(__name__) + +Url = NewType("Url", str) + +# This variable may not be set from an environment variable +ALLOW_DEVELOPER_SSH_KEYS = object() + + +class DnsResolver(str, Enum): + detect = "detect" # Detect the resolver used by the system + resolv_conf = "resolv.conf" # Simply copy from /etc/resolv.conf + resolvectl = "resolvectl" # 
Systemd-resolved, common on Ubuntu + + +class IPv6AllocationPolicy(str, Enum): + static = "static" # Compute the IP address based on the VM item hash. + dynamic = "dynamic" # Assign an available IP address. + + +class SnapshotCompressionAlgorithm(str, Enum): + gz = "gzip" + + +def etc_resolv_conf_dns_servers(): + with open("/etc/resolv.conf") as resolv_file: + for line in resolv_file.readlines(): + ip = re.findall(r"^nameserver\s+([\w.]+)$", line) + if ip: + yield ip[0] + + +def resolvectl_dns_servers(interface: str) -> Iterable[str]: + """ + Use resolvectl to list available DNS servers (IPv4 and IPv6). + + Note: we used to use systemd-resolve for Debian 11. + This command is not available anymore on Ubuntu 22.04 and is actually a symlink + to resolvectl. + + Example output for `resolvectl dns -i eth0`: + Link 2 (eth0): 67.207.67.3 67.207.67.2 2a02:2788:fff0:5::140 + """ + output = check_output(["/usr/bin/resolvectl", "dns", "-i", interface], text=True) + # Split on the first colon only to support IPv6 addresses. + link, servers = output.split(":", maxsplit=1) + for server in servers.split(): + yield server.strip() + + +def get_default_interface() -> str | None: + """Returns the default network interface""" + with open("/proc/net/route") as f: + for line in f.readlines(): + parts = line.strip().split() + if parts[1] == "00000000": # Indicates default route + return parts[0] + return None + + +def obtain_dns_ips(dns_resolver: DnsResolver, network_interface: str) -> list[str]: + # The match syntax is not yet available as of Python 3.9 + # match dns_resolver: + if dns_resolver == DnsResolver.detect: + # Use a try-except approach since resolvectl can be present but disabled and raise the following + # "Failed to get global data: Unit dbus-org.freedesktop.resolve1.service not found." 
+ try: + return list(resolvectl_dns_servers(interface=network_interface)) + except (FileNotFoundError, CalledProcessError) as error: + if Path("/etc/resolv.conf").exists(): + return list(etc_resolv_conf_dns_servers()) + else: + msg = "No DNS resolver found" + raise FileNotFoundError(msg) from error + + elif dns_resolver == DnsResolver.resolv_conf: + return list(etc_resolv_conf_dns_servers()) + + elif dns_resolver == DnsResolver.resolvectl: + return list(resolvectl_dns_servers(interface=network_interface)) + + else: + msg = "No DNS resolve defined, this should never happen." + raise AssertionError(msg) + + +class Settings(BaseSettings): + SUPERVISOR_HOST: str = "127.0.0.1" + SUPERVISOR_PORT: int = 4020 + + # Public domain name + DOMAIN_NAME: str = Field( + default="localhost", + description="Default public domain name", + ) + + START_ID_INDEX: int = 4 + PREALLOC_VM_COUNT: int = 0 + REUSE_TIMEOUT: float = 60 * 60.0 + WATCH_FOR_MESSAGES: bool = True + WATCH_FOR_UPDATES: bool = True + + API_SERVER: str = "https://official.aleph.cloud" + IPFS_SERVER: Url = Url("http://localhost:8080/ipfs") + # Connect to the Quad9 VPN provider using their IPv4 and IPv6 addresses. + CONNECTIVITY_IPV4_URL: str = "https://9.9.9.9/" + CONNECTIVITY_IPV6_URL: str = "https://[2620:fe::fe]/" + CONNECTIVITY_DNS_HOSTNAME: str = "example.org" + + USE_JAILER: bool = True + # Changelog: PRINT_SYSTEM_LOGS use to print the MicroVM logs with the supervisor output. + # They are now in separate journald entries, disabling the settings disable the logs output of Firecracker VM (only) + # via the serial console. This break the logs endpoint for program, as such disabling it in prod is not recommended. 
    PRINT_SYSTEM_LOGS: bool = True
    IGNORE_TRACEBACK_FROM_DIAGNOSTICS: bool = True
    LOG_LEVEL: str = "INFO"
    DEBUG_ASYNCIO: bool = False

    # Networking does not work inside Docker/Podman
    ALLOW_VM_NETWORKING: bool = True
    NETWORK_INTERFACE: str | None = None
    IPV4_ADDRESS_POOL: str = Field(
        default="172.16.0.0/12",
        description="IPv4 address range used to provide networks to VMs.",
    )
    IPV4_NETWORK_PREFIX_LENGTH: int = Field(
        default=24,
        description="Individual VM network prefix length in bits",
    )
    IPV6_ADDRESS_POOL: str = Field(
        default="fc00:1:2:3::/64",
        description="IPv6 address range assigned to the host. Example: 1111:2222:3333:4444::/64. "
        "Defaults to a local address range for compatibility with hosts not yet configured for IPv6.",
    )
    IPV6_ALLOCATION_POLICY: IPv6AllocationPolicy = Field(default=IPv6AllocationPolicy.static)
    IPV6_SUBNET_PREFIX: int = Field(
        default=124,
        description="IPv6 subnet prefix for VMs. Made configurable for testing.",
    )
    IPV6_FORWARDING_ENABLED: bool = Field(
        default=True,
        description="Enable IPv6 forwarding on the host. Required for IPv6 connectivity in VMs.",
    )
    # Annotation added: without it, pydantic v1 treats this as a plain class
    # attribute, not a field, so the ALEPH_VM_NFTABLES_CHAIN_PREFIX env var is ignored.
    NFTABLES_CHAIN_PREFIX: str = "aleph"
    USE_NDP_PROXY: bool = Field(
        default=True,
        description="Use the Neighbor Discovery Protocol Proxy to respond to Router Solicitation for instances on IPv6",
    )

    DNS_RESOLUTION: DnsResolver | None = Field(
        default=DnsResolver.detect,
        description="Method used to resolve the dns server if DNS_NAMESERVERS is not present.",
    )
    DNS_NAMESERVERS: list[str] | None = None
    # NOTE: no explicit default — pydantic v1 gives Optional fields an implicit None default;
    # both lists are then populated by setup().
    DNS_NAMESERVERS_IPV4: list[str] | None
    DNS_NAMESERVERS_IPV6: list[str] | None

    FIRECRACKER_PATH: Path = Path("/opt/firecracker/firecracker")
    JAILER_PATH: Path = Path("/opt/firecracker/jailer")
    SEV_CTL_PATH: Path = Path("/opt/sevctl")
    LINUX_PATH: Path = Path("/opt/firecracker/vmlinux.bin")
    INIT_TIMEOUT: float = 20.0

    # Annotation added so the connector URL is a real, env-overridable field.
    CONNECTOR_URL: Url = Url("http://localhost:4021")

    CACHE_ROOT: Path = Path("/var/cache/aleph/vm")
    MESSAGE_CACHE: Optional[Path] = Field(
        None,
        description="Default to CACHE_ROOT/message",
    )
    CODE_CACHE: Optional[Path] = Field(None, description="Default to CACHE_ROOT/code")
    RUNTIME_CACHE: Optional[Path] = Field(None, description="Default to CACHE_ROOT/runtime")
    DATA_CACHE: Optional[Path] = Field(None, description="Default to CACHE_ROOT/data")

    EXECUTION_ROOT: Path = Path("/var/lib/aleph/vm")
    JAILER_BASE_DIRECTORY: Optional[Path] = Field(None, description="Default to EXECUTION_ROOT/jailer")
    EXECUTION_DATABASE: Optional[Path] = Field(
        None, description="Location of database file. Default to EXECUTION_ROOT/executions.sqlite3"
    )
    EXECUTION_LOG_ENABLED: bool = False
    EXECUTION_LOG_DIRECTORY: Optional[Path] = Field(
        None, description="Location of executions log. Default to EXECUTION_ROOT/executions/"
    )

    # NOTE(review): annotated `Path` but defaulted to None (filled in by __init__) — confirm
    # this should not be Optional[Path] like the fields above.
    PERSISTENT_VOLUMES_DIR: Path = Field(
        None, description="Persistent volumes location. Default to EXECUTION_ROOT/volumes/persistent/"
    )
    JAILER_BASE_DIR: Path = Field(None)

    MAX_PROGRAM_ARCHIVE_SIZE: int = 10_000_000  # 10 MB
    MAX_DATA_ARCHIVE_SIZE: int = 10_000_000  # 10 MB

    PAYMENT_MONITOR_INTERVAL: float = Field(
        default=60.0,
        description="Interval in seconds between payment checks",
    )
    PAYMENT_RECEIVER_ADDRESS: str = Field(
        default="",
        description="Address of the account receiving payments",
    )
    # This address is the ALEPH SuperToken on SuperFluid Testnet
    PAYMENT_PRICING_AGGREGATE: str = ""  # TODO: Missing

    # Use to check PAYG payment
    RPC_AVAX: HttpUrl = Field(
        default=STREAM_CHAINS[Chain.AVAX].rpc,
        description="RPC API Endpoint for AVAX chain",
    )

    RPC_BASE: HttpUrl = Field(
        default=STREAM_CHAINS[Chain.BASE].rpc,
        description="RPC API Endpoint for BASE chain",
    )

    PAYMENT_BUFFER: Decimal = Field(
        default=Decimal("0.0000000001"),
        description="Buffer to add to the required payment to prevent floating point errors",
    )

    SNAPSHOT_FREQUENCY: int = Field(
        default=0,
        description="Snapshot frequency interval in minutes. It will create a VM snapshot every X minutes. "
        "If set to zero, snapshots are disabled.",
    )

    SNAPSHOT_COMPRESSION_ALGORITHM: SnapshotCompressionAlgorithm = Field(
        default=SnapshotCompressionAlgorithm.gz,
        description="Snapshot compression algorithm.",
    )

    # hashlib.sha256(b"secret-token").hexdigest()
    ALLOCATION_TOKEN_HASH: str = "151ba92f2eb90bce67e912af2f7a5c17d8654b3d29895b042107ea312a7eebda"

    ENABLE_QEMU_SUPPORT: bool = Field(default=True)
    INSTANCE_DEFAULT_HYPERVISOR: HypervisorType | None = Field(
        default=HypervisorType.firecracker,  # User Firecracker
        description="Default hypervisor to use on running instances, can be Firecracker or QEmu",
    )

    ENABLE_CONFIDENTIAL_COMPUTING: bool = Field(
        default=False,
        description="Enable Confidential Computing using AMD-SEV. It will test if the host is compatible "
        "with SEV and SEV-ES",
    )

    CONFIDENTIAL_DIRECTORY: Optional[Path] = Field(
        None,
        description="Confidential Computing default directory. Default to EXECUTION_ROOT/confidential",
    )

    CONFIDENTIAL_SESSION_DIRECTORY: Optional[Path] = Field(None, description="Default to EXECUTION_ROOT/sessions")

    ENABLE_GPU_SUPPORT: bool = Field(
        default=False,
        description="Enable GPU pass-through support to VMs, only allowed for QEmu hypervisor",
    )

    # Settings to get from the network aggregates
    SETTINGS_AGGREGATE_ADDRESS: str = "0xFba561a84A537fCaa567bb7A2257e7142701ae2A"

    # Tests on programs
    FAKE_DATA_PROGRAM: Path | None = None
    BENCHMARK_FAKE_DATA_PROGRAM: Path = Path(abspath(join(__file__, "../../../../examples/example_fastapi")))

    FAKE_DATA_MESSAGE: Path = Path(abspath(join(__file__, "../../../../examples/program_message_from_aleph.json")))
    FAKE_DATA_DATA: Path | None = Path(abspath(join(__file__, "../../../../examples/data/")))
    FAKE_DATA_RUNTIME: Path = Path(
        abspath(join(__file__, "../../../../runtimes/aleph-debian-12-python/rootfs.squashfs"))
    )
    FAKE_DATA_VOLUME: Path | None = Path(abspath(join(__file__, "../../../../examples/volumes/volume-venv.squashfs")))

    # Tests on instances

    TEST_INSTANCE_ID: str | None = Field(
        default=None,  # TODO: Use a valid item_hash here
        description="Identifier of the instance message used when testing the launch of an instance from the network",
    )

    USE_FAKE_INSTANCE_BASE: bool = False
    FAKE_INSTANCE_BASE: Path = Path(abspath(join(__file__, "../../../../runtimes/instance-rootfs/debian-12.btrfs")))
    FAKE_QEMU_INSTANCE_BASE: Path = Path(abspath(join(__file__, "../../../../runtimes/instance-rootfs/rootfs.img")))
    FAKE_INSTANCE_ID: str = Field(
        default="decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca",
        description="Identifier used for the 'fake instance' message defined in "
        "examples/instance_message_from_aleph.json",
    )
    # Annotations added: untyped assignments are ignored as fields by pydantic v1.
    FAKE_INSTANCE_MESSAGE: Path = Path(abspath(join(__file__, "../../../../examples/instance_message_from_aleph.json")))
    FAKE_INSTANCE_QEMU_MESSAGE: Path = Path(abspath(join(__file__, "../../../../examples/qemu_message_from_aleph.json")))

    CHECK_FASTAPI_VM_ID: str = "63faf8b5db1cf8d965e6a464a0cb8062af8e7df131729e48738342d956f29ace"
    LEGACY_CHECK_FASTAPI_VM_ID: str = "67705389842a0a1b95eaa408b009741027964edc805997475e95c505d642edd8"

    # Developer options

    SENTRY_DSN: str | None = None
    SENTRY_TRACES_SAMPLE_RATE: float = Field(ge=0, le=1.0, default=0.1)
    DEVELOPER_SSH_KEYS: list[str] | None = []
    # Using an object here forces the value to come from Python code and not from an environment variable.
    USE_DEVELOPER_SSH_KEYS: Literal[False] | object = False

    # Fields
    SENSITIVE_FIELDS: list[str] = Field(
        default=["SENTRY_DSN"],
        description="Sensitive fields, redacted from `--print-settings`.",
    )

    def update(self, **kwargs):
        """Override settings from keyword arguments; raises ValueError on unknown keys."""
        for key, value in kwargs.items():
            if key != key.upper():
                logger.warning(f"Setting {key} is not uppercase")
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                msg = f"Unknown setting '{key}'"
                raise ValueError(msg)

    def check(self):
        """Check that the settings are valid. Call this method after self.setup()."""
        assert Path("/dev/kvm").exists(), "KVM not found on `/dev/kvm`."
+ assert isfile(self.FIRECRACKER_PATH), f"File not found {self.FIRECRACKER_PATH}" + assert isfile(self.JAILER_PATH), f"File not found {self.JAILER_PATH}" + assert isfile(self.LINUX_PATH), f"File not found {self.LINUX_PATH}" + assert self.NETWORK_INTERFACE, "Network interface is not specified" + assert self.CONNECTOR_URL.startswith("http://") or self.CONNECTOR_URL.startswith("https://") + if self.ALLOW_VM_NETWORKING: + assert exists( + f"/sys/class/net/{self.NETWORK_INTERFACE}" + ), f"Network interface {self.NETWORK_INTERFACE} does not exist" + + _, ipv4_pool_length = settings.IPV4_ADDRESS_POOL.split("/") + assert ( + int(ipv4_pool_length) <= settings.IPV4_NETWORK_PREFIX_LENGTH + ), "The IPv4 address pool prefix must be shorter than an individual VM network prefix" + + if self.FAKE_DATA_PROGRAM: + assert self.FAKE_DATA_PROGRAM, "Local fake program directory not specified" + assert self.FAKE_DATA_MESSAGE, "Local fake message not specified" + assert self.FAKE_DATA_DATA, "Local fake data directory not specified" + assert self.FAKE_DATA_RUNTIME, "Local runtime .squashfs build not specified" + assert self.FAKE_DATA_VOLUME, "Local data volume .squashfs not specified" + + assert isdir( + self.FAKE_DATA_PROGRAM + ), f"Local fake program directory is missing, no directory '{self.FAKE_DATA_PROGRAM}'" + assert isfile(self.FAKE_DATA_MESSAGE), f"Local fake message '{self.FAKE_DATA_MESSAGE}' not found" + assert isdir(self.FAKE_DATA_DATA), f"Local fake data directory '{self.FAKE_DATA_DATA}' is missing" + assert isfile( + self.FAKE_DATA_RUNTIME + ), f"Local runtime '{self.FAKE_DATA_RUNTIME}' is missing, did you build it ?" + assert isfile( + self.FAKE_DATA_VOLUME + ), f"Local data volume '{self.FAKE_DATA_VOLUME}' is missing, did you build it ?" 
+ + assert is_command_available("setfacl"), "Command `setfacl` not found, run `apt install acl`" + if self.USE_NDP_PROXY: + assert is_command_available("ndppd"), "Command `ndppd` not found, run `apt install ndppd`" + + # Necessary for cloud-init customisation of instance + assert is_command_available( + "cloud-localds" + ), "Command `cloud-localds` not found, run `apt install cloud-image-utils`" + + if self.ENABLE_QEMU_SUPPORT: + # Qemu support + assert is_command_available("qemu-img"), "Command `qemu-img` not found, run `apt install qemu-utils`" + assert is_command_available( + "qemu-system-x86_64" + ), "Command `qemu-system-x86_64` not found, run `apt install qemu-system-x86`" + + if self.ENABLE_CONFIDENTIAL_COMPUTING: + assert self.SEV_CTL_PATH.is_file(), f"File not found {self.SEV_CTL_PATH}" + assert check_amd_sev_supported(), "SEV feature isn't enabled, enable it in BIOS" + assert check_amd_sev_es_supported(), "SEV-ES feature isn't enabled, enable it in BIOS" + # Not available on the test machine yet + # assert check_amd_sev_snp_supported(), "SEV-SNP feature isn't enabled, enable it in BIOS" + assert self.ENABLE_QEMU_SUPPORT, "Qemu Support is needed for confidential computing and it's disabled, " + "enable it setting the env variable `ENABLE_QEMU_SUPPORT=True` in configuration" + if self.ENABLE_GPU_SUPPORT: + assert self.ENABLE_QEMU_SUPPORT, "Qemu Support is needed for GPU support and it's disabled, " + + def setup(self): + """Setup the environment defined by the settings. 
Call this method after loading the settings.""" + + # Update chain RPC + STREAM_CHAINS[Chain.AVAX].rpc = str(self.RPC_AVAX) + STREAM_CHAINS[Chain.BASE].rpc = str(self.RPC_BASE) + + if self.MESSAGE_CACHE: + os.makedirs(self.MESSAGE_CACHE, exist_ok=True) + if self.CODE_CACHE: + os.makedirs(self.CODE_CACHE, exist_ok=True) + if self.RUNTIME_CACHE: + os.makedirs(self.RUNTIME_CACHE, exist_ok=True) + if self.DATA_CACHE: + os.makedirs(self.DATA_CACHE, exist_ok=True) + + os.makedirs(self.EXECUTION_ROOT, exist_ok=True) + + # If the Linux kernel provided is on another device than the execution root, + # copy it to the execution root to allow hardlink creation within jailer directories. + if os.stat(self.LINUX_PATH).st_dev != os.stat(self.EXECUTION_ROOT).st_dev: + logger.info("The Linux kernel is on another device than the execution root. Creating a copy.") + linux_path_on_device = self.EXECUTION_ROOT / "vmlinux.bin" + + # Only copy if the hash of the file differ. + if file_hashes_differ(self.LINUX_PATH, linux_path_on_device): + shutil.copy(self.LINUX_PATH, linux_path_on_device) + + self.LINUX_PATH = linux_path_on_device + + if self.EXECUTION_LOG_DIRECTORY: + os.makedirs(self.EXECUTION_LOG_DIRECTORY, exist_ok=True) + if self.PERSISTENT_VOLUMES_DIR: + os.makedirs(self.PERSISTENT_VOLUMES_DIR, exist_ok=True) + if self.CONFIDENTIAL_DIRECTORY: + os.makedirs(self.CONFIDENTIAL_DIRECTORY, exist_ok=True) + if self.CONFIDENTIAL_SESSION_DIRECTORY: + os.makedirs(self.CONFIDENTIAL_SESSION_DIRECTORY, exist_ok=True) + + self.API_SERVER = self.API_SERVER.rstrip("/") + + if not self.NETWORK_INTERFACE: + self.NETWORK_INTERFACE = get_default_interface() + + if self.DNS_NAMESERVERS is None and self.DNS_RESOLUTION and self.NETWORK_INTERFACE: + self.DNS_NAMESERVERS = obtain_dns_ips( + dns_resolver=self.DNS_RESOLUTION, + network_interface=self.NETWORK_INTERFACE, + ) + + if not self.DNS_NAMESERVERS_IPV4: + self.DNS_NAMESERVERS_IPV4 = [] + if not self.DNS_NAMESERVERS_IPV6: + self.DNS_NAMESERVERS_IPV6 
= [] + if self.DNS_NAMESERVERS: + for server in self.DNS_NAMESERVERS: + ip_addr = ipaddress.ip_address(server) + if isinstance(ip_addr, ipaddress.IPv4Address): + self.DNS_NAMESERVERS_IPV4.append(server) + if isinstance(ip_addr, ipaddress.IPv6Address): + self.DNS_NAMESERVERS_IPV6.append(server) + + if not settings.ENABLE_QEMU_SUPPORT: + # If QEmu is not supported, ignore the setting and use Firecracker by default + settings.INSTANCE_DEFAULT_HYPERVISOR = HypervisorType.firecracker + + def display(self) -> str: + attributes: dict[str, Any] = {} + + for attr in self.__dict__.keys(): + if attr != attr.upper(): + # Settings are expected to be ALL_UPPERCASE, other attributes snake_case or CamelCase + continue + + if getattr(self, attr) and attr in self.SENSITIVE_FIELDS: + attributes[attr] = "" + else: + attributes[attr] = getattr(self, attr) + + return "\n".join(f"{self.Config.env_prefix}{attribute} = {value}" for attribute, value in attributes.items()) + + def __init__( + self, + _env_file: DotenvType | None = env_file_sentinel, + _env_file_encoding: str | None = None, + _env_nested_delimiter: str | None = None, + _secrets_dir: Path | None = None, + **values: Any, + ) -> None: + super().__init__(_env_file, _env_file_encoding, _env_nested_delimiter, _secrets_dir, **values) + if not self.MESSAGE_CACHE: + self.MESSAGE_CACHE = self.CACHE_ROOT / "message" + if not self.CODE_CACHE: + self.CODE_CACHE = self.CACHE_ROOT / "code" + if not self.RUNTIME_CACHE: + self.RUNTIME_CACHE = self.CACHE_ROOT / "runtime" + if not self.DATA_CACHE: + self.DATA_CACHE = self.CACHE_ROOT / "data" + if not self.CONFIDENTIAL_DIRECTORY: + self.CONFIDENTIAL_DIRECTORY = self.CACHE_ROOT / "confidential" + if not self.JAILER_BASE_DIRECTORY: + self.JAILER_BASE_DIRECTORY = self.EXECUTION_ROOT / "jailer" + if not self.PERSISTENT_VOLUMES_DIR: + self.PERSISTENT_VOLUMES_DIR = self.EXECUTION_ROOT / "volumes" / "persistent" + if not self.EXECUTION_DATABASE: + self.EXECUTION_DATABASE = self.EXECUTION_ROOT / 
"executions.sqlite3"
        if not self.EXECUTION_LOG_DIRECTORY:
            self.EXECUTION_LOG_DIRECTORY = self.EXECUTION_ROOT / "executions"
        if not self.JAILER_BASE_DIR:
            self.JAILER_BASE_DIR = self.EXECUTION_ROOT / "jailer"
        if not self.CONFIDENTIAL_SESSION_DIRECTORY:
            self.CONFIDENTIAL_SESSION_DIRECTORY = self.EXECUTION_ROOT / "sessions"

    class Config:
        # All settings can be overridden by ALEPH_VM_* environment variables or a .env file.
        env_prefix = "ALEPH_VM_"
        case_sensitive = False
        env_file = ".env"


def make_db_url():
    """Return the SQLAlchemy async connection URL for the execution database."""
    return f"sqlite+aiosqlite:///{settings.EXECUTION_DATABASE}"


# Settings singleton
settings = Settings()
diff --git a/src/aleph/vm/constants.py b/src/aleph/vm/constants.py
new file mode 100644
index 000000000..9701259e0
--- /dev/null
+++ b/src/aleph/vm/constants.py
@@ -0,0 +1,5 @@
# Binary size units and time spans (in seconds) shared across the project.
KiB = 1024
MiB = 1024 * 1024
GiB = 1024 * 1024 * 1024
HOUR = 60 * 60
MINUTE = 60
diff --git a/src/aleph/vm/controllers/__init__.py b/src/aleph/vm/controllers/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/aleph/vm/controllers/__main__.py b/src/aleph/vm/controllers/__main__.py
new file mode 100644
index 000000000..f3cef3171
--- /dev/null
+++ b/src/aleph/vm/controllers/__main__.py
@@ -0,0 +1,150 @@
import argparse
import asyncio
import json
import logging
import signal
import sys
from asyncio.subprocess import Process
from pathlib import Path

from aleph.vm.hypervisors.firecracker.microvm import MicroVM
from aleph.vm.hypervisors.qemu.qemuvm import QemuVM
from aleph.vm.hypervisors.qemu_confidential.qemuvm import QemuConfidentialVM
from aleph.vm.network.hostnetwork import Network, make_ipv6_allocator

from .configuration import (
    Configuration,
    HypervisorType,
    QemuConfidentialVMConfiguration,
    QemuVMConfiguration,
    VMConfiguration,
)

logger = logging.getLogger(__name__)


def configuration_from_file(path: Path):
    """Load and validate a controller Configuration from a JSON file."""
    with open(path) as f:
        data = json.load(f)
        return Configuration.parse_obj(data)


def parse_args(args):
    parser = argparse.ArgumentParser(prog="instance", 
description="Aleph.im Instance Client")
    parser.add_argument("-c", "--config", dest="config_path", required=True)
    parser.add_argument(
        "-i",
        "--initialize-network-settings",
        dest="initialize_network_settings",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-p",
        "--print-settings",
        dest="print_settings",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-vv",
        "--very-verbose",
        dest="loglevel",
        help="set loglevel to DEBUG",
        action="store_const",
        const=logging.DEBUG,
        default=logging.INFO,
    )
    return parser.parse_args(args)


async def execute_persistent_vm(config: Configuration):
    """Instantiate and start the VM described by `config`.

    Returns the (execution, process) pair: the hypervisor wrapper object and the
    underlying asyncio subprocess.
    """
    if config.hypervisor == HypervisorType.firecracker:
        assert isinstance(config.vm_configuration, VMConfiguration)
        execution = MicroVM(
            vm_id=config.vm_id,
            vm_hash=config.vm_hash,
            firecracker_bin_path=config.vm_configuration.firecracker_bin_path,
            jailer_base_directory=config.settings.JAILER_BASE_DIR,
            use_jailer=config.vm_configuration.use_jailer,
            jailer_bin_path=config.vm_configuration.jailer_bin_path,
            init_timeout=config.vm_configuration.init_timeout,
        )

        execution.prepare_start()
        process = await execution.start(config.vm_configuration.config_file_path)
    # NOTE(review): dispatching on the configuration type rather than
    # config.hypervisor, as the FIXME notes; the assert below is redundant
    # with the elif condition.
    elif isinstance(config.vm_configuration, QemuConfidentialVMConfiguration):  # FIXME
        assert isinstance(config.vm_configuration, QemuConfidentialVMConfiguration)
        execution = QemuConfidentialVM(config.vm_hash, config.vm_configuration)
        process = await execution.start()
    else:
        assert isinstance(config.vm_configuration, QemuVMConfiguration)
        execution = QemuVM(config.vm_hash, config.vm_configuration)
        process = await execution.start()

    return execution, process


async def handle_persistent_vm(config: Configuration, execution: MicroVM | QemuVM, process: Process):
    # Catch the terminating signal and send a proper message to the vm to stop it so it close files properly
    loop = asyncio.get_event_loop()

    def callback():
        """Callback 
for the signal handler to stop the VM and cleanup properly on SIGTERM.""" + logger.debug("Received SIGTERM") + loop.create_task(execution.stop()) + + loop.add_signal_handler(signal.SIGTERM, callback) + + await process.wait() + logger.warning(f"Process terminated with {process.returncode}") + + +async def run_persistent_vm(config: Configuration): + execution, process = await execute_persistent_vm(config) + await handle_persistent_vm(config=config, execution=execution, process=process) + return execution, process + + +def main(): + args = parse_args(sys.argv[1:]) + + config_path = Path(args.config_path) + if not config_path.is_file(): + logger.error(f"Configuration file {config_path} not found") + exit(1) + + config = configuration_from_file(config_path) + + log_format = "%(asctime)s | %(levelname)s | %(message)s" + logging.basicConfig( + level=args.loglevel, + format=log_format, + ) + + if args.print_settings: + print(config.settings.display()) + + config.settings.check() + + if args.initialize_network_settings: + network = Network( + vm_ipv4_address_pool_range=config.settings.IPV4_ADDRESS_POOL, + vm_network_size=config.settings.IPV4_NETWORK_PREFIX_LENGTH, + external_interface=config.settings.NETWORK_INTERFACE, + ipv6_allocator=make_ipv6_allocator( + allocation_policy=config.settings.IPV6_ALLOCATION_POLICY, + address_pool=config.settings.IPV6_ADDRESS_POOL, + subnet_prefix=config.settings.IPV6_SUBNET_PREFIX, + ), + use_ndp_proxy=config.settings.USE_NDP_PROXY, + ipv6_forwarding_enabled=config.settings.IPV6_FORWARDING_ENABLED, + ) + + network.setup() + + asyncio.run(run_persistent_vm(config)) + + +if __name__ == "__main__": + main() diff --git a/src/aleph/vm/controllers/configuration.py b/src/aleph/vm/controllers/configuration.py new file mode 100644 index 000000000..fb4b4ff1f --- /dev/null +++ b/src/aleph/vm/controllers/configuration.py @@ -0,0 +1,83 @@ +import logging +from enum import Enum +from pathlib import Path + +from pydantic import BaseModel + +from 
aleph.vm.conf import Settings, settings

logger = logging.getLogger(__name__)


class VMConfiguration(BaseModel):
    """Parameters needed to launch a Firecracker VM through the jailer."""

    use_jailer: bool
    firecracker_bin_path: Path
    jailer_bin_path: Path
    config_file_path: Path
    init_timeout: float


class QemuVMHostVolume(BaseModel):
    """A host directory or file exposed to a QEMU VM."""

    mount: str
    path_on_host: Path
    read_only: bool


class QemuGPU(BaseModel):
    """A GPU passed through to a QEMU VM, identified by its PCI address on the host."""

    pci_host: str


class QemuVMConfiguration(BaseModel):
    """Parameters needed to launch a regular QEMU VM."""

    qemu_bin_path: str
    cloud_init_drive_path: str | None
    image_path: str
    monitor_socket_path: Path
    qmp_socket_path: Path
    vcpu_count: int
    mem_size_mb: int
    interface_name: str | None
    host_volumes: list[QemuVMHostVolume]
    gpus: list[QemuGPU]


class QemuConfidentialVMConfiguration(BaseModel):
    """Parameters needed to launch a QEMU VM with AMD SEV confidential computing."""

    qemu_bin_path: str
    cloud_init_drive_path: str | None
    image_path: str
    monitor_socket_path: Path
    qmp_socket_path: Path
    vcpu_count: int
    mem_size_mb: int
    interface_name: str | None
    host_volumes: list[QemuVMHostVolume]
    gpus: list[QemuGPU]
    ovmf_path: Path
    sev_session_file: Path
    sev_dh_cert_file: Path
    sev_policy: int


class HypervisorType(str, Enum):
    qemu = "qemu"
    firecracker = "firecracker"


class Configuration(BaseModel):
    """Full controller configuration: VM identity, orchestrator settings and hypervisor parameters."""

    vm_id: int
    vm_hash: str
    settings: Settings
    vm_configuration: QemuConfidentialVMConfiguration | QemuVMConfiguration | VMConfiguration
    hypervisor: HypervisorType = HypervisorType.firecracker


def save_controller_configuration(vm_hash: str, configuration: Configuration) -> Path:
    """Save VM configuration to be used by the controller service"""
    config_file_path = Path(f"{settings.EXECUTION_ROOT}/{vm_hash}-controller.json")
    with config_file_path.open("w") as controller_config_file:
        controller_config_file.write(
            configuration.json(
                by_alias=True, exclude_none=True, indent=4, exclude={"settings": {"USE_DEVELOPER_SSH_KEYS"}}
            )
        )
    # NOTE(review): 0o644 makes the serialized settings world-readable — confirm no
    # sensitive fields (e.g. SENTRY_DSN) should be excluded from this file as well.
    config_file_path.chmod(0o644)
    return config_file_path
diff --git a/src/aleph/vm/controllers/firecracker/__init__.py 
b/src/aleph/vm/controllers/firecracker/__init__.py new file mode 100644 index 000000000..a28769581 --- /dev/null +++ b/src/aleph/vm/controllers/firecracker/__init__.py @@ -0,0 +1,4 @@ +from .instance import AlephFirecrackerInstance +from .program import AlephFirecrackerProgram + +__all__ = ("AlephFirecrackerProgram", "AlephFirecrackerInstance") diff --git a/src/aleph/vm/controllers/firecracker/executable.py b/src/aleph/vm/controllers/firecracker/executable.py new file mode 100644 index 000000000..7249be9c7 --- /dev/null +++ b/src/aleph/vm/controllers/firecracker/executable.py @@ -0,0 +1,339 @@ +""" +This module contains abstract class for executables (programs and instances) running inside Firecracker MicroVMs. +""" + +import asyncio +import logging +from dataclasses import dataclass, field +from multiprocessing import Process, set_start_method +from os.path import exists, isfile +from pathlib import Path +from typing import Generic, TypeVar + +from aiohttp import ClientResponseError +from aleph_message.models import ExecutableContent, ItemHash +from aleph_message.models.execution.environment import MachineResources +from aleph_message.models.execution.volume import PersistentVolume + +from aleph.vm.conf import settings +from aleph.vm.controllers.configuration import ( + Configuration, + VMConfiguration, + save_controller_configuration, +) +from aleph.vm.controllers.firecracker.snapshots import CompressedDiskVolumeSnapshot +from aleph.vm.controllers.interface import AlephVmControllerInterface +from aleph.vm.guest_api.__main__ import run_guest_api +from aleph.vm.hypervisors.firecracker.microvm import FirecrackerConfig, MicroVM +from aleph.vm.network.firewall import teardown_nftables_for_vm +from aleph.vm.network.interfaces import TapInterface +from aleph.vm.storage import chown_to_jailman, get_volume_path + +try: + import psutil # type: ignore [no-redef] +except ImportError: + psutil = None # type: ignore [assignment] + +logger = logging.getLogger(__name__) + +try: 
+ set_start_method("spawn") +except RuntimeError as error: + if error.args == ("context has already been set",): + logger.info("Start method has already been set") + pass + else: + raise error + + +class ResourceDownloadError(ClientResponseError): + """An error occurred while downloading a VM resource file""" + + def __init__(self, error: ClientResponseError): + super().__init__( + request_info=error.request_info, + history=error.history, + status=error.status, + message=error.message, + headers=error.headers, + ) + + +@dataclass +class Volume: + mount: str + device: str + read_only: bool + + +@dataclass +class HostVolume: + mount: str + path_on_host: Path + read_only: bool + + +@dataclass +class BaseConfiguration: + vm_hash: ItemHash + ip: str | None = None + route: str | None = None + dns_servers: list[str] = field(default_factory=list) + volumes: list[Volume] = field(default_factory=list) + variables: dict[str, str] | None = None + + +@dataclass +class ConfigurationResponse: + success: bool + error: str | None = None + traceback: str | None = None + + +class AlephFirecrackerResources: + """Resources required to start a Firecracker VM""" + + message_content: ExecutableContent + + kernel_image_path: Path + rootfs_path: Path + volumes: list[HostVolume] + namespace: str + + def __init__(self, message_content: ExecutableContent, namespace: str): + self.message_content = message_content + self.namespace = namespace + + def to_dict(self): + return self.__dict__ + + async def download_kernel(self): + # Assumes kernel is already present on the host + self.kernel_image_path = Path(settings.LINUX_PATH) + assert isfile(self.kernel_image_path) + + async def download_volumes(self): + volumes = [] + # TODO: Download in parallel and prevent duplicated volume names + for i, volume in enumerate(self.message_content.volumes): + # only persistant volume has name and mount + if isinstance(volume, PersistentVolume): + if not volume.name: + volume.name = f"unamed_volume_{i}" + if not 
volume.mount: + volume.mount = f"/mnt/{volume.name}" + volumes.append( + HostVolume( + mount=volume.mount, + path_on_host=(await get_volume_path(volume=volume, namespace=self.namespace)), + read_only=volume.is_read_only(), + ) + ) + self.volumes = volumes + + async def download_all(self): + await asyncio.gather( + self.download_kernel(), + self.download_volumes(), + ) + + +class VmSetupError(Exception): + pass + + +class VmInitNotConnectedError(Exception): + pass + + +ConfigurationType = TypeVar("ConfigurationType") + + +class AlephFirecrackerExecutable(Generic[ConfigurationType], AlephVmControllerInterface): + vm_id: int + vm_hash: ItemHash + resources: AlephFirecrackerResources + enable_console: bool + enable_networking: bool + hardware_resources: MachineResources + tap_interface: TapInterface | None = None + fvm: MicroVM + vm_configuration: ConfigurationType | None + guest_api_process: Process | None = None + is_instance: bool + persistent: bool + _firecracker_config: FirecrackerConfig | None = None + controller_configuration: Configuration | None = None + support_snapshot: bool + + @property + def resources_path(self) -> Path: + return Path(self.fvm.namespace_path) + + def __init__( + self, + vm_id: int, + vm_hash: ItemHash, + resources: AlephFirecrackerResources, + enable_networking: bool = False, + enable_console: bool | None = None, + hardware_resources: MachineResources | None = None, + tap_interface: TapInterface | None = None, + persistent: bool = False, + prepare_jailer: bool = True, + ): + self.vm_id = vm_id + self.vm_hash = vm_hash + self.resources = resources + if enable_console is None: + enable_console = settings.PRINT_SYSTEM_LOGS + self.enable_console = enable_console + self.enable_networking = enable_networking and settings.ALLOW_VM_NETWORKING + self.hardware_resources = hardware_resources or MachineResources() + self.tap_interface = tap_interface + self.persistent = persistent + + self.fvm = MicroVM( + vm_id=self.vm_id, + vm_hash=vm_hash, + 
    def to_dict(self):
        """Dict representation of the virtual machine. Used to record resource usage and for JSON serialization."""
        if self.fvm.proc and psutil:
            # The firecracker process is still running and process information can be obtained from `psutil`.
            try:
                p = psutil.Process(self.fvm.proc.pid)
                pid_info = {
                    "status": p.status(),
                    "create_time": p.create_time(),
                    "cpu_times": p.cpu_times(),
                    "cpu_percent": p.cpu_percent(),
                    "memory_info": p.memory_info(),
                    "io_counters": p.io_counters(),
                    "open_files": p.open_files(),
                    # NOTE(review): `Process.connections()` is deprecated in
                    # recent psutil releases in favour of `net_connections()`
                    # — confirm the pinned psutil version before switching.
                    "connections": p.connections(),
                    "num_threads": p.num_threads(),
                    "num_ctx_switches": p.num_ctx_switches(),
                }
            except psutil.NoSuchProcess:
                # The process exited between the `proc` check and the lookup.
                logger.warning("Cannot read process metrics (process not found)")
                pid_info = None
        else:
            pid_info = None

        return {
            "process": pid_info,
            **self.__dict__,
        }
    async def configure(self):
        """Configure the VM by saving controller service configuration.

        Only persistent VMs are handled here: their Firecracker configuration
        is written to disk and wrapped in a `Configuration` object consumed by
        the systemd-managed controller. Non-persistent VMs are configured when
        `start()` is called directly on this object.
        """
        if self.persistent:
            firecracker_config_path = await self.fvm.save_configuration_file(self._firecracker_config)
            vm_configuration = VMConfiguration(
                firecracker_bin_path=self.fvm.firecracker_bin_path,
                use_jailer=self.fvm.use_jailer,
                jailer_bin_path=self.fvm.jailer_bin_path,
                init_timeout=self.fvm.init_timeout,
                config_file_path=firecracker_config_path,
            )

            configuration = Configuration(
                vm_id=self.vm_id,
                vm_hash=self.vm_hash,
                settings=settings,
                vm_configuration=vm_configuration,
            )

            # Persist the controller configuration so the systemd service can
            # start this VM independently of the supervisor process.
            save_controller_configuration(self.vm_hash, configuration)
    async def teardown(self):
        """Stop the VM and release every host-side resource attached to it.

        Order matters: the Firecracker process is stopped first, then the
        nftables firewall rules and the TAP network interface are removed,
        and finally the guest API helper process is terminated.
        """
        if self.fvm:
            await self.fvm.teardown()
        teardown_nftables_for_vm(self.vm_id)
        if self.tap_interface:
            await self.tap_interface.delete()
        await self.stop_guest_api()
class AlephInstanceResources(AlephFirecrackerResources):
    """Resources required to start a Firecracker instance (persistent VM)."""

    async def download_runtime(self):
        """Create the device-mapper block device backing the instance rootfs."""
        self.rootfs_path = await create_devmapper(self.message_content.rootfs, self.namespace)
        assert self.rootfs_path.is_block_device(), f"Runtime not found on {self.rootfs_path}"

    async def download_all(self):
        """Fetch the kernel, rootfs and volumes concurrently."""
        await asyncio.gather(
            self.download_kernel(),
            self.download_runtime(),
            self.download_volumes(),
        )
volume in self.resources.volumes + ], + machine_config=MachineConfig( + vcpu_count=self.hardware_resources.vcpus, + mem_size_mib=self.hardware_resources.memory, + ), + vsock=Vsock(), + network_interfaces=( + [NetworkInterface(iface_id="eth0", host_dev_name=self.tap_interface.device_name)] + if self.enable_networking and self.tap_interface + else [] + ), + ) + + async def wait_for_init(self) -> None: + """Wait for the init process of the instance to be ready.""" + assert self.enable_networking and self.tap_interface, f"Network not enabled for VM {self.vm_id}" + + ip = self.get_ip() + if not ip: + msg = "Host IP not available" + raise ValueError(msg) + + ip = ip.split("/", 1)[0] + + attempts = 30 + timeout_seconds = 2 + + for attempt in range(attempts): + try: + await ping(ip, packets=1, timeout=timeout_seconds) + return + except HostNotFoundError: + if attempt < (attempts - 1): + continue + else: + raise + + async def create_snapshot(self) -> CompressedDiskVolumeSnapshot: + """Create a VM snapshot""" + volume_path = await create_volume_file(self.resources.message_content.rootfs, self.resources.namespace) + volume = DiskVolume(path=volume_path) + + if not check_disk_space(volume.size): + raise NotEnoughDiskSpaceError + + snapshot = await volume.take_snapshot() + compressed_snapshot = await snapshot.compress(settings.SNAPSHOT_COMPRESSION_ALGORITHM) + + if self.latest_snapshot: + self.latest_snapshot.delete() + + self.latest_snapshot = snapshot + return compressed_snapshot + + def _get_hostname(self) -> str: + item_hash_binary: bytes = base64.b16decode(self.vm_hash.encode().upper()) + return base64.b32encode(item_hash_binary).decode().strip("=").lower() + + def _encode_user_data(self) -> bytes: + """Creates user data configuration file for cloud-init tool""" + + ssh_authorized_keys: list[str] | None + if settings.USE_DEVELOPER_SSH_KEYS: + ssh_authorized_keys = settings.DEVELOPER_SSH_KEYS or [] + else: + ssh_authorized_keys = 
self.resources.message_content.authorized_keys or [] + + config: dict[str, str | bool | list[str]] = { + "hostname": self._get_hostname(), + "disable_root": False, + "ssh_pwauth": False, + "ssh_authorized_keys": ssh_authorized_keys, + # Avoid the resize error because we already do it on the VM disk creation stage + "resize_rootfs": False, + } + + cloud_config_header = "#cloud-config\n" + config_output = yaml.safe_dump(config, default_flow_style=False, sort_keys=False) + + return (cloud_config_header + config_output).encode() + + def _create_network_file(self) -> bytes: + """Creates network configuration file for cloud-init tool""" + + assert self.enable_networking and self.tap_interface, f"Network not enabled for VM {self.vm_id}" + + ip = self.get_ip() + route = self.get_ip_route() + ipv6 = self.get_ipv6() + ipv6_gateway = self.get_ipv6_gateway() + + nameservers_ip = [] + if ip: + nameservers_ip = settings.DNS_NAMESERVERS_IPV4 + if ipv6: + nameservers_ip += settings.DNS_NAMESERVERS_IPV6 + network = { + "ethernets": { + "eth0": { + "dhcp4": False, + "dhcp6": False, + "addresses": [ip, ipv6], + "gateway4": route, + "gateway6": ipv6_gateway, + "nameservers": { + "addresses": nameservers_ip, + }, + }, + }, + "version": 2, + } + + return yaml.safe_dump(network, default_flow_style=False, sort_keys=False).encode() + + def _create_metadata_file(self) -> bytes: + """Creates metadata configuration file for cloud-init tool""" + + metadata = { + "instance-id": f"iid-instance-{self.vm_id}", + "local-hostname": self._get_hostname(), + } + + return json.dumps(metadata).encode() + + async def _create_cloud_init_drive(self) -> Drive: + """Creates the cloud-init volume to configure and setup the VM""" + + disk_image_path = settings.EXECUTION_ROOT / f"cloud-init-{self.vm_hash}.img" + + with NamedTemporaryFile() as user_data_config_file: + user_data = self._encode_user_data() + user_data_config_file.write(user_data) + user_data_config_file.flush() + with NamedTemporaryFile() as 
network_config_file: + network_config = self._create_network_file() + network_config_file.write(network_config) + network_config_file.flush() + with NamedTemporaryFile() as metadata_config_file: + metadata_config = self._create_metadata_file() + metadata_config_file.write(metadata_config) + metadata_config_file.flush() + + await run_in_subprocess( + [ + "cloud-localds", + f"--network-config={network_config_file.name}", + str(disk_image_path), + user_data_config_file.name, + metadata_config_file.name, + ] + ) + + return self.fvm.enable_drive(disk_image_path, read_only=True) diff --git a/src/aleph/vm/controllers/firecracker/program.py b/src/aleph/vm/controllers/firecracker/program.py new file mode 100644 index 000000000..1bc5e05ed --- /dev/null +++ b/src/aleph/vm/controllers/firecracker/program.py @@ -0,0 +1,445 @@ +from __future__ import annotations + +import asyncio +import dataclasses +import logging +import os.path +from asyncio import StreamReader, StreamWriter +from dataclasses import dataclass, field +from enum import Enum +from pathlib import Path + +import msgpack +from aiohttp import ClientResponseError +from aleph_message.models import ExecutableContent, ItemHash +from aleph_message.models.execution.base import Encoding +from aleph_message.models.execution.environment import MachineResources + +from aleph.vm.conf import settings +from aleph.vm.hypervisors.firecracker.config import ( + BootSource, + Drive, + FirecrackerConfig, + MachineConfig, + NetworkInterface, + Vsock, +) +from aleph.vm.hypervisors.firecracker.microvm import RuntimeConfiguration, setfacl +from aleph.vm.network.interfaces import TapInterface +from aleph.vm.storage import get_code_path, get_data_path, get_runtime_path +from aleph.vm.utils import MsgpackSerializable + +from .executable import ( + AlephFirecrackerExecutable, + AlephFirecrackerResources, + ResourceDownloadError, + VmInitNotConnectedError, + VmSetupError, + Volume, +) + +logger = logging.getLogger(__name__) + + +class 
class Interface(str, Enum):
    """How the program inside the VM is exposed: a Python ASGI application
    or an executable providing an HTTP service."""

    asgi = "asgi"
    executable = "executable"

    @classmethod
    def from_entrypoint(cls, entrypoint: str):
        """Determine the interface type from the program's entrypoint.

        Only Python ASGI entrypoints (e.g. ``module:app``) contain a colon
        ``:`` in their name; this distinguishes them from executable HTTP
        service commands.
        """
        return cls.asgi if ":" in entrypoint else cls.executable
+ """ + field_names = {f.name for f in dataclasses.fields(cls)} + return cls(**{k: v for k, v in dataclasses.asdict(program_config).items() if k in field_names}) + + +@dataclass +class ConfigurationPayloadV2(ConfigurationPayloadV1): + """ + Configuration payload for runtime v2. + Adds support for IPv6. + """ + + ipv6: str | None + ipv6_gateway: str | None + authorized_keys: list[str] | None + + +@dataclass +class ProgramConfiguration: + """Configuration passed to the init of the virtual machine in order to start the program.""" + + input_data: bytes | None + interface: Interface + vm_hash: str + encoding: Encoding + entrypoint: str + code: bytes | None = None + ip: str | None = None + ipv6: str | None = None + route: str | None = None + ipv6_gateway: str | None = None + dns_servers: list[str] = field(default_factory=list) + volumes: list[Volume] = field(default_factory=list) + variables: dict[str, str] | None = None + authorized_keys: list[str] | None = None + + def to_runtime_format(self, runtime_config: RuntimeConfiguration) -> ConfigurationPayload: + if runtime_config.version == "1.0.0": + return ConfigurationPayloadV1.from_program_config(self) + + if runtime_config.version != "2.0.0": + logger.warning("This runtime version may be unsupported: %s", runtime_config.version) + + return ConfigurationPayloadV2.from_program_config(self) + + +@dataclass +class ConfigurationResponse: + """Response received from the virtual machine in response to a request.""" + + success: bool + error: str | None = None + traceback: str | None = None + + +@dataclass +class RunCodePayload(MsgpackSerializable): + """Information passed to the init of the virtual machine to launch a function/path of the program.""" + + scope: dict + + +class AlephProgramResources(AlephFirecrackerResources): + """Resources required by the virtual machine in order to launch the program. 
def get_volumes_for_program(resources: AlephProgramResources, drives: list[Drive]) -> tuple[bytes | None, list[Volume]]:
    """Build the (inline code, guest volume list) pair sent to the runtime.

    For squashfs-encoded code, the program is shipped as a dedicated drive
    (mounted on /opt/code as `vdb`) instead of inline bytes, which shifts the
    index of every user volume in `drives` by one. Otherwise the code archive
    is read into memory and passed inline, subject to a size limit.

    Raises:
        FileTooLargeError: when inline code exceeds MAX_PROGRAM_ARCHIVE_SIZE.
    """
    code: bytes | None
    volumes: list[Volume]
    if resources.code_encoding == Encoding.squashfs:
        # Code travels as a drive; send an empty inline payload.
        code = b""
        volumes = [Volume(mount="/opt/code", device="vdb", read_only=True)] + [
            Volume(
                mount=volume.mount,
                # The code drive occupies the first extra slot, hence the +1
                # offset. NOTE(review): assumes `drives` matches the order the
                # volumes were enabled in — confirm against MicroVM.drives.
                device=drives[index + 1].drive_id,
                read_only=volume.read_only,
            )
            for index, volume in enumerate(resources.volumes)
        ]
    else:
        if os.path.getsize(resources.code_path) > settings.MAX_PROGRAM_ARCHIVE_SIZE:
            msg = "Program file too large to pass as an inline zip"
            raise FileTooLargeError(msg)

        code = resources.code_path.read_bytes() if resources.code_path else None
        volumes = [
            Volume(
                mount=volume.mount,
                device=drives[index].drive_id,
                read_only=volume.read_only,
            )
            for index, volume in enumerate(resources.volumes)
        ]
    return code, volumes
read_only=volume.read_only) + for volume in self.resources.volumes + ], + machine_config=MachineConfig( + vcpu_count=self.hardware_resources.vcpus, + mem_size_mib=self.hardware_resources.memory, + ), + vsock=Vsock(), + network_interfaces=( + [NetworkInterface(iface_id="eth0", host_dev_name=self.tap_interface.device_name)] + if self.enable_networking and self.tap_interface + else [] + ), + ) + + async def wait_for_init(self) -> None: + """Wait for the custom init inside the virtual machine to signal it is ready.""" + await self.fvm.wait_for_init() + + async def load_configuration(self) -> None: + code: bytes | None + volumes: list[Volume] + + code, volumes = get_volumes_for_program(resources=self.resources, drives=self.fvm.drives) + interface: Interface = Interface.from_entrypoint(self.resources.code_entrypoint) + input_data: bytes | None = read_input_data(self.resources.data_path) + + await self._setup_configuration(code=code, input_data=input_data, interface=interface, volumes=volumes) + + async def _setup_configuration( + self, + code: bytes | None, + input_data: bytes | None, + interface: Interface, + volumes: list[Volume], + ) -> None: + """Set up the VM configuration. The program mode uses a VSOCK connection to the custom init of the virtual + machine to send this configuration. Other modes may use Cloud-init, ...""" + logger.debug("Sending configuration") + reader, writer = await asyncio.open_unix_connection(path=self.fvm.vsock_path) + + ip = self.get_ip() + if ip: + # The ip and route should not contain the network mask in order to maintain + # compatibility with the existing runtimes. 
+ ip = ip.split("/", 1)[0] + route = self.get_ip_route() + ipv6 = self.get_ipv6() + ipv6_gateway = self.get_ipv6_gateway() + + if settings.ALLOW_VM_NETWORKING and not settings.DNS_NAMESERVERS: + msg = "Invalid configuration: DNS nameservers missing" + raise ValueError(msg) + + runtime_config = self.fvm.runtime_config + assert runtime_config + + authorized_keys: list[str] | None + if settings.USE_DEVELOPER_SSH_KEYS: + authorized_keys = settings.DEVELOPER_SSH_KEYS + else: + authorized_keys = self.resources.message_content.authorized_keys + nameservers_ip = [] + if ip: + nameservers_ip = settings.DNS_NAMESERVERS_IPV4 + if ipv6: + nameservers_ip += settings.DNS_NAMESERVERS_IPV6 + + program_config = ProgramConfiguration( + ip=ip, + ipv6=ipv6, + route=route, + ipv6_gateway=ipv6_gateway, + dns_servers=nameservers_ip, + code=code, + encoding=self.resources.code_encoding, + entrypoint=self.resources.code_entrypoint, + input_data=input_data, + interface=interface, + vm_hash=self.vm_hash, + volumes=volumes, + variables=self.resources.message_content.variables, + authorized_keys=authorized_keys, + ) + # Convert the configuration in a format compatible with the runtime + versioned_config = program_config.to_runtime_format(runtime_config) + payload = versioned_config.as_msgpack() + length = f"{len(payload)}\n".encode() + writer.write(b"CONNECT 52\n" + length + payload) + await writer.drain() + + await reader.readline() # Ignore the acknowledgement from the socket + response_raw = await reader.read(1000_000) + response = ConfigurationResponse(**msgpack.loads(response_raw, raw=False)) + if response.success is False: + logger.exception(response.traceback) + raise VmSetupError(response.error) + + async def run_code( + self, + scope: dict | None = None, + ): + if not self.fvm: + msg = "MicroVM must be created first" + raise ValueError(msg) + logger.debug("running code") + scope = scope or {} + + async def communicate(reader_: StreamReader, writer_: StreamWriter, scope_: dict) -> 
def wrap_async_snapshot(vm):
    """Run the asynchronous snapshot coroutine to completion from a plain thread."""
    asyncio.run(do_vm_snapshot(vm))


def run_threaded_snapshot(vm):
    """Take a snapshot of `vm` in a freshly spawned thread so the scheduler
    loop is not blocked while the snapshot is taken and uploaded."""
    job_thread = threading.Thread(target=wrap_async_snapshot, args=(vm,))
    job_thread.start()
an snapshot" + raise ValueError(msg) from error + + +def infinite_run_scheduler_jobs(scheduler: Scheduler) -> None: + while True: + scheduler.run_pending() + sleep(1) + + +class SnapshotExecution: + vm_hash: ItemHash + execution: AlephFirecrackerExecutable + frequency: int + _scheduler: Scheduler + _job: Job + + def __init__( + self, + scheduler: Scheduler, + vm_hash: ItemHash, + execution: AlephFirecrackerExecutable, + frequency: int, + ): + self.vm_hash = vm_hash + self.execution = execution + self.frequency = frequency + self._scheduler = scheduler + + async def start(self) -> None: + logger.debug(f"Starting snapshots for VM {self.vm_hash} every {self.frequency} minutes") + job = self._scheduler.every(self.frequency).minutes.do(run_threaded_snapshot, self.execution) + self._job = job + + async def stop(self) -> None: + logger.debug(f"Stopping snapshots for VM {self.vm_hash}") + self._scheduler.cancel_job(self._job) + + +class SnapshotManager: + """ + Manage VM snapshots. + """ + + executions: dict[ItemHash, SnapshotExecution] + _scheduler: Scheduler + + def __init__(self): + self.executions = {} + self._scheduler = Scheduler() + + def run_in_thread(self) -> None: + job_thread = threading.Thread( + target=infinite_run_scheduler_jobs, + args=[self._scheduler], + daemon=True, + name="SnapshotManager", + ) + job_thread.start() + + async def start_for(self, vm: AlephFirecrackerExecutable, frequency: int | None = None) -> None: + if not vm.support_snapshot: + msg = "Snapshots are not implemented for programs." 
+ raise NotImplementedError(msg) + + default_frequency = frequency or settings.SNAPSHOT_FREQUENCY + + vm_hash = vm.vm_hash + snapshot_execution = SnapshotExecution( + scheduler=self._scheduler, + vm_hash=vm_hash, + execution=vm, + frequency=default_frequency, + ) + self.executions[vm_hash] = snapshot_execution + await snapshot_execution.start() + + async def stop_for(self, vm_hash: ItemHash) -> None: + try: + snapshot_execution = self.executions.pop(vm_hash) + except KeyError: + logger.warning("Could not find snapshot task for instance %s", vm_hash) + return + + await snapshot_execution.stop() + + async def stop_all(self) -> None: + await asyncio.gather(*(self.stop_for(vm_hash) for vm_hash, execution in self.executions)) diff --git a/src/aleph/vm/controllers/firecracker/snapshots.py b/src/aleph/vm/controllers/firecracker/snapshots.py new file mode 100644 index 000000000..7188bfb6d --- /dev/null +++ b/src/aleph/vm/controllers/firecracker/snapshots.py @@ -0,0 +1,55 @@ +import logging +from pathlib import Path + +from aleph_message.models import ItemHash + +from aleph.vm.conf import SnapshotCompressionAlgorithm +from aleph.vm.storage import compress_volume_snapshot, create_volume_snapshot + +logger = logging.getLogger(__name__) + + +class DiskVolumeFile: + path: Path + size: int + + def __init__(self, path: Path): + self.path = path + self.size = path.stat().st_size + + +class CompressedDiskVolumeSnapshot(DiskVolumeFile): + algorithm: SnapshotCompressionAlgorithm + + def __init__(self, path: Path, algorithm: SnapshotCompressionAlgorithm): + super().__init__(path=path) + self.algorithm = algorithm + + def delete(self) -> None: + self.path.unlink(missing_ok=True) + + async def upload(self) -> ItemHash: + # TODO: Upload snapshots to Aleph Network + pass + + +class DiskVolumeSnapshot(DiskVolumeFile): + compressed: CompressedDiskVolumeSnapshot | None + + def delete(self) -> None: + if self.compressed: + self.compressed.delete() + + self.path.unlink(missing_ok=True) + + 
async def compress(self, algorithm: SnapshotCompressionAlgorithm) -> CompressedDiskVolumeSnapshot: + compressed_snapshot = await compress_volume_snapshot(self.path, algorithm) + compressed = CompressedDiskVolumeSnapshot(path=compressed_snapshot, algorithm=algorithm) + self.compressed = compressed + return compressed + + +class DiskVolume(DiskVolumeFile): + async def take_snapshot(self) -> DiskVolumeSnapshot: + snapshot = await create_volume_snapshot(self.path) + return DiskVolumeSnapshot(snapshot) diff --git a/src/aleph/vm/controllers/firecracker/storage.py b/src/aleph/vm/controllers/firecracker/storage.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/controllers/interface.py b/src/aleph/vm/controllers/interface.py new file mode 100644 index 000000000..bff265a2b --- /dev/null +++ b/src/aleph/vm/controllers/interface.py @@ -0,0 +1,123 @@ +import asyncio +import logging +from abc import ABC +from asyncio.subprocess import Process +from collections.abc import Callable, Coroutine +from typing import Any + +from aleph_message.models import ItemHash +from aleph_message.models.execution.environment import MachineResources + +from aleph.vm.controllers.firecracker.snapshots import CompressedDiskVolumeSnapshot +from aleph.vm.network.interfaces import TapInterface +from aleph.vm.utils.logs import get_past_vm_logs, make_logs_queue + +logger = logging.getLogger(__name__) + + +class AlephVmControllerInterface(ABC): + log_queues: list[asyncio.Queue] = [] + _queue_cancellers: dict[asyncio.Queue, Callable] = {} + + vm_id: int + """id in the VMPool, attributed at execution""" + vm_hash: ItemHash + """identifier for the VM definition, linked to an Aleph Message""" + resources: Any + """local resource for the machine""" + enable_console: bool + enable_networking: bool + """enable networking for this VM""" + hardware_resources: MachineResources + support_snapshot: bool + """Does this controller support snapshotting""" + guest_api_process: Process | None = 
None + tap_interface: TapInterface | None = None + """Network interface used for this VM""" + + def get_ip(self) -> str | None: + if self.tap_interface: + return self.tap_interface.guest_ip.with_prefixlen + return None + + def get_ip_route(self) -> str | None: + if self.tap_interface: + return str(self.tap_interface.host_ip).split("/", 1)[0] + return None + + def get_ipv6(self) -> str | None: + if self.tap_interface: + return self.tap_interface.guest_ipv6.with_prefixlen + return None + + def get_ipv6_gateway(self) -> str | None: + if self.tap_interface: + return str(self.tap_interface.host_ipv6.ip) + return None + + def to_dict(self): + """Dict representation of the virtual machine. Used to record resource usage and for JSON serialization.""" + raise NotImplementedError() + + async def setup(self): + """Configuration done before the VM process is started""" + raise NotImplementedError() + + async def start(self): + """Start the VM process""" + raise NotImplementedError() + + async def wait_for_init(self) -> None: + """Wait for the init process of the virtual machine to be ready. 
+ May be empty.""" + pass + + async def configure(self) -> None: + """Configuration done after the VM process is started""" + raise NotImplementedError() + + async def load_configuration(self) -> None: + """Load configuration just after the VM process is started""" + raise NotImplementedError() + + async def start_guest_api(self): + raise NotImplementedError() + + async def stop_guest_api(self): + raise NotImplementedError() + + async def teardown(self) -> Coroutine: + raise NotImplementedError() + + async def create_snapshot(self) -> CompressedDiskVolumeSnapshot: + """Must be implement if self.support_snapshot is True""" + raise NotImplementedError() + + def get_log_queue(self) -> asyncio.Queue: + queue, canceller = make_logs_queue(self._journal_stdout_name, self._journal_stderr_name) + self._queue_cancellers[queue] = canceller + # Limit the number of queues per VM + # TODO : fix + if len(self.log_queues) > 20: + logger.warning("Too many log queues, dropping the oldest one") + self.unregister_queue(self.log_queues[1]) + self.log_queues.append(queue) + return queue + + def unregister_queue(self, queue: asyncio.Queue) -> None: + if queue in self.log_queues: + self._queue_cancellers[queue]() + del self._queue_cancellers[queue] + self.log_queues.remove(queue) + queue.empty() + + @property + def _journal_stdout_name(self) -> str: + return f"vm-{self.vm_hash}-stdout" + + @property + def _journal_stderr_name(self) -> str: + return f"vm-{self.vm_hash}-stderr" + + def past_logs(self): + yield from get_past_vm_logs(self._journal_stdout_name, self._journal_stderr_name) diff --git a/src/aleph/vm/controllers/qemu/QEMU.md b/src/aleph/vm/controllers/qemu/QEMU.md new file mode 100644 index 000000000..d1d11059f --- /dev/null +++ b/src/aleph/vm/controllers/qemu/QEMU.md @@ -0,0 +1,160 @@ +# Qemu support + +## Requirements +Commands : qemu, cloud-ds, qemu-img + +These are installable via +`apt install cloud-image-utils qemu-utils qemu-system-x86` + +This branch depends on the version 
0.4.1 of `aleph-message` that adds the `hypervisor` field. The easiest way is to install that version using `pip install -e .` + +To create a local venv use the `--system-site-packages` option so it can access nftables + +## To test launching a VM instance + +Launch aleph.vm.orchestrator with the following environment variables + + +```environ +ALEPH_VM_FAKE_INSTANCE_BASE=/home/olivier/Projects/qemu-quickstart/jammy-server-cloudimg-amd64.img +ALEPH_VM_FAKE_INSTANCE_MESSAGE=/home/olivier/Projects/aleph/aleph-vm/examples/qemu_message_from_aleph.json +ALEPH_VM_USE_FAKE_INSTANCE_BASE=1 +# set test as the allocation password +ALEPH_VM_ALLOCATION_TOKEN_HASH=9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 + +``` + +Where `ALEPH_VM_FAKE_INSTANCE_BASE` is the path to the base disk image. You can get the Ubuntu one via: +`wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img` + +You can use any base VM image supporting cloud-init. cloud-init support is mandatory because it is used to set up the network. + + +To only launch the VM instance, use the parameter: +`--run-fake-instance` + +You can then try to connect via ssh to its IP. Wait a minute or so for it to set up properly with the network + +Or launch the whole supervisor server (no params), then launch the VM via http + +```http request +### Start fake VM +POST http://localhost:4020/control/allocations +Content-Type: application/json +X-Auth-Signature: test +Accept: application/json + +{"persistent_vms": [], "instances": ["decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca"]} +``` + +After a minute or two you should be able to SSH into the VM. Check in the log for the VM IP.
+If you used an Ubuntu image the username should be ubuntu + +You can then stop the VM using +```http request +### Stop the VM +POST http://localhost:4020/control/machine/decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca/stop +Accept: application/json +``` +(you will need to comment @require_jwk_authentication) + +# Connecting to the VM via your own ssh key +In local development, if you want to connect via ssh to the VM and you don't have your + key already included in you base image or inside the aleph message, you can configure it in the following way. + +First set your key in the environment variable ALEPH_VM_DEVELOPER_SSH_KEYS in the json format. You can add it directly in the `.env` file +```env +ALEPH_VM_DEVELOPER_SSH_KEYS=["ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDj95BHGUx0/z2G/tTrEi8o49i70xvjcEUdSs3j4A33jE7pAphrfRVbuFMgFubcm8n9r5ftd/H8SjjTL4hY9YvWV5ZuMf92GUga3n4wgevvPlBszYZCy/idxFl0vtHYC1CcK9v4tVb9onhDt8FOJkf2m6PmDyvC+6tl6LwoerXTeeiKr5VnTB4KOBkammtFmix3d1X1SZd/cxdwZIHcQ7BNsqBm2w/YzVba6Z4ZnFUelBkQtMQqNs2aV51O1pFFqtZp2mM71D5d8vn9pOtqJ5QmY5IW6NypcyqKJZg5o6QguK5rdXLkc7AWro27BiaHIENl3w0wazp9EDO9zPAGJ6lz olivier@lanius"] +``` + +Then pass the `--developer-ssh-keys` as an argument when starting the supervisor. + +Cloud init support for settings the ssh key in the VM image is required, this is the same mechanism and settings as for firecracker program, of course this is not for production use. + +## Using the CentOS distro for your VM +Qemu support has also been tested with CentOS 7 + +To test it locally +1. Download the CentOS cloud image distro: +`curl -LO -C - http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz` +2. It is in a compressed format, so you will need to uncompress it +```unxz CentOS-7-x86_64-GenericCloud.qcow2.xz``` +3. Set the fake instance base to point to the file `CentOS-7-x86_64-GenericCloud.qcow2 +(either via --fake-instance base parameter or the ALEPH_VM_FAKE_INSTANCE_BASE environment) +4. 
Launch it as per instruction aboce +5. To ssh use the user: `centos` + +## Using the Debian distro for your VM +Debian QEMU Support has been tested with Debian 12 bookworm. Download the image from https://cloud.debian.org/images/cloud/ + +Use the AMD64 `genericcloud` image. The `generic` should work too but `genericcloud` is smaller as it doesn't contain unnecessary hardware drivers. + +e.g `wget https://cloud.debian.org/images/cloud/bookworm/20231013-1532/debian-12-genericcloud-amd64-20231013-1532.qcow2` + +See instruction above for the rest. The default user is `root` + +# Check the log via Websocket +You can stream the logs from the VM using, the following python example script. +Caveat: This requires to temporarly disable auth on this endpoint, you need the print system log settings to be active `ALEPH_VM_PRINT_SYSTEM_LOGS=1`. The system only stream new log content from the VM not the old one. +```python +import json +import sys + +import asyncio +import aiohttp + + +def on_message(content): + try: + msg = json.loads(content) + if msg.get('status'): + print(msg) + else: + fd = sys.stderr if msg["type"] == "stderr" else sys.stdout + print("<", msg["message"], file=fd, end="") + except: + print("unable to parse", content) + + +async def tail_websocket(url): + async with aiohttp.ClientSession() as session: + async with session.ws_connect(url) as ws: + print(f"connected to {url}") + async for msg in ws: + if msg.type == aiohttp.WSMsgType.TEXT: + on_message(msg.data) + elif msg.type == aiohttp.WSMsgType.CLOSED: + print("closed") + break + elif msg.type == aiohttp.WSMsgType.ERROR: + print("Error", msg) + + +vm_hash = "decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca" +url = f"ws://localhost:4020/control/machine/{vm_hash}/logs" +loop = asyncio.get_event_loop() +loop.run_until_complete(tail_websocket(url)) +``` + + +# TODO +- [x] Launch +- [x] Message format +- [x] Network +- [x] Cloud init support +- [x] Download ressource +- [ ] snapshot +- [ ] Multi 
volume +- [x] fix logs +- [ ] Testing +- [x] Support raw format for base image +- [x] More testing with different Distro: + - [x] Centos + - [x] Debian + - [x] Alpine (do not support centos no cloud) +- [ ] Document for user how to build their own images +- [x] Allow ssh developer key +- [ ] Automated testing in CI +- [x] Output the whole serial console in logs +- [x] Test code for websocket logs +- [ ] Multi Layer Qcow image? diff --git a/src/aleph/vm/controllers/qemu/__init__.py b/src/aleph/vm/controllers/qemu/__init__.py new file mode 100644 index 000000000..eb9414917 --- /dev/null +++ b/src/aleph/vm/controllers/qemu/__init__.py @@ -0,0 +1,3 @@ +from .instance import AlephQemuInstance + +__all__ = "AlephQemuInstance" diff --git a/src/aleph/vm/controllers/qemu/client.py b/src/aleph/vm/controllers/qemu/client.py new file mode 100644 index 000000000..c98899d5b --- /dev/null +++ b/src/aleph/vm/controllers/qemu/client.py @@ -0,0 +1,76 @@ +import qmp +from pydantic import BaseModel + + +class VmSevInfo(BaseModel): + enabled: bool + api_major: int + api_minor: int + build_id: int + policy: int + state: str + handle: int + + +class QemuVmClient: + def __init__(self, vm): + self.vm = vm + if not (vm.qmp_socket_path and vm.qmp_socket_path.exists()): + msg = "VM is not running" + raise Exception(msg) + client = qmp.QEMUMonitorProtocol(str(vm.qmp_socket_path)) + client.connect() + + # qmp_client = qmp.QEMUMonitorProtocol(address=("localhost", vm.qmp_port)) + self.qmp_client = client + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def close(self) -> None: + self.qmp_client.close() + + def query_sev_info(self) -> VmSevInfo: + caps = self.qmp_client.command("query-sev") + return VmSevInfo( + enabled=caps["enabled"], + api_major=caps["api-major"], + api_minor=caps["api-minor"], + handle=caps["handle"], + state=caps["state"], + build_id=caps["build-id"], + policy=caps["policy"], + ) + + def query_launch_measure(self) 
-> str: + measure = self.qmp_client.command("query-sev-launch-measure") + return measure["data"] + + def inject_secret(self, packet_header: str, secret: str) -> None: + """ + Injects the secret in the SEV secret area. + + :param packet_header: The packet header, as a base64 string. + :param secret: The encoded secret, as a base64 string. + """ + + self.qmp_client.command( + "sev-inject-launch-secret", + **{"packet-header": packet_header, "secret": secret}, + ) + + def continue_execution(self) -> None: + """ + Resumes the execution of the VM. + """ + self.qmp_client.command("cont") + + def query_status(self) -> None: + """ + Get running status. + """ + # {'status': 'prelaunch', 'singlestep': False, 'running': False} + return self.qmp_client.command("query-status") diff --git a/src/aleph/vm/controllers/qemu/cloudinit.py b/src/aleph/vm/controllers/qemu/cloudinit.py new file mode 100644 index 000000000..bcfa51c0f --- /dev/null +++ b/src/aleph/vm/controllers/qemu/cloudinit.py @@ -0,0 +1,148 @@ +"""Generate a cloud-init ISO image for the VM configuration. + +This module automates the creation of a cloud-init ISO image, which is utilized for configuring the +Virtual Machine. The configurations included in this process are the hostname, SSH keys, and network settings. + +The generated ISO image, created using the `cloud-localds` command, is intended to be mounted as a CD-ROM inside the +VM. Upon booting, the VM's cloud-init service detects this CD-ROM and applies the configurations based on the data it +contains. + +Refer to the cloud-init documentation, in particular the NoCloud datasource which is the method we are using. 
def get_hostname_from_hash(vm_hash: ItemHash) -> str:
    """Derive a hostname-safe identifier from the VM item hash.

    The hex (base16) item hash is re-encoded as base32, which is shorter
    and uses only characters valid in hostnames; padding is stripped and
    the result is lowercased.
    """
    raw_hash: bytes = base64.b16decode(vm_hash.encode().upper())
    b32_text = base64.b32encode(raw_hash).decode()
    return b32_text.strip("=").lower()
it will try DHCP if the key is present, even if set to false + # https://stackoverflow.com/questions/59757022/set-static-ip-using-cloud-init-on-centos-7-with-terraform-kvm + # Thus theses are commented for now + # "dhcp4": False, + # "dhcp6": False, + }, + }, + "version": 2, + } + return yaml.safe_dump(network, default_flow_style=False, sort_keys=False).encode() + + +async def create_cloud_init_drive_image( + disk_image_path, hostname, vm_id, ip, ipv6, ipv6_gateway, nameservers, route, ssh_authorized_keys +): + with ( + NamedTemporaryFile() as user_data_config_file, + NamedTemporaryFile() as network_config_file, + NamedTemporaryFile() as metadata_config_file, + ): + user_data = encode_user_data(hostname, ssh_authorized_keys) + user_data_config_file.write(user_data) + user_data_config_file.flush() + network_config = create_network_file(ip, ipv6, ipv6_gateway, nameservers, route) + network_config_file.write(network_config) + network_config_file.flush() + + metadata_config = create_metadata_file(hostname, vm_id) + metadata_config_file.write(metadata_config) + metadata_config_file.flush() + + await run_in_subprocess( + [ + "cloud-localds", + f"--network-config={network_config_file.name}", + str(disk_image_path), + user_data_config_file.name, + metadata_config_file.name, + ] + ) + + +class CloudInitMixin(AlephVmControllerInterface): + async def _create_cloud_init_drive(self) -> Drive: + """Creates the cloud-init volume to configure and set up the VM""" + ssh_authorized_keys = self.resources.message_content.authorized_keys or [] + if settings.USE_DEVELOPER_SSH_KEYS: + ssh_authorized_keys += settings.DEVELOPER_SSH_KEYS + ip = self.get_ip() + route = self.get_ip_route() + ipv6 = self.get_ipv6() + ipv6_gateway = self.get_ipv6_gateway() + vm_id = self.vm_id + nameservers = settings.DNS_NAMESERVERS + hostname = get_hostname_from_hash(self.vm_hash) + + disk_image_path: Path = settings.EXECUTION_ROOT / f"cloud-init-{self.vm_hash}.img" + assert 
is_command_available("cloud-localds") + + await create_cloud_init_drive_image( + disk_image_path, + hostname, + vm_id, + ip, + ipv6, + ipv6_gateway, + nameservers, + route, + ssh_authorized_keys, + ) + + return Drive( + drive_id="Fake", + path_on_host=disk_image_path, + is_root_device=False, + is_read_only=True, + ) diff --git a/src/aleph/vm/controllers/qemu/instance.py b/src/aleph/vm/controllers/qemu/instance.py new file mode 100644 index 000000000..259f84744 --- /dev/null +++ b/src/aleph/vm/controllers/qemu/instance.py @@ -0,0 +1,274 @@ +import asyncio +import json +import logging +import shutil +from asyncio import Task +from asyncio.subprocess import Process +from pathlib import Path +from typing import Generic, List, TypeVar + +import psutil +from aleph_message.models import ItemHash +from aleph_message.models.execution.environment import MachineResources +from aleph_message.models.execution.instance import RootfsVolume +from aleph_message.models.execution.volume import PersistentVolume, VolumePersistence + +from aleph.vm.conf import settings +from aleph.vm.controllers.configuration import ( + Configuration, + HypervisorType, + QemuGPU, + QemuVMConfiguration, + QemuVMHostVolume, + save_controller_configuration, +) +from aleph.vm.controllers.firecracker.executable import ( + AlephFirecrackerResources, + VmSetupError, +) +from aleph.vm.controllers.interface import AlephVmControllerInterface +from aleph.vm.controllers.qemu.cloudinit import CloudInitMixin +from aleph.vm.network.firewall import teardown_nftables_for_vm +from aleph.vm.network.interfaces import TapInterface +from aleph.vm.resources import HostGPU +from aleph.vm.storage import get_rootfs_base_path +from aleph.vm.utils import HostNotFoundError, ping, run_in_subprocess + +logger = logging.getLogger(__name__) + + +class AlephQemuResources(AlephFirecrackerResources): + gpus: List[HostGPU] = [] + + async def download_runtime(self) -> None: + volume = self.message_content.rootfs + parent_image_path = await 
get_rootfs_base_path(volume.parent.ref) + self.rootfs_path = await self.make_writable_volume(parent_image_path, volume) + + async def download_all(self): + await asyncio.gather( + self.download_runtime(), + self.download_volumes(), + ) + + async def make_writable_volume(self, parent_image_path, volume: PersistentVolume | RootfsVolume): + """Create a new qcow2 image file based on the passed one, that we give to the VM to write onto""" + qemu_img_path: str | None = shutil.which("qemu-img") + if not qemu_img_path: + msg = "qemu-img not found in PATH" + raise VmSetupError(msg) + + volume_name = volume.name if isinstance(volume, PersistentVolume) else "rootfs" + + # detect the image format + out_json = await run_in_subprocess([qemu_img_path, "info", str(parent_image_path), "--output=json"]) + out = json.loads(out_json) + parent_format = out.get("format", None) + if parent_format is None: + msg = f"Failed to detect format for {volume}: {out_json}" + raise VmSetupError(msg) + if parent_format not in ("qcow2", "raw"): + msg = f"Format {parent_format} for {volume} unhandled by QEMU hypervisor" + raise VmSetupError(msg) + + dest_path = settings.PERSISTENT_VOLUMES_DIR / self.namespace / f"{volume_name}.qcow2" + # Do not override if user asked for host persistance. 
+ if dest_path.exists() and volume.persistence == VolumePersistence.host: + return dest_path + + dest_path.parent.mkdir(parents=True, exist_ok=True) + size_in_bytes = int(volume.size_mib * 1024 * 1024) + + await run_in_subprocess( + [ + qemu_img_path, + "create", + "-f", # Format + "qcow2", + "-F", + parent_format, + "-b", + str(parent_image_path), + str(dest_path), + str(size_in_bytes), + ] + ) + return dest_path + + +ConfigurationType = TypeVar("ConfigurationType") + + +class AlephQemuInstance(Generic[ConfigurationType], CloudInitMixin, AlephVmControllerInterface): + vm_id: int + vm_hash: ItemHash + resources: AlephQemuResources + enable_networking: bool + hardware_resources: MachineResources + tap_interface: TapInterface | None = None + vm_configuration: ConfigurationType | None + is_instance: bool + qemu_process: Process | None + support_snapshot = False + persistent = True + controller_configuration: Configuration + + def __repr__(self): + return f"" + + def __str__(self): + return f"vm-{self.vm_id}" + + def __init__( + self, + vm_id: int, + vm_hash: ItemHash, + resources: AlephQemuResources, + enable_networking: bool = False, + hardware_resources: MachineResources = MachineResources(), + tap_interface: TapInterface | None = None, + ): + self.vm_id = vm_id + self.vm_hash = vm_hash + self.resources = resources + self.enable_networking = enable_networking and settings.ALLOW_VM_NETWORKING + self.hardware_resources = hardware_resources + self.tap_interface = tap_interface + self.qemu_process = None + + # TODO : wait for andress soltion for pid handling + def to_dict(self): + """Dict representation of the virtual machine. Used to record resource usage and for JSON serialization.""" + if self.qemu_process and psutil: + # The Qemu process is still running and process information can be obtained from `psutil`. 
+ try: + p = psutil.Process(self.qemu_process.pid) + pid_info = { + "status": p.status(), + "create_time": p.create_time(), + "cpu_times": p.cpu_times(), + "cpu_percent": p.cpu_percent(), + "memory_info": p.memory_info(), + "io_counters": p.io_counters(), + "open_files": p.open_files(), + "connections": p.connections(), + "num_threads": p.num_threads(), + "num_ctx_switches": p.num_ctx_switches(), + } + except psutil.NoSuchProcess: + logger.warning("Cannot read process metrics (process %s not found)", self.qemu_process) + pid_info = None + else: + pid_info = None + + return { + "process": pid_info, + **self.__dict__, + } + + async def setup(self): + pass + + async def configure(self): + """Configure the VM by saving controller service configuration""" + + logger.debug(f"Making Qemu configuration: {self} ") + monitor_socket_path = settings.EXECUTION_ROOT / (str(self.vm_hash) + "-monitor.socket") + + cloud_init_drive = await self._create_cloud_init_drive() + + image_path = str(self.resources.rootfs_path) + vcpu_count = self.hardware_resources.vcpus + mem_size_mib = self.hardware_resources.memory + mem_size_mb = str(int(mem_size_mib / 1024 / 1024 * 1000 * 1000)) + + qemu_bin_path = shutil.which("qemu-system-x86_64") + interface_name = None + if self.tap_interface: + interface_name = self.tap_interface.device_name + cloud_init_drive_path = str(cloud_init_drive.path_on_host) if cloud_init_drive else None + vm_configuration = QemuVMConfiguration( + qemu_bin_path=qemu_bin_path, + cloud_init_drive_path=cloud_init_drive_path, + image_path=image_path, + monitor_socket_path=monitor_socket_path, + qmp_socket_path=self.qmp_socket_path, + vcpu_count=vcpu_count, + mem_size_mb=mem_size_mb, + interface_name=interface_name, + host_volumes=[ + QemuVMHostVolume( + mount=volume.mount, + path_on_host=volume.path_on_host, + read_only=volume.read_only, + ) + for volume in self.resources.volumes + ], + gpus=[QemuGPU(pci_host=gpu.pci_host) for gpu in self.resources.gpus], + ) + + 
configuration = Configuration( + vm_id=self.vm_id, + vm_hash=self.vm_hash, + settings=settings, + vm_configuration=vm_configuration, + hypervisor=HypervisorType.qemu, + ) + logger.debug(configuration) + save_controller_configuration(self.vm_hash, configuration) + + def save_controller_configuration(self): + """Save VM configuration to be used by the controller service""" + path = Path(f"{settings.EXECUTION_ROOT}/{self.vm_hash}-controller.json") + path.open("w").write(self.controller_configuration.json(by_alias=True, exclude_none=True, indent=4)) + path.chmod(0o644) + return path + + @property + def qmp_socket_path(self) -> Path: + return settings.EXECUTION_ROOT / f"{self.vm_hash}-qmp.socket" + + async def start(self): + # Start via systemd not here + raise NotImplementedError() + + async def wait_for_init(self) -> None: + """Wait for the init process of the instance to be ready.""" + assert self.enable_networking and self.tap_interface, f"Network not enabled for VM {self.vm_id}" + + ip = self.get_ip() + if not ip: + msg = "Host IP not available" + raise ValueError(msg) + ip = ip.split("/", 1)[0] + + attempts = 30 + timeout_seconds = 2 + + for attempt in range(attempts): + try: + await ping(ip, packets=1, timeout=timeout_seconds) + return + except HostNotFoundError: + if attempt < (attempts - 1): + continue + else: + raise + + async def start_guest_api(self): + pass + + async def stop_guest_api(self): + pass + + print_task: Task | None = None + + async def teardown(self): + if self.print_task: + self.print_task.cancel() + + if self.enable_networking: + teardown_nftables_for_vm(self.vm_id) + if self.tap_interface: + await self.tap_interface.delete() + await self.stop_guest_api() diff --git a/src/aleph/vm/controllers/qemu_confidential/__init__.py b/src/aleph/vm/controllers/qemu_confidential/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/controllers/qemu_confidential/instance.py 
b/src/aleph/vm/controllers/qemu_confidential/instance.py new file mode 100644 index 000000000..37986b10c --- /dev/null +++ b/src/aleph/vm/controllers/qemu_confidential/instance.py @@ -0,0 +1,147 @@ +import asyncio +import logging +import shutil +from asyncio.subprocess import Process +from collections.abc import Callable +from pathlib import Path + +from aleph_message.models import ItemHash +from aleph_message.models.execution.environment import AMDSEVPolicy, MachineResources + +from aleph.vm.conf import settings +from aleph.vm.controllers.configuration import ( + Configuration, + HypervisorType, + QemuConfidentialVMConfiguration, + QemuGPU, + QemuVMHostVolume, + save_controller_configuration, +) +from aleph.vm.controllers.qemu import AlephQemuInstance +from aleph.vm.controllers.qemu.instance import ( + AlephQemuResources, + ConfigurationType, + logger, +) +from aleph.vm.network.interfaces import TapInterface +from aleph.vm.storage import get_existing_file + +logger = logging.getLogger(__name__) + + +class AlephQemuConfidentialResources(AlephQemuResources): + firmware_path: Path + + async def download_firmware(self): + firmware = self.message_content.environment.trusted_execution.firmware + self.firmware_path = await get_existing_file(firmware) + + async def download_all(self): + await asyncio.gather( + self.download_runtime(), + self.download_firmware(), + self.download_volumes(), + ) + + +class AlephQemuConfidentialInstance(AlephQemuInstance): + vm_id: int + vm_hash: ItemHash + resources: AlephQemuConfidentialResources + enable_console: bool + enable_networking: bool + hardware_resources: MachineResources + tap_interface: TapInterface | None = None + vm_configuration: ConfigurationType | None + is_instance: bool + qemu_process: Process | None + support_snapshot = False + persistent = True + _queue_cancellers: dict[asyncio.Queue, Callable] = {} + controller_configuration: Configuration + confidential_policy: int + + def __repr__(self): + return f"" + + def 
__str__(self): + return f"vm-{self.vm_id}" + + def __init__( + self, + vm_id: int, + vm_hash: ItemHash, + resources: AlephQemuConfidentialResources, + enable_networking: bool = False, + confidential_policy: int = AMDSEVPolicy.NO_DBG, + hardware_resources: MachineResources = MachineResources(), + tap_interface: TapInterface | None = None, + ): + super().__init__(vm_id, vm_hash, resources, enable_networking, hardware_resources, tap_interface) + self.confidential_policy = confidential_policy + + async def setup(self): + pass + + async def configure(self): + """Configure the VM by saving controller service configuration""" + + logger.debug(f"Making Qemu configuration: {self} ") + monitor_socket_path = settings.EXECUTION_ROOT / (str(self.vm_id) + "-monitor.socket") + + cloud_init_drive = await self._create_cloud_init_drive() + + image_path = str(self.resources.rootfs_path) + firmware_path = str(self.resources.firmware_path) + vcpu_count = self.hardware_resources.vcpus + mem_size_mib = self.hardware_resources.memory + mem_size_mb = str(int(mem_size_mib / 1024 / 1024 * 1000 * 1000)) + + vm_session_path = settings.CONFIDENTIAL_SESSION_DIRECTORY / self.vm_hash + session_file_path = vm_session_path / "vm_session.b64" + godh_file_path = vm_session_path / "vm_godh.b64" + + qemu_bin_path = shutil.which("qemu-system-x86_64") + interface_name = None + if self.tap_interface: + interface_name = self.tap_interface.device_name + cloud_init_drive_path = str(cloud_init_drive.path_on_host) if cloud_init_drive else None + vm_configuration = QemuConfidentialVMConfiguration( + qemu_bin_path=qemu_bin_path, + cloud_init_drive_path=cloud_init_drive_path, + image_path=image_path, + monitor_socket_path=monitor_socket_path, + qmp_socket_path=self.qmp_socket_path, + vcpu_count=vcpu_count, + mem_size_mb=mem_size_mb, + interface_name=interface_name, + ovmf_path=firmware_path, + sev_session_file=session_file_path, + sev_dh_cert_file=godh_file_path, + sev_policy=self.confidential_policy, + 
host_volumes=[ + QemuVMHostVolume( + mount=volume.mount, + path_on_host=volume.path_on_host, + read_only=volume.read_only, + ) + for volume in self.resources.volumes + ], + gpus=[QemuGPU(pci_host=gpu.pci_host) for gpu in self.resources.gpus], + ) + + configuration = Configuration( + vm_id=self.vm_id, + vm_hash=self.vm_hash, + settings=settings, + vm_configuration=vm_configuration, + hypervisor=HypervisorType.qemu, + ) + logger.debug(configuration) + + save_controller_configuration(self.vm_hash, configuration) + + async def wait_for_init(self) -> None: + """Wait for the init process of the instance to be ready.""" + # FIXME: Cannot ping since network is not set up yet. + return diff --git a/src/aleph/vm/garbage_collector.py b/src/aleph/vm/garbage_collector.py new file mode 100644 index 000000000..54d182775 --- /dev/null +++ b/src/aleph/vm/garbage_collector.py @@ -0,0 +1,146 @@ +"""Free disk space by removing unused volume from the hard drive to free up + + +This script allow to manually list and remove volume linked to inactive VM +It fetches data from the scheduler and pyaleph main's node as to fetch information on the status of the VM. +Then display them to the user to determine if they can be removed safely. + +Requires to be run as root. +""" + +import os +import subprocess +from pathlib import Path + +import requests + +# following hashes are used in tests or debug VM, we can ignore them. 
+TEST_HASHES = [ + "fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_", + "cafecafecafecafecafecafecafecafecafecafecafecafecafecafecafecafe", + "decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca", + "63faf8b5db1cf8d965e6a464a0cb8062af8e7df131729e48738342d956f29ace", + "67705389842a0a1b95eaa408b009741027964edc805997475e95c505d642edd8", +] + +api_server = [ + "https://api2.aleph.im", + "https://api3.aleph.im", + # 'https://official.aleph.cloud', +] + +endpoint = "/api/v0/messages/" + + +def check_api(item_hash): + """Check on which api the ITEM_HASH msg is available.""" + for api in api_server: + response = requests.get(api + endpoint + item_hash) + print(api + " ", end="") + print(response.status_code, end="") + j = response.json() + print(" " + j["status"], end="") + print() + + +p = Path("/var/lib/aleph/vm/volumes/persistent") +# print current size +os.system(" ".join(["df", "-h", str(p)])) + +# Before anything check that we can reach the api server and the scheduler server +res = requests.get("https://api2.aleph.im/api/v0/info/public.json") +assert res.status_code == 200 +res = requests.get("https://scheduler.api.aleph.cloud/api/v0/plan") +assert res.status_code == 200 + +volume_dirs = list(p.glob("*")) +for i, f in enumerate(reversed(volume_dirs)): + if not f.is_dir(): + continue + item_hash = f.name + print(f"= {i}/{len(volume_dirs) -1} {item_hash}") + if item_hash in TEST_HASHES: + print("Test VM, skipping") + continue + + res = requests.get(f"https://api2.aleph.im/api/v0/messages/{item_hash}") + + if res.status_code == 404: + print("Not found on API server") + continue + message = res.json() + message_status = message.get("status") + # if message_status == "forgotten" or message_status == "rejected": + # print(f"{item_hash} status: {j.message_status('status')}") + # continue + # print(f"{item_hash} status: {j.message_status('status')}") + sender = message["message"]["sender"] + print(f"Sender {sender}. 
State: {message_status}") + if not message["message"]["type"] == "INSTANCE": + print("Type: ", message["message"]["type"], "not an instance") + continue + scheduler_res = requests.get(f"https://scheduler.api.aleph.cloud/api/v0/allocation/{item_hash}") + schedule = None + + if scheduler_res.status_code == 404: + print("Not found on scheduler plan") + else: + schedule = scheduler_res.json() + print(f"scheduled on {schedule['node']['node_id']}") + + balance = requests.get(f"https://api2.aleph.im/api/v0/addresses/{sender}/balance").json() + print(f"User balance: {balance['balance']:.2f}, locked amount {balance['locked_amount']:.2f}") + # print(balance) + + # check if process is still running + + proc_ret = subprocess.run( + f"systemctl status aleph-vm-controller@{item_hash}.service --no-pager", + shell=True, + capture_output=True, + ) + exit_code = proc_ret.returncode + if exit_code == 0: + proc_status = "running" + elif exit_code == 3: + proc_status = "stopped" + else: + proc_status = "error" + print("Unknown process state", exit_code) + # to remove + + if proc_status != "running": + # not running and forgotten + + if message_status == "forgotten" or message_status == "rejected": + print("Recommendation: remove, process not running and message rejected or forgotten") + else: + print("Process stopped") + # print(f"balances: {balance['balance']}, locked amount {balance['locked_amount']}'") + + while True: + inp = input("Do you want to delete y/n ? 
More info (h) [n] ").lower() + if inp in ["y", "yes"]: + os.system(f"dmsetup remove {item_hash}_base") + os.system(f"dmsetup remove {item_hash}_rootfs") + os.system(f"rm -r {f.absolute()}") + # close all loop device + os.system( + "sudo losetup -l | grep 'persistent' | grep deleted | awk '{print $1}' | sudo xargs losetup -d {}" + ) + break + elif inp == "h": + print(proc_ret.stdout.decode()) + check_api(item_hash) + print(f"https://api2.aleph.im/api/v0/messages/{item_hash}") + print(f"https://api2.aleph.im/api/v0/addresses/{sender}/balance") + else: + break + + else: + print("process is running, do not delete") + + +# print current size. +print("Size after") +os.system(" ".join(["df", "-h", str(p)])) diff --git a/src/aleph/vm/guest_api/__init__.py b/src/aleph/vm/guest_api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/guest_api/__main__.py b/src/aleph/vm/guest_api/__main__.py new file mode 100644 index 000000000..e5b89ebe8 --- /dev/null +++ b/src/aleph/vm/guest_api/__main__.py @@ -0,0 +1,209 @@ +import json +import logging +import re +from pathlib import Path + +import aiohttp +import aioredis +import sentry_sdk +from aiohttp import web +from setproctitle import setproctitle + +from aleph.vm.conf import settings +from aleph.vm.version import get_version_from_apt, get_version_from_git + +logger = logging.getLogger(__name__) + +ALEPH_API_SERVER = "https://official.aleph.cloud" +ALEPH_VM_CONNECTOR = "http://localhost:4021" +CACHE_EXPIRES_AFTER = 7 * 24 * 3600 # Seconds +REDIS_ADDRESS = "redis://localhost" + +_redis: aioredis.Redis | None = None + + +async def get_redis(address: str = REDIS_ADDRESS) -> aioredis.Redis: + global _redis + # Ensure the redis connection is still up before returning it + if _redis: + try: + await _redis.ping() + except aioredis.ConnectionClosedError: + _redis = None + if not _redis: + _redis = await aioredis.create_redis(address=address) + + return _redis + + +async def proxy(request: web.Request): + 
tail: str = request.match_info.get("tail") or "" + path: str = tail.lstrip("/") + query_string = request.rel_url.query_string + url = f"{ALEPH_API_SERVER}/{path}?{query_string}" + + async with aiohttp.ClientSession() as session: + async with session.request(method=request.method, url=url) as response: + data = await response.read() + return web.Response(body=data, status=response.status, content_type=response.content_type) + + +async def repost(request: web.Request): + logger.debug("REPOST") + data_raw = await request.json() + topic, message = data_raw["topic"], json.loads(data_raw["data"]) + + content = json.loads(message["item_content"]) + content["address"] = "VM on executor" + message["item_content"] = json.dumps(content) + + new_data = {"topic": topic, "data": json.dumps(message)} + + path = request.path + if request.rel_url.query_string: + query_string = request.rel_url.query_string + url = f"{ALEPH_VM_CONNECTOR}{path}?{query_string}" + else: + url = f"{ALEPH_VM_CONNECTOR}{path}" + + async with aiohttp.ClientSession() as session: + async with session.post(url=url, json=new_data) as response: + data = await response.read() + return web.Response(body=data, status=response.status, content_type=response.content_type) + + +# async def decrypt_secret(request: web.Request): +# Not implemented... 
+ + +async def properties(request: web.Request): + logger.debug("Forwarding signing properties") + _ = request + + url = f"{ALEPH_VM_CONNECTOR}/properties" + async with aiohttp.ClientSession() as session: + async with session.get(url=url) as response: + data = await response.read() + return web.Response(body=data, status=response.status, content_type=response.content_type) + + +async def sign(request: web.Request): + vm_hash = request.app["meta_vm_hash"] + message = await request.json() + + # Ensure that the hash of the VM is used as sending address + content = json.loads(message["item_content"]) + if content["address"] != vm_hash: + raise web.HTTPBadRequest(reason="Message address does not match VM item_hash") + + logger.info("Forwarding signing request to VM Connector") + + url = f"{ALEPH_VM_CONNECTOR}/sign" + async with aiohttp.ClientSession() as session: + async with session.post(url=url, json=message) as response: + signed_message = await response.read() + return web.Response( + body=signed_message, + status=response.status, + content_type=response.content_type, + ) + + +async def get_from_cache(request: web.Request): + prefix: str = request.app["meta_vm_hash"] + key: str | None = request.match_info.get("key") + if not (key and re.match(r"^\w+$", key)): + return web.HTTPBadRequest(text="Invalid key") + + redis: aioredis.Redis = await get_redis() + body = await redis.get(f"{prefix}:{key}") + if body: + return web.Response(body=body, status=200) + else: + return web.Response(text="No such key in cache", status=404) + + +async def put_in_cache(request: web.Request): + prefix: str = request.app["meta_vm_hash"] + key: str | None = request.match_info.get("key") + if not (key and re.match(r"^\w+$", key)): + return web.HTTPBadRequest(text="Invalid key") + + value: bytes = await request.read() + + redis: aioredis.Redis = await get_redis() + return web.json_response(await redis.set(f"{prefix}:{key}", value, expire=CACHE_EXPIRES_AFTER)) + + +async def 
delete_from_cache(request: web.Request): + prefix: str = request.app["meta_vm_hash"] + key: str | None = request.match_info.get("key") + if not (key and re.match(r"^\w+$", key)): + return web.HTTPBadRequest(text="Invalid key") + + redis: aioredis.Redis = await get_redis() + result = await redis.delete(f"{prefix}:{key}") + return web.json_response(result) + + +async def list_keys_from_cache(request: web.Request): + prefix: str = request.app["meta_vm_hash"] + pattern: str = request.rel_url.query.get("pattern", "*") + if not re.match(r"^[\w?*^\-]+$", pattern): + return web.HTTPBadRequest(text="Invalid key") + + redis: aioredis.Redis = await get_redis() + result = await redis.keys(f"{prefix}:{pattern}") + keys = [key.decode()[len(prefix) + 1 :] for key in result] + return web.json_response(keys) + + +def run_guest_api( + unix_socket_path: Path, + vm_hash: str | None = None, + sentry_dsn: str | None = None, + server_name: str | None = None, +): + # This function runs in a separate process, requiring to reinitialize the Sentry SDK + if sentry_sdk and sentry_dsn: + sentry_sdk.init( + dsn=sentry_dsn, + server_name=server_name, + # Set traces_sample_rate to 1.0 to capture 100% + # of transactions for performance monitoring. + # We recommend adjusting this value in production. 
+ traces_sample_rate=settings.SENTRY_TRACES_SAMPLE_RATE, + ) + sentry_sdk.set_context( + "version", + { + "git": get_version_from_git(), + "apt": get_version_from_apt(), + }, + ) + + setproctitle(f"aleph-vm guest_api on {unix_socket_path}") + app = web.Application() + app["meta_vm_hash"] = vm_hash or "_" + + app.router.add_route(method="GET", path="/properties", handler=properties) + app.router.add_route(method="POST", path="/sign", handler=sign) + + app.router.add_route(method="GET", path="/cache/", handler=list_keys_from_cache) + app.router.add_route(method="GET", path="/cache/{key:.*}", handler=get_from_cache) + app.router.add_route(method="PUT", path="/cache/{key:.*}", handler=put_in_cache) + app.router.add_route(method="DELETE", path="/cache/{key:.*}", handler=delete_from_cache) + + app.router.add_route(method="GET", path="/{tail:.*}", handler=proxy) + app.router.add_route(method="HEAD", path="/{tail:.*}", handler=proxy) + app.router.add_route(method="OPTIONS", path="/{tail:.*}", handler=proxy) + + app.router.add_route(method="POST", path="/api/v0/ipfs/pubsub/pub", handler=repost) + app.router.add_route(method="POST", path="/api/v0/p2p/pubsub/pub", handler=repost) + + # web.run_app(app=app, port=9000) + web.run_app(app=app, path=str(unix_socket_path)) + + +if __name__ == "__main__": + run_guest_api(Path("/tmp/guest-api"), vm_hash="vm") diff --git a/src/aleph/vm/hypervisors/__init__.py b/src/aleph/vm/hypervisors/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/hypervisors/firecracker/__init__.py b/src/aleph/vm/hypervisors/firecracker/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/hypervisors/firecracker/config.py b/src/aleph/vm/hypervisors/firecracker/config.py new file mode 100644 index 000000000..b7e4fc77a --- /dev/null +++ b/src/aleph/vm/hypervisors/firecracker/config.py @@ -0,0 +1,62 @@ +from pathlib import Path + +from pydantic import BaseModel, PositiveInt + +VSOCK_PATH = 
"/tmp/v.sock" + + +class BootSource(BaseModel): + kernel_image_path: Path = Path("vmlinux.bin") + boot_args: str = "console=ttyS0 reboot=k panic=1 pci=off ro noapic nomodules random.trust_cpu=on" + + @staticmethod + def args(enable_console: bool = True, writable: bool = False): + default = "reboot=k panic=1 pci=off noapic nomodules random.trust_cpu=on" + if writable: + default = default + " rw" + else: + default = default + " ro" + if enable_console: + return "console=ttyS0 " + default + else: + return default + + +class Drive(BaseModel): + drive_id: str = "rootfs" + path_on_host: Path = Path("./runtimes/aleph-alpine-3.13-python/rootfs.ext4") + is_root_device: bool = True + is_read_only: bool = True + + +class MachineConfig(BaseModel): + vcpu_count: PositiveInt = 1 + mem_size_mib: PositiveInt = 128 + smt: bool = False + + +class Vsock(BaseModel): + vsock_id: str = "1" + guest_cid: PositiveInt = 3 + uds_path: str = VSOCK_PATH + + +class NetworkInterface(BaseModel): + iface_id: str = "eth0" + guest_mac: str = "AA:FC:00:00:00:01" + host_dev_name: str + + +class FirecrackerConfig(BaseModel): + boot_source: BootSource + drives: list[Drive] + machine_config: MachineConfig + vsock: Vsock | None + network_interfaces: list[NetworkInterface] | None + + class Config: + allow_population_by_field_name = True + + @staticmethod + def alias_generator(x: str): + return x.replace("_", "-") diff --git a/src/aleph/vm/hypervisors/firecracker/microvm.py b/src/aleph/vm/hypervisors/firecracker/microvm.py new file mode 100644 index 000000000..d357fb6e0 --- /dev/null +++ b/src/aleph/vm/hypervisors/firecracker/microvm.py @@ -0,0 +1,525 @@ +import asyncio +import errno +import json +import logging +import os.path +import shutil +import string +import traceback +from asyncio import Task +from asyncio.base_events import Server +from dataclasses import dataclass +from os import getuid +from pathlib import Path +from pwd import getpwnam +from tempfile import NamedTemporaryFile +from typing import 
Any, BinaryIO + +import msgpack +from aleph_message.models import ItemHash +from systemd import journal + +from .config import Drive, FirecrackerConfig + +logger = logging.getLogger(__name__) + +VSOCK_PATH = "/tmp/v.sock" +DEVICE_BASE_DIRECTORY = "/dev/mapper" + + +class MicroVMFailedInitError(Exception): + pass + + +# extend the json.JSONEncoder class to support bytes +class JSONBytesEncoder(json.JSONEncoder): + # overload method default + def default(self, obj): + # Match all the types you want to handle in your converter + if isinstance(obj, bytes): + return obj.decode() + return json.JSONEncoder.default(self, obj) + + +def system(command): + logger.debug(f"shell {command}") + ret = os.system(command) + if ret != 0: + logger.warning(f"Failed shell `{command}`: return code {ret}") + # print trace so we know who called this + traceback.print_stack() + return ret + + +async def setfacl(): + """Give current user permission to access /dev/kvm via acl""" + if os.access("/dev/kvm", os.R_OK | os.W_OK): + return + + user = getuid() + cmd = f"sudo setfacl -m u:{user}:rw /dev/kvm" + proc = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) + stdout, stderr = await proc.communicate() + + if proc.returncode == 0: + return + logger.warning(f"[{cmd!r} exited with {[proc.returncode]}]") + if stdout: + logger.warning(f"[stdout]\n{stdout.decode()}") + if stderr: + logger.warning(f"[stderr]\n{stderr.decode()}") + + +@dataclass +class RuntimeConfiguration: + version: str + + def supports_ipv6(self) -> bool: + return self.version != "1.0.0" + + +class MicroVM: + vm_id: int + use_jailer: bool + firecracker_bin_path: Path + jailer_bin_path: Path | None + proc: asyncio.subprocess.Process | None = None + stdout_task: Task | None = None + stderr_task: Task | None = None + config_file_path: Path | None = None + drives: list[Drive] + init_timeout: float + runtime_config: RuntimeConfiguration | None + mounted_rootfs: Path | None = None 
+ _unix_socket: Server | None = None + enable_log: bool + journal_stdout: BinaryIO | int | None = None + journal_stderr: BinaryIO | int | None = None + + def __repr__(self): + return f"" + + def __str__(self): + return f"vm-{self.vm_id}" + + @property + def namespace_path(self) -> str: + firecracker_bin_name = os.path.basename(self.firecracker_bin_path) + return str(self.jailer_base_directory / firecracker_bin_name / str(self.vm_id)) + + @property + def jailer_path(self) -> str: + return os.path.join(self.namespace_path, "root") + + @property + def socket_path(self) -> str: + if self.use_jailer: + return f"{self.jailer_path}/run/firecracker.socket" + else: + return f"/tmp/firecracker-{self.vm_id}.socket" + + @property + def vsock_path(self) -> str: + if self.use_jailer: + return f"{self.jailer_path}{VSOCK_PATH}" + else: + return f"{VSOCK_PATH}" + + def __init__( + self, + vm_id: int, + vm_hash: ItemHash, + firecracker_bin_path: Path, + jailer_base_directory: Path, + use_jailer: bool = True, + jailer_bin_path: Path | None = None, + init_timeout: float = 5.0, + enable_log: bool = True, + ): + self.vm_id = vm_id + self.vm_hash = vm_hash + self.use_jailer = use_jailer + self.jailer_base_directory = jailer_base_directory + self.firecracker_bin_path = firecracker_bin_path + self.jailer_bin_path = jailer_bin_path + self.drives = [] + self.init_timeout = init_timeout + self.runtime_config = None + self.enable_log = enable_log + + def to_dict(self) -> dict: + return { + "jailer_path": self.jailer_path, + "socket_path": self.socket_path, + "vsock_path": self.vsock_path, + **self.__dict__, + } + + def prepare_jailer(self) -> None: + if not self.use_jailer: + return + system(f"rm -fr {self.jailer_path}") + + # system(f"rm -fr {self.jailer_path}/run/") + # system(f"rm -fr {self.jailer_path}/dev/") + # system(f"rm -fr {self.jailer_path}/opt/") + # + # if os.path.exists(path=self.vsock_path): + # os.remove(path=self.vsock_path) + # + system(f"mkdir -p {self.jailer_path}/tmp/") + 
system(f"chown jailman:jailman {self.jailer_path}/tmp/") + # + system(f"mkdir -p {self.jailer_path}/opt") + system(f"mkdir -p {self.jailer_path}/dev/mapper") + + # system(f"cp disks/rootfs.ext4 {self.jailer_path}/opt") + # system(f"cp hello-vmlinux.bin {self.jailer_path}/opt") + + def prepare_start(self): + if not self.use_jailer: + return False + + system(f"rm -fr {self.jailer_path}/dev/net/") + system(f"rm -fr {self.jailer_path}/dev/kvm") + system(f"rm -fr {self.jailer_path}/dev/urandom") + system(f"rm -fr {self.jailer_path}/dev/userfaultfd") + system(f"rm -fr {self.jailer_path}/run/") + + if os.path.exists(path=self.vsock_path): + os.remove(path=self.vsock_path) + + async def save_configuration_file(self, config: FirecrackerConfig) -> Path: + with ( + NamedTemporaryFile(delete=False) + if not self.use_jailer + else open(f"{self.jailer_path}/tmp/config.json", "wb") + ) as config_file: + config_file.write(config.json(by_alias=True, exclude_none=True, indent=4).encode()) + config_file.flush() + config_file_path = Path(config_file.name) + config_file_path.chmod(0o644) + return config_file_path + + async def start(self, config_path: Path) -> asyncio.subprocess.Process: + if self.use_jailer: + return await self.start_jailed_firecracker(config_path) + else: + return await self.start_firecracker(config_path) + + async def start_firecracker(self, config_path: Path) -> asyncio.subprocess.Process: + if os.path.exists(VSOCK_PATH): + os.remove(VSOCK_PATH) + if os.path.exists(self.socket_path): + os.remove(self.socket_path) + + options = ( + str(self.firecracker_bin_path), + "--api-sock", + str(self.socket_path), + "--config-file", + str(config_path), + ) + if self.enable_log: + self.journal_stdout = journal.stream(self._journal_stdout_name) + self.journal_stderr = journal.stream(self._journal_stderr_name) + else: + self.journal_stdout = asyncio.subprocess.DEVNULL + self.journal_stderr = asyncio.subprocess.DEVNULL + + logger.debug(" ".join(options)) + + self.proc = await 
asyncio.create_subprocess_exec( + *options, + stdin=asyncio.subprocess.PIPE, + stdout=self.journal_stdout, + stderr=self.journal_stderr, + ) + return self.proc + + @property + def _journal_stdout_name(self) -> str: + return f"vm-{self.vm_hash}-stdout" + + @property + def _journal_stderr_name(self) -> str: + return f"vm-{self.vm_hash}-stderr" + + async def start_jailed_firecracker(self, config_path: Path) -> asyncio.subprocess.Process: + if not self.jailer_bin_path: + msg = "Jailer binary path is missing" + raise ValueError(msg) + uid = str(getpwnam("jailman").pw_uid) + gid = str(getpwnam("jailman").pw_gid) + + self.config_file_path = config_path + if self.enable_log: + self.journal_stdout = journal.stream(self._journal_stdout_name) + self.journal_stderr = journal.stream(self._journal_stderr_name) + else: + self.journal_stdout = asyncio.subprocess.DEVNULL + self.journal_stderr = asyncio.subprocess.DEVNULL + + options = ( + str(self.jailer_bin_path), + "--id", + str(self.vm_id), + "--exec-file", + str(self.firecracker_bin_path), + "--uid", + uid, + "--gid", + gid, + "--chroot-base-dir", + str(self.jailer_base_directory), + "--", + "--config-file", + "/tmp/" + str(self.config_file_path.name), + ) + + logger.debug(" ".join(options)) + + self.proc = await asyncio.create_subprocess_exec( + *options, + stdin=asyncio.subprocess.PIPE, + stdout=self.journal_stdout, + stderr=self.journal_stderr, + ) + return self.proc + + def enable_kernel(self, kernel_image_path: Path) -> Path: + """Make a kernel available to the VM. + + Creates a symlink to the kernel file if jailer is in use. 
+ """ + if self.use_jailer: + kernel_filename = kernel_image_path.name + jailer_kernel_image_path = f"/opt/{kernel_filename}" + + try: + Path(f"{self.jailer_path}{jailer_kernel_image_path}").hardlink_to(kernel_image_path) + except FileExistsError: + logger.debug(f"File {jailer_kernel_image_path} already exists") + + return Path(jailer_kernel_image_path) + else: + return kernel_image_path + + def enable_rootfs(self, path_on_host: Path) -> Path: + if path_on_host.is_file(): + return self.enable_file_rootfs(path_on_host) + elif path_on_host.is_block_device(): + return self.enable_device_mapper_rootfs(path_on_host) + else: + msg = f"Not a file or a block device: {path_on_host}" + raise ValueError(msg) + + def enable_file_rootfs(self, path_on_host: Path) -> Path: + """Make a rootfs available to the VM. + + If jailer is in use, try to create a hardlink + If it is not possible to create a link because the dir are in separate device made a copy. + """ + if self.use_jailer: + rootfs_filename = Path(path_on_host).name + jailer_path_on_host = f"/opt/{rootfs_filename}" + try: + os.link(path_on_host, f"{self.jailer_path}/{jailer_path_on_host}") + except FileExistsError: + logger.debug(f"File {jailer_path_on_host} already exists") + except OSError as err: + if err.errno == errno.EXDEV: + # Invalid cross-device link: cannot make hard link between partition. 
+ # In this case, copy the file instead: + shutil.copyfile(path_on_host, f"{self.jailer_path}/{jailer_path_on_host}") + else: + raise + return Path(jailer_path_on_host) + else: + return path_on_host + + def enable_device_mapper_rootfs(self, path_on_host: Path) -> Path: + """Mount a rootfs to the VM.""" + self.mounted_rootfs = path_on_host + if not self.use_jailer: + return path_on_host + + rootfs_filename = path_on_host.name + device_jailer_path = Path(DEVICE_BASE_DIRECTORY) / rootfs_filename + final_path = Path(self.jailer_path) / str(device_jailer_path).strip("/") + if not final_path.is_block_device(): + jailer_device_vm_path = Path(f"{self.jailer_path}/{DEVICE_BASE_DIRECTORY}") + jailer_device_vm_path.mkdir(exist_ok=True, parents=True) + rootfs_device = path_on_host.resolve() + # Copy the /dev/dm-{device_id} special block file that is the real mapping destination on Jailer + system(f"cp -vap {rootfs_device} {self.jailer_path}/dev/") + path_to_mount = jailer_device_vm_path / rootfs_filename + if not path_to_mount.is_symlink(): + path_to_mount.symlink_to(rootfs_device) + system(f"chown -Rh jailman:jailman {self.jailer_path}/dev") + + return device_jailer_path + + @staticmethod + def compute_device_name(index: int) -> str: + return f"vd{string.ascii_lowercase[index + 1]}" + + def enable_drive(self, drive_path: Path, read_only: bool = True) -> Drive: + """Make a volume available to the VM. + + Creates a hardlink or a copy to the volume file if jailer is in use. + """ + index = len(self.drives) + device_name = self.compute_device_name(index) + + if self.use_jailer: + drive_filename = drive_path.name + jailer_path_on_host = f"/opt/{drive_filename}" + + try: + Path(f"{self.jailer_path}/{jailer_path_on_host}").hardlink_to(drive_path) + except OSError as err: + if err.errno == errno.EXDEV: + # Invalid cross-device link: cannot make hard link between partition. 
+ # In this case, copy the file instead: + shutil.copyfile(drive_path, f"{self.jailer_path}/{jailer_path_on_host}") + except FileExistsError: + logger.debug(f"File {jailer_path_on_host} already exists") + drive_path = Path(jailer_path_on_host) + + drive = Drive( + drive_id=device_name, + path_on_host=drive_path, + is_root_device=False, + is_read_only=read_only, + ) + self.drives.append(drive) + return drive + + async def wait_for_init(self) -> None: + """Wait for a connection from the init in the VM""" + logger.debug("Waiting for init...") + queue: asyncio.Queue[RuntimeConfiguration] = asyncio.Queue() + + async def unix_client_connected(reader: asyncio.StreamReader, _writer: asyncio.StreamWriter): + data = await reader.read(1_000_000) + if data: + config_dict: dict[str, Any] = msgpack.loads(data) + runtime_config = RuntimeConfiguration(version=config_dict["version"]) + else: + # Older runtimes do not send a config. Use a default. + runtime_config = RuntimeConfiguration(version="1.0.0") + + logger.debug("Runtime version: %s", runtime_config) + await queue.put(runtime_config) + + self._unix_socket = await asyncio.start_unix_server(unix_client_connected, path=f"{self.vsock_path}_52") + if self.use_jailer: + system(f"chown jailman:jailman {self.vsock_path}_52") + try: + self.runtime_config = await asyncio.wait_for(queue.get(), timeout=self.init_timeout) + logger.debug("...signal from init received") + except asyncio.TimeoutError as error: + logger.warning("Never received signal from init") + raise MicroVMFailedInitError() from error + + async def shutdown(self) -> None: + logger.debug(f"Shutdown vm={self.vm_id}") + try: + reader, writer = await asyncio.open_unix_connection(path=self.vsock_path) + except ( + FileNotFoundError, + ConnectionResetError, + ConnectionRefusedError, + ) as error: + logger.warning(f"VM={self.vm_id} cannot receive shutdown signal: {error.args}") + return + + try: + payload = b"halt" + writer.write(b"CONNECT 52\n" + payload) + + await 
writer.drain() + + ack: bytes = await reader.readline() + logger.debug(f"ack={ack.decode()}") + + msg: bytes = await reader.readline() + logger.debug(f"msg={msg!r}") + + msg2: bytes = await reader.readline() + logger.debug(f"msg2={msg2!r}") + + if msg2 != b"STOPZ\n": + logger.warning(f"Unexpected response from VM: {msg2[:20]!r}") + except ConnectionResetError as error: + logger.warning(f"ConnectionResetError in shutdown of {self.vm_id}: {error.args}") + + async def stop(self): + if self.proc: + logger.debug("Stopping firecracker process") + try: + self.proc.terminate() + self.proc.kill() + except ProcessLookupError: + logger.debug(f"Firecracker process pid={self.proc.pid} not found") + self.proc = None + else: + logger.debug("No firecracker process to stop") + + async def teardown(self): + """Stop the VM, cleanup network interface and remove data directory.""" + try: + await asyncio.wait_for(self.shutdown(), timeout=5) + except asyncio.TimeoutError: + logger.exception(f"Timeout during VM shutdown vm={self.vm_id}") + logger.debug("Waiting for one second for the process to shutdown") + await asyncio.sleep(1) + await self.stop() + + if self.stdout_task: + self.stdout_task.cancel() + if self.stderr_task: + self.stderr_task.cancel() + + if ( + self.journal_stdout + and self.journal_stdout != asyncio.subprocess.DEVNULL + and hasattr(self.journal_stdout, "close") + ): + self.journal_stdout.close() + if ( + self.journal_stderr + and self.journal_stderr != asyncio.subprocess.DEVNULL + and hasattr(self.journal_stderr, "close") + ): + self.journal_stderr.close() + + # Clean mounted block devices + if self.mounted_rootfs: + logger.debug("Waiting for one second for the VM to shutdown") + await asyncio.sleep(1) + if self.mounted_rootfs.is_block_device(): + root_fs = self.mounted_rootfs.name + system(f"dmsetup remove {root_fs}") + base_device = Path(self.mounted_rootfs.name.replace("_rootfs", "_base")) + if base_device.is_block_device(): + system(f"dmsetup remove {base_device}") 
+ if self.use_jailer and Path(self.jailer_path).is_dir(): + shutil.rmtree(self.jailer_path) + + if self._unix_socket: + logger.debug("Closing unix socket") + self._unix_socket.close() + try: + await asyncio.wait_for(self._unix_socket.wait_closed(), 2) + except asyncio.TimeoutError: + # In Python < 3.11 wait_closed() was broken and returned immediatly + # It is supposedly fixed in Python 3.12.1, but it hangs indefinitely during tests. + logger.info("f{self} unix socket closing timeout") + + logger.debug("Removing files") + if self.config_file_path: + self.config_file_path.unlink(missing_ok=True) + if Path(self.namespace_path).exists(): + system(f"rm -fr {self.namespace_path}") diff --git a/src/aleph/vm/hypervisors/qemu/__init__.py b/src/aleph/vm/hypervisors/qemu/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/hypervisors/qemu/qemuvm.py b/src/aleph/vm/hypervisors/qemu/qemuvm.py new file mode 100644 index 000000000..df7559613 --- /dev/null +++ b/src/aleph/vm/hypervisors/qemu/qemuvm.py @@ -0,0 +1,187 @@ +import asyncio +from asyncio.subprocess import Process +from dataclasses import dataclass +from pathlib import Path +from typing import BinaryIO, TextIO + +import qmp +from systemd import journal + +from aleph.vm.controllers.configuration import QemuGPU, QemuVMConfiguration +from aleph.vm.controllers.qemu.instance import logger + + +@dataclass +class HostVolume: + path_on_host: Path + read_only: bool + + +class QemuVM: + qemu_bin_path: str + cloud_init_drive_path: str | None + image_path: str + monitor_socket_path: Path + qmp_socket_path: Path + vcpu_count: int + mem_size_mb: int + interface_name: str + qemu_process: Process | None = None + host_volumes: list[HostVolume] + gpus: list[QemuGPU] + journal_stdout: TextIO | None + journal_stderr: TextIO | None + + def __repr__(self) -> str: + if self.qemu_process: + return f"" + else: + return "" + + def __init__(self, vm_hash, config: QemuVMConfiguration): + self.qemu_bin_path = 
config.qemu_bin_path + self.cloud_init_drive_path = config.cloud_init_drive_path + self.image_path = config.image_path + self.monitor_socket_path = config.monitor_socket_path + self.qmp_socket_path = config.qmp_socket_path + self.vcpu_count = config.vcpu_count + self.mem_size_mb = config.mem_size_mb + self.interface_name = config.interface_name + self.vm_hash = vm_hash + + self.host_volumes = [ + HostVolume( + path_on_host=volume.path_on_host, + read_only=volume.read_only, + ) + for volume in config.host_volumes + ] + self.gpus = config.gpus + + @property + def _journal_stdout_name(self) -> str: + return f"vm-{self.vm_hash}-stdout" + + @property + def _journal_stderr_name(self) -> str: + return f"vm-{self.vm_hash}-stderr" + + def prepare_start(self): + pass + + async def start( + self, + ) -> Process: + # Based on the command + # qemu-system-x86_64 -enable-kvm -m 2048 -net nic,model=virtio + # -net tap,ifname=tap0,script=no,downscript=no -drive file=alpine.qcow2,media=disk,if=virtio -nographic + + self.journal_stdout: BinaryIO = journal.stream(self._journal_stdout_name) + self.journal_stderr: BinaryIO = journal.stream(self._journal_stderr_name) + # hardware_resources.published ports -> not implemented at the moment + # hardware_resources.seconds -> only for microvm + args = [ + self.qemu_bin_path, + "-enable-kvm", + "-nodefaults", + "-m", + str(self.mem_size_mb), + "-smp", + str(self.vcpu_count), + "-drive", + f"file={self.image_path},media=disk,if=virtio", + # To debug you can pass gtk or curses instead + "-display", + "none", + "--no-reboot", # Rebooting from inside the VM shuts down the machine + # Listen for commands on this socket + "-monitor", + f"unix:{self.monitor_socket_path},server,nowait", + # Listen for commands on this socket (QMP protocol in json). 
Supervisor use it to send shutdown or start + # command + "-qmp", + f"unix:{self.qmp_socket_path},server,nowait", + # Tell to put the output to std fd, so we can include them in the log + "-serial", + "stdio", + # nographics. Seems redundant with -serial stdio but without it the boot process is not displayed on stdout + "-nographic", + # Boot + # order=c only first hard drive + # reboot-timeout in combination with -no-reboot, makes it so qemu stop if there is no bootable device + "-boot", + "order=c,reboot-timeout=1", + # Uncomment for debug + # "-serial", "telnet:localhost:4321,server,nowait", + # "-snapshot", # Do not save anything to disk + ] + if self.interface_name: + # script=no, downscript=no tell qemu not to try to set up the network itself + args += ["-net", "nic,model=virtio", "-net", f"tap,ifname={self.interface_name},script=no,downscript=no"] + + if self.cloud_init_drive_path: + args += ["-cdrom", f"{self.cloud_init_drive_path}"] + + args += self._get_host_volumes_args() + args += self._get_gpu_args() + print(*args) + + self.qemu_process = proc = await asyncio.create_subprocess_exec( + *args, + stdin=asyncio.subprocess.DEVNULL, + stdout=self.journal_stdout, + stderr=self.journal_stderr, + ) + + print( + f"Started QemuVm {self}, {proc}. Log available with: journalctl -t {self._journal_stdout_name} -t {self._journal_stderr_name}" + ) + return proc + + def _get_host_volumes_args(self): + args = [] + for volume in self.host_volumes: + args += [ + "-drive", + f"file={volume.path_on_host},format=raw,readonly={'on' if volume.read_only else 'off'},media=disk,if=virtio", + ] + return args + + def _get_gpu_args(self): + args = [ + # Use host-phys-bits-limit argument for GPU support. 
TODO: Investigate how to get the correct bits size + "-cpu", + "host,host-phys-bits-limit=0x28", + ] + for gpu in self.gpus: + args += [ + "-device", + f"vfio-pci,host={gpu.pci_host},multifunction=on,x-vga=on", + ] + return args + + def _get_qmpclient(self) -> qmp.QEMUMonitorProtocol | None: + if not (self.qmp_socket_path and self.qmp_socket_path.exists()): + return None + client = qmp.QEMUMonitorProtocol(str(self.qmp_socket_path)) + client.connect() + return client + + def send_shutdown_message(self): + print("sending shutdown message to vm") + client = self._get_qmpclient() + if client: + resp = client.command("system_powerdown") + if not resp == {}: + logger.warning("unexpected answer from VM", resp) + print("shutdown message sent") + client.close() + + async def stop(self): + """Stop the VM.""" + self.send_shutdown_message() + + if self.journal_stdout and self.journal_stdout != asyncio.subprocess.DEVNULL: + self.journal_stdout.close() + if self.journal_stderr and self.journal_stderr != asyncio.subprocess.DEVNULL: + self.journal_stderr.close() diff --git a/src/aleph/vm/hypervisors/qemu_confidential/__init__.py b/src/aleph/vm/hypervisors/qemu_confidential/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/hypervisors/qemu_confidential/qemuvm.py b/src/aleph/vm/hypervisors/qemu_confidential/qemuvm.py new file mode 100644 index 000000000..353c3f78d --- /dev/null +++ b/src/aleph/vm/hypervisors/qemu_confidential/qemuvm.py @@ -0,0 +1,141 @@ +import asyncio +from asyncio.subprocess import Process +from pathlib import Path +from typing import TextIO + +from aleph_message.models.execution.environment import AMDSEVPolicy +from cpuid.features import secure_encryption_info +from systemd import journal + +from aleph.vm.controllers.configuration import QemuConfidentialVMConfiguration +from aleph.vm.hypervisors.qemu.qemuvm import QemuVM + + +class QemuConfidentialVM(QemuVM): + sev_policy: str = hex(AMDSEVPolicy.NO_DBG) + sev_dh_cert_file: Path 
# "vm_godh.b64" + sev_session_file: Path # "vm_session.b64" + + def __repr__(self) -> str: + if self.qemu_process: + return f"" + else: + return "" + + def __init__(self, vm_hash, config: QemuConfidentialVMConfiguration): + super().__init__(vm_hash, config) + self.qemu_bin_path = config.qemu_bin_path + self.cloud_init_drive_path = config.cloud_init_drive_path + self.image_path = config.image_path + self.monitor_socket_path = config.monitor_socket_path + self.qmp_socket_path = config.qmp_socket_path + self.vcpu_count = config.vcpu_count + self.mem_size_mb = config.mem_size_mb + self.interface_name = config.interface_name + self.log_queues: list[asyncio.Queue] = [] + self.ovmf_path: Path = config.ovmf_path + self.sev_session_file = config.sev_session_file + self.sev_dh_cert_file = config.sev_dh_cert_file + self.sev_policy = hex(config.sev_policy) + + def prepare_start(self): + pass + + async def start( + self, + ) -> Process: + # Based on the command + # qemu-system-x86_64 -enable-kvm -m 2048 -net nic,model=virtio + # -net tap,ifname=tap0,script=no,downscript=no -drive file=alpine.qcow2,media=disk,if=virtio -nographic + # hardware_resources.published ports -> not implemented at the moment + # hardware_resources.seconds -> only for microvm + journal_stdout: TextIO = journal.stream(self._journal_stdout_name) + journal_stderr: TextIO = journal.stream(self._journal_stderr_name) + + # TODO : ensure this is ok at launch + sev_info = secure_encryption_info() + if sev_info is None: + msg = "Not running on an AMD SEV platform?" 
+ raise ValueError(msg) + godh = self.sev_dh_cert_file + launch_blob = self.sev_session_file + + if not (godh.is_file() and launch_blob.is_file()): + msg = "Missing guest owner certificates, cannot start the VM.`" + raise FileNotFoundError(msg) + args = [ + self.qemu_bin_path, + "-enable-kvm", + "-nodefaults", + "-m", + str(self.mem_size_mb), + "-smp", + str(self.vcpu_count), + "-drive", + f"if=pflash,format=raw,unit=0,file={self.ovmf_path},readonly=on", + "-drive", + f"file={self.image_path},media=disk,if=virtio,format=qcow2", + # To debug you can pass gtk or curses instead + "-display", + "none", + "--no-reboot", # Rebooting from inside the VM shuts down the machine + # Listen for commands on this socket + "-monitor", + f"unix:{self.monitor_socket_path},server,nowait", + # Listen for commands on this socket (QMP protocol in json). Supervisor use it to send shutdown or start + # command + "-qmp", + f"unix:{self.qmp_socket_path},server,nowait", + # Tell to put the output to std fd, so we can include them in the log + "-serial", + "stdio", + # nographics. Seems redundant with -serial stdio but without it the boot process is not displayed on stdout + "-nographic", + # Boot + # order=c only first hard drive + # reboot-timeout in combination with -no-reboot, makes it so qemu stop if there is no bootable device + "-boot", + "order=c,reboot-timeout=1", + # Confidential options + # Do not start CPU at startup, we will start it via QMP after injecting the secret + "-S", + "-object", + f"sev-guest,id=sev0,policy={self.sev_policy},cbitpos={sev_info.c_bit_position}," + f"reduced-phys-bits={sev_info.phys_addr_reduction}," + f"dh-cert-file={godh},session-file={launch_blob}", + "-machine", + "confidential-guest-support=sev0", + # Linux kernel 6.9 added a control on the RDRAND function to ensure that the random numbers generation + # works well, on Qemu emulation for confidential computing the CPU model us faked and this makes control + # raise an error and prevent boot. 
Passing the argument --cpu host instruct the VM to use the same CPU + # model than the host thus the VM's kernel knows which method is used to get random numbers (Intel and + # AMD have different methods) and properly boot. + # Use host-phys-bits-limit argument for GPU support. TODO: Investigate how to get the correct bits size + "-cpu", + "host,host-phys-bits-limit=0x28", + # Uncomment following for debug + # "-serial", "telnet:localhost:4321,server,nowait", + # "-snapshot", # Do not save anything to disk + ] + if self.interface_name: + # script=no, downscript=no tell qemu not to try to set up the network itself + args += ["-net", "nic,model=virtio", "-net", f"tap,ifname={self.interface_name},script=no,downscript=no"] + + if self.cloud_init_drive_path: + args += ["-cdrom", f"{self.cloud_init_drive_path}"] + + args += self._get_host_volumes_args() + args += self._get_gpu_args() + print(*args) + + self.qemu_process = proc = await asyncio.create_subprocess_exec( + *args, + stdin=asyncio.subprocess.DEVNULL, + stdout=journal_stdout, + stderr=journal_stderr, + ) + + print( + f"Started QemuVm {self}, {proc}. 
Log available with: journalctl -t {self._journal_stdout_name} -t {self._journal_stderr_name}" + ) + return proc diff --git a/src/aleph/vm/models.py b/src/aleph/vm/models.py new file mode 100644 index 000000000..7dd59091b --- /dev/null +++ b/src/aleph/vm/models.py @@ -0,0 +1,492 @@ +import asyncio +import json +import logging +import uuid +from asyncio import Task +from collections.abc import Callable, Coroutine +from dataclasses import dataclass +from datetime import datetime, timezone +from typing import List + +from aleph_message.models import ( + ExecutableContent, + InstanceContent, + ItemHash, + ProgramContent, +) +from aleph_message.models.execution.environment import GpuProperties, HypervisorType +from pydantic.json import pydantic_encoder + +from aleph.vm.conf import settings +from aleph.vm.controllers.firecracker.executable import AlephFirecrackerExecutable +from aleph.vm.controllers.firecracker.instance import AlephInstanceResources +from aleph.vm.controllers.firecracker.program import ( + AlephFirecrackerProgram, + AlephProgramResources, +) +from aleph.vm.controllers.firecracker.snapshot_manager import SnapshotManager +from aleph.vm.controllers.interface import AlephVmControllerInterface +from aleph.vm.controllers.qemu.instance import AlephQemuInstance, AlephQemuResources +from aleph.vm.controllers.qemu_confidential.instance import ( + AlephQemuConfidentialInstance, + AlephQemuConfidentialResources, +) +from aleph.vm.network.interfaces import TapInterface +from aleph.vm.orchestrator.metrics import ( + ExecutionRecord, + delete_record, + save_execution_data, + save_record, +) +from aleph.vm.orchestrator.pubsub import PubSub +from aleph.vm.orchestrator.vm import AlephFirecrackerInstance +from aleph.vm.resources import GpuDevice, HostGPU +from aleph.vm.systemd import SystemDManager +from aleph.vm.utils import create_task_log_exceptions, dumps_for_json + +logger = logging.getLogger(__name__) + + +@dataclass +class VmExecutionTimes: + defined_at: datetime + 
preparing_at: datetime | None = None + prepared_at: datetime | None = None + starting_at: datetime | None = None + started_at: datetime | None = None + stopping_at: datetime | None = None + stopped_at: datetime | None = None + + def to_dict(self): + return self.__dict__ + + +class VmExecution: + """ + Control the execution of a VM on a high level. + + Implementation agnostic (Firecracker, maybe WASM in the future, ...). + """ + + uuid: uuid.UUID # Unique identifier of this execution + vm_hash: ItemHash + original: ExecutableContent + message: ExecutableContent + resources: ( + AlephProgramResources | AlephInstanceResources | AlephQemuResources | AlephQemuConfidentialInstance | None + ) = None + vm: AlephFirecrackerExecutable | AlephQemuInstance | AlephQemuConfidentialInstance | None = None + gpus: List[HostGPU] = [] + + times: VmExecutionTimes + + ready_event: asyncio.Event + concurrent_runs: int + runs_done_event: asyncio.Event + stop_pending_lock: asyncio.Lock + stop_event: asyncio.Event + expire_task: asyncio.Task | None = None + update_task: asyncio.Task | None = None + + snapshot_manager: SnapshotManager | None + systemd_manager: SystemDManager | None + + persistent: bool = False + + @property + def is_running(self) -> bool: + return ( + self.systemd_manager.is_service_active(self.controller_service) + if self.persistent and self.systemd_manager + else bool(self.times.starting_at and not self.times.stopping_at) + ) + + @property + def is_stopping(self) -> bool: + return bool(self.times.stopping_at and not self.times.stopped_at) + + @property + def is_program(self) -> bool: + return isinstance(self.message, ProgramContent) + + @property + def is_instance(self) -> bool: + return isinstance(self.message, InstanceContent) + + @property + def is_confidential(self) -> bool: + # FunctionEnvironment has no trusted_execution + return True if getattr(self.message.environment, "trusted_execution", None) else False + + @property + def hypervisor(self) -> HypervisorType: + 
if self.is_program: + return HypervisorType.firecracker + + # Hypervisor setting is only used for instances + return self.message.environment.hypervisor or settings.INSTANCE_DEFAULT_HYPERVISOR + + @property + def becomes_ready(self) -> Callable[[], Coroutine]: + return self.ready_event.wait + + @property + def vm_id(self) -> int | None: + return self.vm.vm_id if self.vm else None + + @property + def controller_service(self) -> str: + return f"aleph-vm-controller@{self.vm_hash}.service" + + @property + def uses_payment_stream(self) -> bool: + return self.message.payment and self.message.payment.is_stream + + @property + def has_resources(self) -> bool: + assert self.vm, "The VM attribute has to be set before calling has_resources()" + if isinstance(self.vm, AlephFirecrackerExecutable): + assert self.hypervisor == HypervisorType.firecracker + return self.vm.resources_path.exists() + else: + return True + + def __repr__(self): + return f"" + + def __init__( + self, + vm_hash: ItemHash, + message: ExecutableContent, + original: ExecutableContent, + snapshot_manager: SnapshotManager | None, + systemd_manager: SystemDManager | None, + persistent: bool, + ): + self.uuid = uuid.uuid1() # uuid1() includes the hardware address and timestamp + self.vm_hash = vm_hash + self.message = message + self.original = original + self.times = VmExecutionTimes(defined_at=datetime.now(tz=timezone.utc)) + self.ready_event = asyncio.Event() + self.concurrent_runs = 0 + self.runs_done_event = asyncio.Event() + self.stop_event = asyncio.Event() # triggered when the VM is stopped + self.preparation_pending_lock = asyncio.Lock() + self.stop_pending_lock = asyncio.Lock() + self.snapshot_manager = snapshot_manager + self.systemd_manager = systemd_manager + self.persistent = persistent + + def to_dict(self) -> dict: + return { + "is_running": self.is_running, + **self.__dict__, + } + + def to_json(self, indent: int | None = None) -> str: + return dumps_for_json(self.to_dict(), indent=indent) + + 
async def prepare(self) -> None: + """Download VM required files""" + async with self.preparation_pending_lock: + if self.resources: + # Already prepared + return + + self.times.preparing_at = datetime.now(tz=timezone.utc) + resources: ( + AlephProgramResources | AlephInstanceResources | AlephQemuResources | AlephQemuConfidentialInstance + ) + if self.is_program: + resources = AlephProgramResources(self.message, namespace=self.vm_hash) + elif self.is_instance: + if self.hypervisor == HypervisorType.firecracker: + resources = AlephInstanceResources(self.message, namespace=self.vm_hash) + elif self.hypervisor == HypervisorType.qemu: + if self.is_confidential: + resources = AlephQemuConfidentialResources(self.message, namespace=self.vm_hash) + else: + resources = AlephQemuResources(self.message, namespace=self.vm_hash) + resources.gpus = self.gpus + else: + msg = f"Unknown hypervisor type {self.hypervisor}" + raise ValueError(msg) + else: + msg = "Unknown executable message type" + raise ValueError(msg) + + if not resources: + msg = "Unknown executable message type" + raise ValueError(msg, repr(self.message)) + await resources.download_all() + self.times.prepared_at = datetime.now(tz=timezone.utc) + self.resources = resources + + def prepare_gpus(self, available_gpus: List[GpuDevice]) -> None: + gpus = [] + if self.message.requirements and self.message.requirements.gpu: + for gpu in self.message.requirements.gpu: + gpu = GpuProperties.parse_obj(gpu) + for available_gpu in available_gpus: + if available_gpu.device_id == gpu.device_id: + gpus.append(HostGPU(pci_host=available_gpu.pci_host)) + break + self.gpus = gpus + + def uses_gpu(self, pci_host: str) -> bool: + for gpu in self.gpus: + if gpu.pci_host == pci_host: + return True + + return False + + def create( + self, vm_id: int, tap_interface: TapInterface | None = None, prepare: bool = True + ) -> AlephVmControllerInterface: + if not self.resources: + msg = "Execution resources must be configured first" + raise 
ValueError(msg) + + vm: AlephVmControllerInterface + if self.is_program: + assert isinstance(self.resources, AlephProgramResources) + self.vm = vm = AlephFirecrackerProgram( + vm_id=vm_id, + vm_hash=self.vm_hash, + resources=self.resources, + enable_networking=self.message.environment.internet, + hardware_resources=self.message.resources, + tap_interface=tap_interface, + persistent=self.persistent, + prepare_jailer=prepare, + ) + elif self.is_instance: + if self.hypervisor == HypervisorType.firecracker: + assert isinstance(self.resources, AlephInstanceResources) + self.vm = vm = AlephFirecrackerInstance( + vm_id=vm_id, + vm_hash=self.vm_hash, + resources=self.resources, + enable_networking=self.message.environment.internet, + hardware_resources=self.message.resources, + tap_interface=tap_interface, + prepare_jailer=prepare, + ) + elif self.hypervisor == HypervisorType.qemu: + if self.is_confidential: + assert isinstance(self.resources, AlephQemuConfidentialResources) + self.vm = vm = AlephQemuConfidentialInstance( + vm_id=vm_id, + vm_hash=self.vm_hash, + resources=self.resources, + enable_networking=self.message.environment.internet, + hardware_resources=self.message.resources, + tap_interface=tap_interface, + ) + else: + assert isinstance(self.resources, AlephQemuResources) + self.vm = vm = AlephQemuInstance( + vm_id=vm_id, + vm_hash=self.vm_hash, + resources=self.resources, + enable_networking=self.message.environment.internet, + hardware_resources=self.message.resources, + tap_interface=tap_interface, + ) + else: + msg = "Unknown VM" + raise Exception(msg) + else: + msg = "Unknown VM" + raise Exception(msg) + + return vm + + async def start(self): + assert self.vm, "The VM attribute has to be set before calling start()" + + self.times.starting_at = datetime.now(tz=timezone.utc) + + try: + await self.vm.setup() + # Avoid VM start() method because it's only for ephemeral programs, + # for persistent and instances we will use SystemD manager + if not 
self.persistent: + await self.vm.start() + await self.vm.configure() + await self.vm.start_guest_api() + self.times.started_at = datetime.now(tz=timezone.utc) + self.ready_event.set() + await self.save() + except Exception: + await self.vm.teardown() + raise + + async def wait_for_init(self): + assert self.vm, "The VM attribute has to be set before calling wait_for_init()" + await self.vm.wait_for_init() + + def stop_after_timeout(self, timeout: float = 5.0) -> Task | None: + if self.persistent: + logger.debug("VM marked as long running. Ignoring timeout.") + return None + + if self.expire_task: + logger.debug("VM already has a timeout. Extending it.") + self.expire_task.cancel() + + vm_id: str = str(self.vm.vm_id if self.vm else None) + self.expire_task = create_task_log_exceptions(self.expire(timeout), name=f"expire {vm_id}") + return self.expire_task + + async def expire(self, timeout: float) -> None: + """Coroutine that will stop the VM after 'timeout' seconds.""" + await asyncio.sleep(timeout) + assert self.times.started_at + if self.times.stopping_at or self.times.stopped_at: + return + await self.stop() + + def cancel_expiration(self) -> bool: + if self.expire_task: + self.expire_task.cancel() + return True + else: + return False + + def cancel_update(self) -> bool: + if self.update_task: + self.update_task.cancel() + return True + else: + return False + + async def stop(self) -> None: + """Stop the VM and release resources""" + assert self.vm, "The VM attribute has to be set before calling stop()" + + # Prevent concurrent calls to stop() using a Lock + async with self.stop_pending_lock: + if self.times.stopped_at is not None: + logger.debug(f"VM={self.vm.vm_id} already stopped") + return + self.times.stopping_at = datetime.now(tz=timezone.utc) + await self.all_runs_complete() + await self.record_usage() + await self.vm.teardown() + self.times.stopped_at = datetime.now(tz=timezone.utc) + self.cancel_expiration() + self.cancel_update() + + if 
self.vm.support_snapshot and self.snapshot_manager: + await self.snapshot_manager.stop_for(self.vm_hash) + self.stop_event.set() + + def start_watching_for_updates(self, pubsub: PubSub): + if not self.update_task: + self.update_task = create_task_log_exceptions(self.watch_for_updates(pubsub=pubsub)) + + async def watch_for_updates(self, pubsub: PubSub): + if self.is_instance: + await pubsub.msubscribe( + *(volume.ref for volume in (self.original.volumes or []) if hasattr(volume, "ref")), + ) + else: + await pubsub.msubscribe( + self.original.code.ref, + self.original.runtime.ref, + self.original.data.ref if self.original.data else None, + *(volume.ref for volume in (self.original.volumes or []) if hasattr(volume, "ref")), + ) + logger.debug("Update received, stopping VM...") + await self.stop() + + async def all_runs_complete(self): + """Wait for all runs to complete. Used in self.stop() to prevent interrupting a request.""" + if self.concurrent_runs == 0: + logger.debug("Stop: clear, no run at the moment") + return + else: + logger.debug("Stop: waiting for runs to complete...") + await self.runs_done_event.wait() + + async def save(self): + assert self.vm, "The VM attribute has to be set before calling save()" + + pid_info = self.vm.to_dict() if self.vm else None + # Handle cases when the process cannot be accessed + if not self.persistent and pid_info and pid_info.get("process"): + await save_record( + ExecutionRecord( + uuid=str(self.uuid), + vm_hash=self.vm_hash, + vm_id=self.vm_id, + time_defined=self.times.defined_at, + time_prepared=self.times.prepared_at, + time_started=self.times.started_at, + time_stopping=self.times.stopping_at, + cpu_time_user=pid_info["process"]["cpu_times"].user, + cpu_time_system=pid_info["process"]["cpu_times"].system, + io_read_count=pid_info["process"]["io_counters"][0], + io_write_count=pid_info["process"]["io_counters"][1], + io_read_bytes=pid_info["process"]["io_counters"][2], + 
io_write_bytes=pid_info["process"]["io_counters"][3], + vcpus=self.vm.hardware_resources.vcpus, + memory=self.vm.hardware_resources.memory, + network_tap=self.vm.tap_interface.device_name if self.vm.tap_interface else "", + message=self.message.json(), + original_message=self.original.json(), + persistent=self.persistent, + ) + ) + else: + # The process cannot be accessed, or it's a persistent VM. + await save_record( + ExecutionRecord( + uuid=str(self.uuid), + vm_hash=self.vm_hash, + vm_id=self.vm_id, + time_defined=self.times.defined_at, + time_prepared=self.times.prepared_at, + time_started=self.times.started_at, + time_stopping=self.times.stopping_at, + cpu_time_user=None, + cpu_time_system=None, + io_read_count=None, + io_write_count=None, + io_read_bytes=None, + io_write_bytes=None, + vcpus=self.vm.hardware_resources.vcpus, + memory=self.vm.hardware_resources.memory, + message=self.message.json(), + original_message=self.original.json(), + persistent=self.persistent, + gpus=json.dumps(self.gpus, default=pydantic_encoder), + ) + ) + + async def record_usage(self): + await delete_record(execution_uuid=str(self.uuid)) + if settings.EXECUTION_LOG_ENABLED: + await save_execution_data(execution_uuid=self.uuid, execution_data=self.to_json()) + + async def run_code(self, scope: dict | None = None) -> bytes: + if not self.vm: + msg = "The VM has not been created yet" + raise ValueError(msg) + + if not self.is_program: + msg = "Code can ony be run on programs" + raise ValueError(msg) + + assert isinstance(self.vm, AlephFirecrackerProgram) + + self.concurrent_runs += 1 + self.runs_done_event.clear() + try: + return await self.vm.run_code(scope=scope) + finally: + self.concurrent_runs -= 1 + if self.concurrent_runs == 0: + self.runs_done_event.set() diff --git a/src/aleph/vm/network/__init__.py b/src/aleph/vm/network/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/network/firewall.py b/src/aleph/vm/network/firewall.py new file mode 
100644 index 000000000..fb91d7288 --- /dev/null +++ b/src/aleph/vm/network/firewall.py @@ -0,0 +1,390 @@ +import json +import logging +from functools import lru_cache + +from nftables import Nftables + +from aleph.vm.conf import settings + +from .interfaces import TapInterface + +logger = logging.getLogger(__name__) + + +@lru_cache +def get_customized_nftables() -> Nftables: + nft = Nftables() + nft.set_json_output(True) + nft.set_stateless_output(True) + nft.set_service_output(False) + nft.set_reversedns_output(False) + nft.set_numeric_proto_output(True) + return nft + + +def execute_json_nft_commands(commands: list[dict]) -> int: + """Executes a list of nftables commands, and returns the exit status""" + nft = get_customized_nftables() + commands_dict = {"nftables": commands} + try: + logger.debug("Validating nftables rules") + nft.json_validate(commands_dict) + except Exception as e: + logger.error(f"Failed to verify nftables rules: {e}") + + logger.debug("Inserting nftables rules") + return_code, output, error = nft.json_cmd(commands_dict) + if return_code != 0: + logger.error(f"Failed to add nftables rules: {error}") + + return return_code + + +def get_existing_nftables_ruleset() -> dict: + """Retrieves the full nftables ruleset and returns it""" + nft = get_customized_nftables() + return_code, output, error = nft.cmd("list ruleset") + + if return_code != 0: + logger.error(f"Unable to get nftables ruleset: {error}") + return {"nftables": []} + + nft_ruleset = json.loads(output) + return nft_ruleset + + +def get_base_chains_for_hook(hook: str, family: str = "ip") -> list: + """Looks through the nftables ruleset and creates a list of + all chains that are base chains for the specified hook""" + nft_ruleset = get_existing_nftables_ruleset() + chains = [] + + for entry in nft_ruleset["nftables"]: + if ( + not isinstance(entry, dict) + or "chain" not in entry + or "family" not in entry["chain"] + or entry["chain"]["family"] != family + or "hook" not in 
entry["chain"] + or entry["chain"]["hook"] != hook + ): + # Ignoring all entries that are not a base chain. + continue + + chains.append(entry) + + return chains + + +def get_table_for_hook(hook: str, family: str = "ip") -> str: + chains = get_base_chains_for_hook(hook, family) + table = chains.pop()["chain"]["table"] + return table + + +def check_if_table_exists(family: str, table: str) -> bool: + """Checks whether the specified table exists in the nftables ruleset""" + nft_ruleset = get_existing_nftables_ruleset() + for entry in nft_ruleset["nftables"]: + if ( + isinstance(entry, dict) + and "table" in entry + # Key "family" was reported by users as not always present, so we use .get() instead of []. + and entry.get("family") == family + and entry.get("name") == table + ): + return True + return False + + +def initialize_nftables() -> None: + """Creates basic chains and rules in the nftables ruleset to build on further. + Additionally, stores some information in the class for later use.""" + commands: list[dict] = [] + base_chains: dict[str, dict[str, str]] = { + "postrouting": {}, + "forward": {}, + } + for hook in base_chains: + chains = get_base_chains_for_hook(hook) + if len(chains) == 0: + table = "nat" if hook == "postrouting" else "filter" + chain = "POSTROUTING" if hook == "postrouting" else "FORWARD" + prio = 100 if hook == "postrouting" else 0 + if not check_if_table_exists("ip", table): + commands.append({"add": {"table": {"family": "ip", "name": table}}}) + new_chain = { + "chain": { + "family": "ip", + "table": table, + "name": chain, + "type": table, + "hook": hook, + "prio": prio, + } + } + commands.append({"add": new_chain}) + chains.append(new_chain) + elif len(chains) > 1: + msg = f"Multiple base chains for an nftables basechain are not supported: {hook}" + raise NotImplementedError(msg) + base_chains[hook] = chains.pop()["chain"] + + commands.append( + _make_add_chain_command( + "ip", + base_chains["postrouting"]["table"], + 
f"{settings.NFTABLES_CHAIN_PREFIX}-supervisor-nat", + ) + ) + commands.append( + { + "add": { + "rule": { + "family": "ip", + "table": base_chains["postrouting"]["table"], + "chain": base_chains["postrouting"]["name"], + "expr": [{"jump": {"target": f"{settings.NFTABLES_CHAIN_PREFIX}-supervisor-nat"}}], + } + } + } + ) + + commands.append( + _make_add_chain_command( + "ip", + base_chains["forward"]["table"], + f"{settings.NFTABLES_CHAIN_PREFIX}-supervisor-filter", + ) + ) + commands.append( + { + "add": { + "rule": { + "family": "ip", + "table": base_chains["forward"]["table"], + "chain": base_chains["forward"]["name"], + "expr": [{"jump": {"target": f"{settings.NFTABLES_CHAIN_PREFIX}-supervisor-filter"}}], + } + } + } + ) + commands.append( + { + "add": { + "rule": { + "family": "ip", + "table": base_chains["forward"]["table"], + "chain": f"{settings.NFTABLES_CHAIN_PREFIX}-supervisor-filter", + "expr": [ + { + "match": { + "op": "in", + "left": {"ct": {"key": "state"}}, + "right": ["related", "established"], + } + }, + {"accept": None}, + ], + } + } + } + ) + + execute_json_nft_commands(commands) + + +def teardown_nftables() -> None: + """Removes all of this project's related rules in the nftables ruleset.""" + logger.debug("Tearing down nftables setup") + remove_chain(f"{settings.NFTABLES_CHAIN_PREFIX}-supervisor-nat") + remove_chain(f"{settings.NFTABLES_CHAIN_PREFIX}-supervisor-filter") + + +def add_chain(family: str, table: str, name: str) -> int: + """Helper function to quickly create a new chain in the nftables ruleset + Returns the exit code from executing the nftables commands""" + commands = [_make_add_chain_command(family, table, name)] + return execute_json_nft_commands(commands) + + +def _make_add_chain_command(family: str, table: str, name: str) -> dict: + return { + "add": { + "chain": { + "family": family, + "table": table, + "name": name, + } + } + } + + +def remove_chain(name: str) -> int: + """Removes all rules that jump to the chain, and then 
removes the chain itself. + Returns the exit code from executing the nftables commands""" + nft_ruleset = get_existing_nftables_ruleset() + commands = [] + remove_chain_commands = [] + + for entry in nft_ruleset["nftables"]: + if ( + isinstance(entry, dict) + and "rule" in entry + and "expr" in entry["rule"] + and "jump" in entry["rule"]["expr"][0] + and entry["rule"]["expr"][0]["jump"]["target"] == name + ): + commands.append( + { + "delete": { + "rule": { + "family": entry["rule"]["family"], + "table": entry["rule"]["table"], + "chain": entry["rule"]["chain"], + "handle": entry["rule"]["handle"], + } + } + } + ) + elif isinstance(entry, dict) and "chain" in entry and entry["chain"]["name"] == name: + remove_chain_commands.append( + { + "delete": { + "chain": { + "family": entry["chain"]["family"], + "table": entry["chain"]["table"], + "name": entry["chain"]["name"], + } + } + } + ) + + commands += remove_chain_commands + return execute_json_nft_commands(commands) + + +def add_postrouting_chain(name: str) -> int: + """Adds a chain and creates a rule from the base chain with the postrouting hook. + Returns the exit code from executing the nftables commands""" + table = get_table_for_hook("postrouting") + add_chain("ip", table, name) + command = [ + { + "add": { + "rule": { + "family": "ip", + "table": table, + "chain": f"{settings.NFTABLES_CHAIN_PREFIX}-supervisor-nat", + "expr": [{"jump": {"target": name}}], + } + } + } + ] + return execute_json_nft_commands(command) + + +def add_forward_chain(name: str) -> int: + """Adds a chain and creates a rule from the base chain with the forward hook. 
+ Returns the exit code from executing the nftables commands""" + table = get_table_for_hook("forward") + add_chain("ip", table, name) + command = [ + { + "add": { + "rule": { + "family": "ip", + "table": table, + "chain": f"{settings.NFTABLES_CHAIN_PREFIX}-supervisor-filter", + "expr": [{"jump": {"target": name}}], + } + } + } + ] + return execute_json_nft_commands(command) + + +def add_masquerading_rule(vm_id: int, interface: TapInterface) -> int: + """Creates a rule for the VM with the specified id to allow outbound traffic to be masqueraded (NAT) + Returns the exit code from executing the nftables commands""" + table = get_table_for_hook("postrouting") + command = [ + { + "add": { + "rule": { + "family": "ip", + "table": table, + "chain": f"{settings.NFTABLES_CHAIN_PREFIX}-vm-nat-{vm_id}", + "expr": [ + { + "match": { + "op": "==", + "left": {"meta": {"key": "iifname"}}, + "right": interface.device_name, + } + }, + { + "match": { + "op": "==", + "left": {"meta": {"key": "oifname"}}, + "right": settings.NETWORK_INTERFACE, + } + }, + {"masquerade": None}, + ], + } + } + } + ] + + return execute_json_nft_commands(command) + + +def add_forward_rule_to_external(vm_id: int, interface: TapInterface) -> int: + """Creates a rule for the VM with the specified id to allow outbound traffic + Returns the exit code from executing the nftables commands""" + table = get_table_for_hook("forward") + command = [ + { + "add": { + "rule": { + "family": "ip", + "table": table, + "chain": f"{settings.NFTABLES_CHAIN_PREFIX}-vm-filter-{vm_id}", + "expr": [ + { + "match": { + "op": "==", + "left": {"meta": {"key": "iifname"}}, + "right": interface.device_name, + } + }, + { + "match": { + "op": "==", + "left": {"meta": {"key": "oifname"}}, + "right": settings.NETWORK_INTERFACE, + } + }, + {"accept": None}, + ], + } + } + } + ] + + return execute_json_nft_commands(command) + + +def setup_nftables_for_vm(vm_id: int, interface: TapInterface) -> None: + """Sets up chains for filter and nat 
purposes specific to this VM, and makes sure those chains are jumped to""" + add_postrouting_chain(f"{settings.NFTABLES_CHAIN_PREFIX}-vm-nat-{vm_id}") + add_forward_chain(f"{settings.NFTABLES_CHAIN_PREFIX}-vm-filter-{vm_id}") + add_masquerading_rule(vm_id, interface) + add_forward_rule_to_external(vm_id, interface) + + +def teardown_nftables_for_vm(vm_id: int) -> None: + """Remove all nftables rules related to the specified VM""" + remove_chain(f"{settings.NFTABLES_CHAIN_PREFIX}-vm-nat-{vm_id}") + remove_chain(f"{settings.NFTABLES_CHAIN_PREFIX}-vm-filter-{vm_id}") diff --git a/src/aleph/vm/network/hostnetwork.py b/src/aleph/vm/network/hostnetwork.py new file mode 100644 index 000000000..e9fcaa794 --- /dev/null +++ b/src/aleph/vm/network/hostnetwork.py @@ -0,0 +1,228 @@ +import logging +from ipaddress import IPv6Network +from pathlib import Path +from typing import Protocol + +import pyroute2 +from aleph_message.models import ItemHash + +from aleph.vm.conf import IPv6AllocationPolicy +from aleph.vm.vm_type import VmType + +from .firewall import initialize_nftables, setup_nftables_for_vm, teardown_nftables +from .interfaces import TapInterface +from .ipaddresses import IPv4NetworkWithInterfaces +from .ndp_proxy import NdpProxy + +logger = logging.getLogger(__name__) + + +def _read_file_as_int(config_file: Path) -> int: + return int(config_file.read_text()) + + +def get_ipv4_forwarding_state() -> int: + """Reads the current IPv4 forwarding setting from the host, converts it to int and returns it""" + return _read_file_as_int(Path("/proc/sys/net/ipv4/ip_forward")) + + +def get_ipv6_forwarding_state() -> int: + """Reads the current IPv6 forwarding setting from the host, converts it to int and returns it""" + return _read_file_as_int(Path("/proc/sys/net/ipv6/conf/all/forwarding")) + + +class IPv6Allocator(Protocol): + def allocate_vm_ipv6_subnet(self, vm_id: int, vm_hash: ItemHash, vm_type: VmType) -> IPv6Network: ... 
+ + +class StaticIPv6Allocator(IPv6Allocator): + """ + Static IPv6 allocator. + Computes IPv6 addresses based on the machine type and VM hash. The algorithm works + as follows: + + | Component | CRN /64 range | VM type | Item hash prefix | Instance range | + |-----------|---------------|---------|------------------|----------------| + | Length | 64 bits | 16 bits | 44 bits | 4 bits | + """ + + VM_TYPE_PREFIX = { + VmType.microvm: "1", + VmType.persistent_program: "2", + VmType.instance: "3", + } + + def __init__(self, ipv6_range: IPv6Network, subnet_prefix: int): + if ipv6_range.prefixlen not in (56, 64): + msg = "The static IP address allocation scheme requires a /64 or /56 subnet" + raise ValueError(msg) + if subnet_prefix < 124: + msg = "The IPv6 subnet prefix cannot be larger than /124." + raise ValueError(msg) + + self.ipv6_range = ipv6_range + self.subnet_prefix = subnet_prefix + + def allocate_vm_ipv6_subnet(self, vm_id: int, vm_hash: ItemHash, vm_type: VmType) -> IPv6Network: + ipv6_elems = self.ipv6_range.exploded.split(":")[:4] + ipv6_elems += [self.VM_TYPE_PREFIX[vm_type]] + + # Add the item hash of the VM as the last 44 bits of the IPv6 address. + # The last 4 bits are left for use to the VM owner as a /124 subnet. + ipv6_elems += [vm_hash[0:4], vm_hash[4:8], vm_hash[8:11] + "0"] + + return IPv6Network(":".join(ipv6_elems) + "/124") + + +class DynamicIPv6Allocator(IPv6Allocator): + """ + A dynamic allocator, for testing purposes. + This allocator slices the input IPv6 address range in subnets of the same size + and iterates through them. The first subnet is assumed to be reserved for use by the host, + as we use this allocator in situations where the address range is small and the host + subnet cannot be isolated from the VM subnets (ex: /124 network on Digital Ocean for the CI). 
+ """ + + def __init__(self, ipv6_range: IPv6Network, subnet_prefix: int): + self.ipv6_range = ipv6_range + self.vm_subnet_prefix = subnet_prefix + + self.subnets_generator = ipv6_range.subnets(new_prefix=subnet_prefix) + # Assume the first subnet is reserved for the host + _ = next(self.subnets_generator) + + def allocate_vm_ipv6_subnet(self, vm_id: int, vm_hash: ItemHash, vm_type: VmType) -> IPv6Network: + return next(self.subnets_generator) + + +def make_ipv6_allocator( + allocation_policy: IPv6AllocationPolicy, address_pool: str, subnet_prefix: int +) -> IPv6Allocator: + if allocation_policy == IPv6AllocationPolicy.static: + return StaticIPv6Allocator(ipv6_range=IPv6Network(address_pool), subnet_prefix=subnet_prefix) + + return DynamicIPv6Allocator(ipv6_range=IPv6Network(address_pool), subnet_prefix=subnet_prefix) + + +class Network: + ipv4_forward_state_before_setup: int | None = None + ipv6_forward_state_before_setup: int | None = None + external_interface: str + ipv4_forwarding_enabled: bool + ipv6_forwarding_enabled: bool + use_ndp_proxy: bool + ipv4_address_pool: IPv4NetworkWithInterfaces = IPv4NetworkWithInterfaces("172.16.0.0/12") + ipv6_address_pool: IPv6Network + network_size: int + ndp_proxy: NdpProxy | None = None + + IPV6_SUBNET_PREFIX: int = 124 + + def __init__( + self, + vm_ipv4_address_pool_range: str, + vm_network_size: int, + external_interface: str, + ipv6_allocator: IPv6Allocator, + use_ndp_proxy: bool, + ipv4_forwarding_enabled: bool = True, + ipv6_forwarding_enabled: bool = True, + ) -> None: + """Initialize the Network class with the relevant configuration.""" + self.ipv4_address_pool = IPv4NetworkWithInterfaces(vm_ipv4_address_pool_range) + self.ipv6_allocator = ipv6_allocator + + self.network_size = vm_network_size + self.external_interface = external_interface + self.ipv4_forwarding_enabled = ipv4_forwarding_enabled + self.ipv6_forwarding_enabled = ipv6_forwarding_enabled + self.use_ndp_proxy = use_ndp_proxy + self.ndb = pyroute2.NDB() 
+ + if not self.ipv4_address_pool.is_private: + logger.warning(f"Using a network range that is not private: {self.ipv4_address_pool}") + + def setup(self) -> None: + """Set up the network for use by the VMs""" + logger.debug("Enabling IPv4 forwarding") + if self.ipv4_forwarding_enabled: + self.enable_ipv4_forwarding() + else: + logger.warning("IPv4 forwarding is disabled, VMs will not have internet access on IPv4") + logger.debug("Enabling IPv6 forwarding") + if self.ipv6_forwarding_enabled: + self.enable_ipv6_forwarding() + else: + logger.warning("IPv6 forwarding is disabled, VMs will not have internet access on IPv6") + + logger.debug("Enabling NDP proxy") + if self.use_ndp_proxy: + self.ndp_proxy = NdpProxy(host_network_interface=self.external_interface) + + logger.debug("Initializing nftables") + initialize_nftables() + logger.debug("Network setup complete") + + def get_network_for_tap(self, vm_id: int) -> IPv4NetworkWithInterfaces: + subnets = list(self.ipv4_address_pool.subnets(new_prefix=self.network_size)) + return subnets[vm_id] + + def enable_ipv4_forwarding(self) -> None: + """Saves the hosts IPv4 forwarding state, and if it was disabled, enables it""" + logger.debug("Enabling IPv4 forwarding") + self.ipv4_forward_state_before_setup = get_ipv4_forwarding_state() + if not self.ipv4_forward_state_before_setup: + Path("/proc/sys/net/ipv4/ip_forward").write_text("1") + + def reset_ipv4_forwarding_state(self) -> None: + """Returns the hosts IPv4 forwarding state how it was before we enabled it""" + logger.debug("Resetting IPv4 forwarding state to state before we enabled it") + if self.ipv4_forward_state_before_setup is None: + return + + if self.ipv4_forward_state_before_setup != get_ipv4_forwarding_state(): + Path("/proc/sys/net/ipv4/ip_forward").write_text(str(self.ipv4_forward_state_before_setup)) + + def enable_ipv6_forwarding(self) -> None: + """Saves the host IPv6 forwarding state, and if it was disabled, enables it""" + logger.debug("Enabling IPv6 
forwarding") + self.ipv6_forward_state_before_setup = get_ipv6_forwarding_state() + if not self.ipv6_forward_state_before_setup: + Path("/proc/sys/net/ipv6/conf/all/forwarding").write_text("1") + + def reset_ipv6_forwarding_state(self) -> None: + """Returns the host IPv6 forwarding state how it was before we enabled it""" + logger.debug("Resetting IPv6 forwarding state to state before we enabled it") + if self.ipv6_forward_state_before_setup is None: + return + + if self.ipv6_forward_state_before_setup != get_ipv6_forwarding_state(): + Path("/proc/sys/net/ipv6/conf/all/forwarding").write_text(str(self.ipv6_forward_state_before_setup)) + + def teardown(self) -> None: + teardown_nftables() + self.reset_ipv4_forwarding_state() + self.reset_ipv6_forwarding_state() + + async def prepare_tap(self, vm_id: int, vm_hash: ItemHash, vm_type: VmType) -> TapInterface: + """Prepare TAP interface to be used by VM""" + interface = TapInterface( + f"vmtap{vm_id}", + ip_network=self.get_network_for_tap(vm_id), + ipv6_network=self.ipv6_allocator.allocate_vm_ipv6_subnet( + vm_id=vm_id, + vm_hash=vm_hash, + vm_type=vm_type, + ), + ndp_proxy=self.ndp_proxy, + ) + return interface + + async def create_tap(self, vm_id: int, interface: TapInterface): + """Create TAP interface to be used by VM""" + await interface.create() + setup_nftables_for_vm(vm_id, interface) + + def interface_exists(self, vm_id: int): + interface_name = f"vmtap{vm_id}" + return self.ndb.interfaces.exists(interface_name) diff --git a/src/aleph/vm/network/interfaces.py b/src/aleph/vm/network/interfaces.py new file mode 100644 index 000000000..8c40f5eaa --- /dev/null +++ b/src/aleph/vm/network/interfaces.py @@ -0,0 +1,180 @@ +import asyncio +import errno +import logging +import shutil +from ipaddress import IPv4Interface, IPv6Interface, IPv6Network + +from pyroute2 import IPRoute, NetlinkError + +from .ipaddresses import IPv4NetworkWithInterfaces +from .ndp_proxy import NdpProxy + +logger = logging.getLogger(__name__) + 
+ +class MissingInterfaceError(Exception): + """The interface is missing.""" + + pass + + +class InterfaceBusyError(Exception): + """The interface is busy.""" + + pass + + +def create_tap_interface(ipr: IPRoute, device_name: str): + """Create a TAP interface with the given name. If the interface already exists, which should not happen, a warning + is logged and the function returns without error.""" + try: + ipr.link("add", ifname=device_name, kind="tuntap", mode="tap") + except NetlinkError as error: + if error.code == 17: + logger.warning(f"Interface {device_name} already exists") + elif error.code == 16: + logger.warning(f"Interface {device_name} is busy - is there another process using it ?") + else: + logger.error(f"Unknown exception while creating interface {device_name}: {error}") + except OSError as error: + if error.errno == errno.EBUSY: + logger.warning(f"Interface {device_name} is busy - is there another process using it ?") + else: + logger.error(f"Unknown exception while creating interface {device_name}: {error}") + + +def add_ip_address(ipr: IPRoute, device_name: str, ip: IPv4Interface | IPv6Interface): + """Add an IP address to the given interface. If the address already exists, a warning is logged and the function + returns without error.""" + interface_index: list[int] = ipr.link_lookup(ifname=device_name) + if not interface_index: + msg = f"Interface {device_name} does not exist, can't add address {ip} to it." 
+ raise MissingInterfaceError(msg) + try: + ipr.addr("add", index=interface_index[0], address=str(ip.ip), mask=ip.network.prefixlen) + except NetlinkError as e: + if e.code == 17: + logger.warning(f"Address {ip} already exists") + else: + logger.error(f"Unknown exception while adding address {ip} to interface {device_name}: {e}") + except OSError as e: + logger.error(f"Unknown exception while adding address {ip} to interface {device_name}: {e}") + + +def delete_ip_address(ipr: IPRoute, device_name: str, ip: str, mask: int): + """Delete an IP address to the given interface.""" + interface_index: list[int] = ipr.link_lookup(ifname=device_name) + if not interface_index: + msg = f"Interface {device_name} does not exist, can't delete address {ip} to it." + raise MissingInterfaceError(msg) + try: + ipr.addr("del", index=interface_index[0], address=ip, mask=mask) + except NetlinkError as e: + logger.exception(f"Unknown exception while deleting address {ip}/{mask} to interface {device_name}: {e}") + except OSError as e: + logger.exception(f"Unknown exception while deleting address {ip}/{mask} to interface {device_name}: {e}") + + +def set_link_up(ipr: IPRoute, device_name: str): + """Set the given interface up.""" + interface_index: list[int] = ipr.link_lookup(ifname=device_name) + if not interface_index: + msg = f"Interface {device_name} does not exist, can't set it up." 
+ raise MissingInterfaceError(msg) + try: + ipr.link("set", index=interface_index[0], state="up") + except NetlinkError as e: + logger.error(f"Unknown exception while setting link up to interface {device_name}: {e}") + except OSError as e: + logger.error(f"Unknown exception while setting link up to interface {device_name}: {e}") + + +def delete_tap_interface(ipr: IPRoute, device_name: str): + interface_index: list[int] = ipr.link_lookup(ifname=device_name) + if not interface_index: + logger.debug(f"Interface {device_name} does not exist, won't be deleted.") + return + try: + ipr.link("del", index=interface_index[0]) + except NetlinkError as error: + logger.warning(f"Interface {device_name} cannot be deleted: {error}") + except OSError as error: + logger.warning(f"Interface {device_name} cannot be deleted: {error}") + + +class TapInterface: + device_name: str + ip_network: IPv4NetworkWithInterfaces + ipv6_network: IPv6Network + + def __init__( + self, + device_name: str, + ip_network: IPv4NetworkWithInterfaces, + ipv6_network: IPv6Network, + ndp_proxy: NdpProxy | None, + ): + self.device_name: str = device_name + self.ip_network: IPv4NetworkWithInterfaces = ip_network + self.ipv6_network = ipv6_network + self.ndp_proxy = ndp_proxy + + @property + def guest_ip(self) -> IPv4Interface: + return self.ip_network[2] + + @property + def host_ip(self) -> IPv4Interface: + return self.ip_network[1] + + @property + def guest_ipv6(self) -> IPv6Interface: + return IPv6Interface(f"{self.ipv6_network[1]}/{self.ipv6_network.prefixlen}") + + @property + def host_ipv6(self) -> IPv6Interface: + return IPv6Interface(f"{self.ipv6_network[0]}/{self.ipv6_network.prefixlen}") + + def to_dict(self): + return { + "device": self.device_name, + "ipv4": str(self.ip_network), + "ipv6": str(self.ipv6_network), + } + + async def create(self): + logger.debug("Create network interface") + + ip_command = shutil.which("ip") + if not ip_command: + msg = "ip command not found" + raise 
FileNotFoundError(msg) + + ipv6_gateway = self.host_ipv6 + + with IPRoute() as ipr: + create_tap_interface(ipr, self.device_name) + add_ip_address(ipr, self.device_name, self.host_ip) + add_ip_address(ipr, self.device_name, self.host_ipv6) + set_link_up(ipr, self.device_name) + + if self.ndp_proxy: + await self.ndp_proxy.add_range(self.device_name, ipv6_gateway.network) + logger.debug(f"Network interface created: {self.device_name}") + + async def delete(self) -> None: + """Asks the firewall to teardown any rules for the VM with id provided. + Then removes the interface from the host.""" + logger.debug(f"Removing interface {self.device_name}") + await asyncio.sleep(0.1) # Avoids Device/Resource busy bug + if self.ndp_proxy: + await self.ndp_proxy.delete_range(self.device_name) + with IPRoute() as ipr: + interface_index: list[int] = ipr.link_lookup(ifname=self.device_name) + for addr in ipr.get_addr(index=interface_index): + # The order of attributes in the attrs field comes from the Netlink protocol + attrs = dict(addr["attrs"]) + ip_addr: str = attrs["IFA_ADDRESS"] + mask: int = addr["prefixlen"] + delete_ip_address(ipr, self.device_name, ip_addr, mask) + delete_tap_interface(ipr, self.device_name) diff --git a/src/aleph/vm/network/ipaddresses.py b/src/aleph/vm/network/ipaddresses.py new file mode 100644 index 000000000..aef2a431f --- /dev/null +++ b/src/aleph/vm/network/ipaddresses.py @@ -0,0 +1,25 @@ +from collections.abc import Iterator +from ipaddress import IPv4Interface, IPv4Network + + +class IPv4NetworkWithInterfaces(IPv4Network): + def hosts(self) -> Iterator[IPv4Interface]: + network = int(self.network_address) + broadcast = int(self.broadcast_address) + for x in range(network + 1, broadcast): + yield IPv4Interface((x, self.prefixlen)) + + def __getitem__(self, n) -> IPv4Interface: + network = int(self.network_address) + broadcast = int(self.broadcast_address) + if n >= 0: + if network + n > broadcast: + msg = "address out of range" + raise 
IndexError(msg) + return IPv4Interface((network + n, self.prefixlen)) + else: + n += 1 + if broadcast + n < network: + msg = "address out of range" + raise IndexError(msg) + return IPv4Interface((broadcast + n, self.prefixlen)) diff --git a/src/aleph/vm/network/ndp_proxy.py b/src/aleph/vm/network/ndp_proxy.py new file mode 100644 index 000000000..60e73b287 --- /dev/null +++ b/src/aleph/vm/network/ndp_proxy.py @@ -0,0 +1,63 @@ +""" +Neighbourhood Discovery Proxy (NDP) functionalities. + +Some cloud providers do not route the whole advertised IPv6 address range to servers, but instead +only route one address. They will issue NDP requests to the network to determine if the other +addresses in the range are used. This means that our server (be it the hypervisor or the VMs) +has to answer to these requests to make the VMs routable. + +To achieve this, we use ndppd. Each time an update is required, we overwrite /etc/ndppd.conf +and restart the service. +""" + +import logging +from dataclasses import dataclass +from ipaddress import IPv6Network +from pathlib import Path +from subprocess import CalledProcessError + +from aleph.vm.utils import run_in_subprocess + +logger = logging.getLogger(__name__) + + +@dataclass +class NdpRule: + address_range: IPv6Network + + +class NdpProxy: + def __init__(self, host_network_interface: str): + self.host_network_interface = host_network_interface + self.interface_address_range_mapping: dict[str, IPv6Network] = {} + + @staticmethod + async def _restart_ndppd(): + logger.debug("Restarting ndppd") + try: + await run_in_subprocess(["systemctl", "restart", "ndppd"]) + except CalledProcessError as error: + logger.error("Failed to restart ndppd: %s", error) + # We do not raise the error here, since this should not crash the entire system + + async def _update_ndppd_conf(self): + config = f"proxy {self.host_network_interface} {{\n" + for interface, address_range in self.interface_address_range_mapping.items(): + config += f" rule 
{address_range} {{\n iface {interface}\n }}\n" + config += "}\n" + Path("/etc/ndppd.conf").write_text(config) + await self._restart_ndppd() + + async def add_range(self, interface: str, address_range: IPv6Network): + logger.debug("Proxying range %s -> %s", address_range, interface) + self.interface_address_range_mapping[interface] = address_range + await self._update_ndppd_conf() + + async def delete_range(self, interface: str): + try: + address_range = self.interface_address_range_mapping.pop(interface) + logger.debug("Deactivated proxying for %s (%s)", interface, address_range) + except KeyError: + return + + await self._update_ndppd_conf() diff --git a/src/aleph/vm/orchestrator/INSTANCES.md b/src/aleph/vm/orchestrator/INSTANCES.md new file mode 100644 index 000000000..dbe07bfe2 --- /dev/null +++ b/src/aleph/vm/orchestrator/INSTANCES.md @@ -0,0 +1,44 @@ + +# Instance Messages + +Support of Instance message in the aleph-message repository was added in this PR: +https://github.com/aleph-im/aleph-message/pull/48 + +## Changes added + +### Aleph message repository + +I added a new type of message called `InstanceMessage`, with the changes that we designed for VM instances. +The content of this message is a new type called `InstanceContent`, that replaces the field `runtime` by `rootfs` that +instead be an Immutable volume becomes a Persistent volume and adds a new field inside called `parent`, that will be the +item hash of the base filesystem of the VM. We will create a .ext4 file with the size of the volume and **"attach"** to it +the base filesystem. + +Note that this filesystem should be in **.ext4** format, cannot be an **squashfs** +file, because we will map it as a block device inside the machine. + +Also, I added a union type for Instance messages and Program message called `ExecutableMessage` and also a new one called +`ExecutableContent` as union of Instance and program content types. 
+
+### Aleph VM repository
+
+I have created a function called `create_devmapper` in _**vm_supervisor/storage.py**_. This method can create a
+dev-mapper device based on the parent reference. I followed
+[this](https://community.aleph.im/t/deploying-mutable-vm-instances-on-aleph/56/2) implementation.
+
+In the _**firecracker/microvm.py**_ file I added the `mount_rootfs` method to mount the block device in the case that we
+use jailer and also assign correct permissions. And when the VM goes down, I clear all these configurations in the
+`teardown` process. As linking a block device in a chroot doesn't work, I had to do a workaround that consists of copying
+all the "dm-*" block devices in the chroot and mounting the entire `/dev/mapper` folder in the chroot to make it work. I
+didn't find a better solution to it.
+
+Also, I added support to run a writable root filesystem in Firecracker. I have bypassed all the parts that we pass and
+use the **_"code"_** properties, like the encoding or the entrypoint.
+
+A new instance message example has been added in **_examples/instance_message_from_aleph.json_**.
+
+### Current status
+
+Now the dev-mapper device works well and Firecracker loads it in write state, but we still need to fix 2 things:
+- Route the requests from the CRN to the Firecracker VM on any port, not only using the 8080.
+- ~~- Use the entire hard disk inside VM, because now only detects the size of the rootfs.~~(Done)
diff --git a/vm_supervisor/README.md b/src/aleph/vm/orchestrator/README.md
similarity index 58%
rename from vm_supervisor/README.md
rename to src/aleph/vm/orchestrator/README.md
index b481751d2..c1d22ea0f 100644
--- a/vm_supervisor/README.md
+++ b/src/aleph/vm/orchestrator/README.md
@@ -28,22 +28,25 @@ Intel Skylake, Intel Cascade Lake, AMD Zen2 and ARM64 Neoverse N1.
 
 ### Operating System
 
-These instructions have been tested on Debian 10 Buster, and should work on recent versions
-of Ubuntu as well (20.04+).
+These instructions have been tested on Debian 11 Bullseye, Debian 12 Bookworm and Ubuntu 22.04. ### Hosting providers Bare metal servers from most hosting providers should be compatible with the VM Supervisor. A few hosting providers offer compatible virtual machines. -- Compatible ✓ : DigitalOcean Droplet. -- Incompatible ✖ : AWS EC2. +- Compatible ✓ : DigitalOcean Droplet. AWS ECS Bare Metal. +- Incompatible ✖ : AWS EC2 other than Bare Metal. + +Probably [Google Cloud instances with Nested Virtualization](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances). ### Note on containers While not supported at the moment, it is possible to run the VM Supervisor inside a Docker -container. This will be less secure since the `Jailer` tool used to secure Firecracker MicroVMs -will not run inside containers. Pass the command-lien argument `--no-jailer` to disable the Jailer +container. + +This will be less secure since the `Jailer` tool used to secure Firecracker MicroVMs +will not run inside containers. Pass the command-line argument `--no-jailer` to disable the Jailer when running the VM Supervisor. ## 2. Installation @@ -52,7 +55,8 @@ when running the VM Supervisor. 
```shell apt update -apt install -y git python3 python3-aiohttp sudo acl curl systemd-container +apt install -y git python3 python3-aiohttp python3-msgpack python3-aiodns python3-sqlalchemy python3-setproctitle redis python3-aioredis \ + python3-psutil sudo acl curl systemd-container squashfs-tools debootstrap python3-nftables python3-jsonschema ndppd useradd jailman ``` @@ -60,15 +64,14 @@ useradd jailman from the [Firecracker project releases](https://github.com/firecracker-microvm/firecracker/releases): ```shell mkdir /opt/firecracker -chown $(whoami) /opt/firecracker -curl -fsSL https://github.com/firecracker-microvm/firecracker/releases/download/v0.24.2/firecracker-v0.24.2-x86_64.tgz | tar -xz --directory /opt/firecracker +curl -fsSL https://github.com/firecracker-microvm/firecracker/releases/download/v1.3.3/firecracker-v1.3.3-x86_64.tgz | tar -xz --no-same-owner --directory /opt/firecracker # Link binaries on version-agnostic paths: -ln /opt/firecracker/firecracker-v* /opt/firecracker/firecracker -ln /opt/firecracker/jailer-v* /opt/firecracker/jailer +ln /opt/firecracker/release-*/firecracker-v* /opt/firecracker/firecracker +ln /opt/firecracker/release-*/jailer-v* /opt/firecracker/jailer ``` -### 2.d. Clone this reposotiry on the host machine and enter it. +### 2.d. Clone this repository on the host machine and enter it. ```shell git clone https://github.com/aleph-im/aleph-vm.git @@ -80,28 +83,27 @@ cd aleph-vm/ [PyDantic](https://pydantic-docs.helpmanual.io/) is used to parse and validate Aleph messages. -Via `apt` if available (on Debian 11+, Ubuntu 20.04+): - -```shell -apt install -y python3-pydantic -``` - -else (on Debian 10): ```shell apt install -y --no-install-recommends --no-install-suggests python3-pip -pip3 install pydantic +pip3 install pydantic[dotenv] +pip3 install 'aleph-message==0.4.9' ``` -### 2.f. Setup the jailer working directory: +### 2.f. 
Create the jailer working directory: ```shell -mkdir /srv/jailer +mkdir -p /var/lib/aleph/vm/jailer ``` ### 2.g. Download a Linux kernel +This downloads an optimized kernel built by the Aleph team. + +A more optimized kernel may be made available in the future. +See section _Compile your kernel_ below to build your own. + ```shell -curl -fsSL -o ./kernels/vmlinux.bin https://s3.amazonaws.com/spec.ccfc.min/img/quickstart_guide/x86_64/kernels/vmlinux.bin +curl -fsSL -o ./target/vmlinux.bin https://ipfs.aleph.cloud/ipfs/bafybeiaj2lf6g573jiulzacvkyw4zzav7dwbo5qbeiohoduopwxs2c6vvy ``` ## 3. Running @@ -109,43 +111,53 @@ curl -fsSL -o ./kernels/vmlinux.bin https://s3.amazonaws.com/spec.ccfc.min/img/q Run the VM Supervisor with Python: ```shell export PYTHONPATH=$(pwd) -python3 -m vm_supervisor +python3 -m orchestrator ``` or in debug mode: ```shell -python3 -m vm_supervisor -vv --system-logs +python3 -m orchestrator -vv --system-logs ``` Test accessing the service on -http://localhost:8080/ +http://localhost:4020/ ## 4. Configuration -The VM Supervisor can be configured using command-line arguments: +The VM Supervisor can be configured using command-line arguments or using environment variables. + +List the available command-line arguments using: ```shell -python3 -m vm_supervisor --help +python3 -m orchestrator --help ``` -and using environment variables, which can be found using: + +List available using environment variables using: +```shell +python3 -m orchestrator --print-config --do-not-run +``` + +Configuration environment variables can be stored in a file named `.env` in the local directory. + +Example content for `.env`: ```shell -python3 -m vm_supervisor --print-config --do-not-run +ALEPH_VM_DNS_RESOLUTION=resolvectl +ALEPH_VM_NETWORK_INTERFACE=enp7s0 ``` -## 5. Production configuration +## 6. Production security concerns See advanced security related concerns here: https://github.com/firecracker-microvm/firecracker/blob/main/docs/prod-host-setup.md -## 6. 
Customization +## 7. Customization -### 6.a. Build a runtime +### 7.a. Build a runtime A runtime consist in the root filesystem used by a VM. Runtimes contain a customized init that allows the VM Supervisor to run functions within the MicroVM. -Official Aleph runtimes are built using scripts located in -in [`../runtimes`](../runtimes), and are distributed on the Aleph network. +Official Aleph runtimes are built using scripts located in [`../runtimes`](../../../../runtimes), and are distributed on the Aleph network. To build the default runtime locally: @@ -157,7 +169,7 @@ bash ./create_disk_image.sh cd ../.. ``` -### 6.b. Compile your kernel +### 7.b. Compile your kernel Boot time can be shortened by disabling keyboard support in the kernel. See `dmesg` logs for the exact timing saved. diff --git a/src/aleph/vm/orchestrator/__init__.py b/src/aleph/vm/orchestrator/__init__.py new file mode 100644 index 000000000..d3c1f4225 --- /dev/null +++ b/src/aleph/vm/orchestrator/__init__.py @@ -0,0 +1,3 @@ +from aleph.vm.version import __version__ + +__all__ = ("__version__",) diff --git a/src/aleph/vm/orchestrator/__main__.py b/src/aleph/vm/orchestrator/__main__.py new file mode 100644 index 000000000..9ae637f13 --- /dev/null +++ b/src/aleph/vm/orchestrator/__main__.py @@ -0,0 +1,4 @@ +from .cli import main + +if __name__ == "__main__": + main() diff --git a/src/aleph/vm/orchestrator/alembic.ini b/src/aleph/vm/orchestrator/alembic.ini new file mode 100644 index 000000000..b472631ad --- /dev/null +++ b/src/aleph/vm/orchestrator/alembic.ini @@ -0,0 +1,103 @@ +# A generic, single database configuration. 
+ +[alembic] +# path to migration scripts +;script_location = orchestrator/migrations +script_location = migrations + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s + +# sys.path path, will be prepended to sys.path if present. +# defaults to the current working directory. +prepend_sys_path = . + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python-dateutil library that can be +# installed by adding `alembic[tz]` to the pip requirements +# string value is passed to dateutil.tz.gettz() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the +# "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to migrations/versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the separator specified by "version_path_separator" below. +# version_locations = %(here)s/bar:%(here)s/bat:migrations/versions + +# version path separator; As mentioned above, this is the character used to split +# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. +# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. 
+# Valid values for version_path_separator are: +# +# version_path_separator = : +# version_path_separator = ; +# version_path_separator = space +version_path_separator = os # Use os.pathsep. Default configuration used for new projects. + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +[post_write_hooks] +# post_write_hooks defines scripts or Python functions that are run +# on newly generated revision scripts. See the documentation for further +# detail and examples + +# format using "black" - use the console_scripts runner, against the "black" entrypoint +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/src/aleph/vm/orchestrator/chain.py b/src/aleph/vm/orchestrator/chain.py new file mode 100644 index 000000000..0b4174397 --- /dev/null +++ b/src/aleph/vm/orchestrator/chain.py @@ -0,0 +1,72 @@ +import logging + +from aleph_message.models import Chain +from pydantic import BaseModel, root_validator + +logger = logging.getLogger(__name__) + + +class ChainInfo(BaseModel): + """ + A chain information. 
+ """ + + chain_id: int + rpc: str + standard_token: str | None = None + super_token: str | None = None + testnet: bool = False + active: bool = True + + @property + def token(self) -> str | None: + return self.super_token or self.standard_token + + @root_validator(pre=True) + def check_tokens(cls, values): + if not values.get("standard_token") and not values.get("super_token"): + msg = "At least one of standard_token or super_token must be provided." + raise ValueError(msg) + return values + + +STREAM_CHAINS: dict[Chain | str, ChainInfo] = { + # TESTNETS + "SEPOLIA": ChainInfo( + chain_id=11155111, + rpc="https://eth-sepolia.public.blastapi.io", + standard_token="0xc4bf5cbdabe595361438f8c6a187bdc330539c60", + super_token="0x22064a21fee226d8ffb8818e7627d5ff6d0fc33a", + active=False, + testnet=True, + ), + # MAINNETS + Chain.ETH: ChainInfo( + chain_id=1, + rpc="https://eth-mainnet.public.blastapi.io", + standard_token="0x27702a26126e0B3702af63Ee09aC4d1A084EF628", + active=False, + ), + Chain.AVAX: ChainInfo( + chain_id=43114, + rpc="https://api.avax.network/ext/bc/C/rpc", + super_token="0xc0Fbc4967259786C743361a5885ef49380473dCF", + ), + Chain.BASE: ChainInfo( + chain_id=8453, + rpc="https://base-mainnet.public.blastapi.io", + super_token="0xc0Fbc4967259786C743361a5885ef49380473dCF", + ), +} + + +class InvalidChainError(ValueError): + pass + + +def get_chain(chain: str) -> ChainInfo: + try: + return STREAM_CHAINS[chain] + except KeyError: + msg = f"Unknown chain id for chain {chain}" + raise InvalidChainError(msg) diff --git a/src/aleph/vm/orchestrator/cli.py b/src/aleph/vm/orchestrator/cli.py new file mode 100644 index 000000000..740733e61 --- /dev/null +++ b/src/aleph/vm/orchestrator/cli.py @@ -0,0 +1,379 @@ +import argparse +import asyncio +import contextlib +import logging +import os +import sys +import time +from collections.abc import Callable +from pathlib import Path +from statistics import mean +from typing import cast + +import alembic.command +import 
alembic.config +import sentry_sdk +from aiohttp.web import Request, Response +from aleph_message.models import ItemHash +from sqlalchemy.ext.asyncio import create_async_engine + +from aleph.vm.conf import ALLOW_DEVELOPER_SSH_KEYS, make_db_url, settings +from aleph.vm.models import VmExecution +from aleph.vm.pool import VmPool +from aleph.vm.version import __version__, get_version_from_apt, get_version_from_git + +from . import metrics, supervisor +from .custom_logs import setup_handlers +from .pubsub import PubSub +from .run import run_code_on_event, run_code_on_request, start_persistent_vm + +logger = logging.getLogger(__name__) + + +def parse_args(args): + parser = argparse.ArgumentParser(prog="orchestrator", description="Aleph.im VM Supervisor") + parser.add_argument( + "--system-logs", + action="store_true", + dest="system_logs", + default=settings.PRINT_SYSTEM_LOGS, + ) + parser.add_argument( + "--no-network", + action="store_false", + dest="allow_vm_networking", + default=settings.ALLOW_VM_NETWORKING, + ) + parser.add_argument( + "--no-jailer", + action="store_false", + dest="use_jailer", + default=settings.USE_JAILER, + ) + parser.add_argument("--jailer", action="store_true", dest="use_jailer", default=settings.USE_JAILER) + parser.add_argument( + "--prealloc", + action="store", + type=int, + dest="prealloc_vm_count", + required=False, + default=settings.PREALLOC_VM_COUNT, + ) + parser.add_argument( + "-v", + "--verbose", + dest="loglevel", + help="set loglevel to INFO", + action="store_const", + const=logging.INFO, + default=settings.LOG_LEVEL, + ) + parser.add_argument( + "-vv", + "--very-verbose", + dest="loglevel", + help="set loglevel to DEBUG", + action="store_const", + const=logging.DEBUG, + ) + parser.add_argument( + "-d", + "--debug-asyncio", + dest="debug_asyncio", + help="Enable asyncio debugging", + action="store_true", + default=settings.DEBUG_ASYNCIO, + ) + parser.add_argument( + "-p", + "--print-settings", + dest="print_settings", + 
action="store_true", + default=False, + ) + parser.add_argument( + "-n", + "--do-not-run", + dest="do_not_run", + action="store_true", + default=False, + ) + parser.add_argument( + "--profile", + dest="profile", + action="store_true", + default=False, + help="Add extra info for profiling", + ) + parser.add_argument( + "--benchmark", + dest="benchmark", + type=int, + default=0, + help="Number of benchmarks to run", + ) + parser.add_argument( + "-f", + "--fake-data-program", + dest="fake_data_program", + type=str, + default=None, + help="Path to project containing fake data", + ) + parser.add_argument( + "-i", + "--run-test-instance", + dest="run_test_instance", + action="store_true", + default=False, + help="Run a test instance from the network instead of starting the entire supervisor", + ) + parser.add_argument( + "-k", + "--run-fake-instance", + dest="run_fake_instance", + action="store_true", + default=False, + help="Run a fake instance from a local rootfs instead of starting the entire supervisor", + ) + parser.add_argument( + "-r", + "--fake-instance-base", + dest="fake_instance_base", + type=str, + default=settings.FAKE_INSTANCE_BASE, + help="Filesystem path of the base for the rootfs of fake instances. An empty value signals a download instead.", + ) + parser.add_argument( + "--developer-ssh-keys", + dest="use_developer_ssh_keys", + action="store_true", + default=False, + help="Authorize the developer's SSH keys to connect instead of those specified in the message", + ) + return parser.parse_args(args) + + +class FakeRequest: + headers: dict[str, str] + raw_headers: list[tuple[bytes, bytes]] + match_info: dict + method: str + query_string: str + read: Callable + + +async def benchmark(runs: int): + """Measure program performance by immediately running the supervisor + with fake requests. 
+ """ + ref = ItemHash("cafecafecafecafecafecafecafecafecafecafecafecafecafecafecafecafe") + settings.FAKE_DATA_PROGRAM = settings.BENCHMARK_FAKE_DATA_PROGRAM + + fake_request = FakeRequest() # type: ignore[operator] + fake_request.match_info = {"ref": ref, "suffix": "/"} + fake_request.method = "GET" + fake_request.query_string = "" + + fake_request.headers = {"host": "127.0.0.1", "content-type": "application/json"} + fake_request.raw_headers = [(name.encode(), value.encode()) for name, value in fake_request.headers.items()] + + async def fake_read() -> bytes: + return b"" + + fake_request.read = fake_read + + logger.info("--- Start benchmark ---") + + bench: list[float] = [] + + loop = asyncio.get_event_loop() + pool = VmPool(loop) + pool.setup() + + # Does not make sense in benchmarks + settings.WATCH_FOR_MESSAGES = False + settings.WATCH_FOR_UPDATES = False + + # Finish setting up the settings + settings.setup() + settings.check() + + # First test all methods + settings.REUSE_TIMEOUT = 0.1 + for path in ( + "/", + "/lifespan", + "/environ", + "/messages", + "/internet", + "/post_a_message", + "/cache/set/foo/bar", + "/cache/get/foo", + "/cache/keys", + ): + fake_request.match_info["suffix"] = path + response: Response = await run_code_on_request( + vm_hash=ref, path=path, pool=pool, request=cast(Request, fake_request) + ) + assert response.status == 200 + + # Disable VM timeout to exit benchmark properly + settings.REUSE_TIMEOUT = 0 if runs == 1 else 0.1 + path = "/" + for _run in range(runs): + t0 = time.time() + fake_request.match_info["suffix"] = path + response2: Response = await run_code_on_request( + vm_hash=ref, path=path, pool=pool, request=cast(Request, fake_request) + ) + assert response2.status == 200 + bench.append(time.time() - t0) + + logger.info(f"BENCHMARK: n={len(bench)} avg={mean(bench):03f} min={min(bench):03f} max={max(bench):03f}") + logger.info(bench) + + result = await run_code_on_event(vm_hash=ref, event=None, pubsub=PubSub(), pool=pool) 
+ print("Event result", result) + + +async def start_instance(item_hash: ItemHash, pubsub: PubSub | None, pool) -> VmExecution: + """Run an instance from an InstanceMessage.""" + return await start_persistent_vm(item_hash, pubsub, pool) + + +async def run_instances(instances: list[ItemHash]) -> None: + """Run instances from a list of message identifiers.""" + logger.info(f"Instances to run: {instances}") + loop = asyncio.get_event_loop() + pool = VmPool(loop) + # The main program uses a singleton pubsub instance in order to watch for updates. + # We create another instance here since that singleton is not initialized yet. + # Watching for updates on this instance will therefore not work. + pubsub: PubSub | None = None + + await asyncio.gather(*[start_instance(instance_id, pubsub, pool) for instance_id in instances]) + + await asyncio.Event().wait() # wait forever + + +@contextlib.contextmanager +def change_dir(directory: Path): + current_directory = Path.cwd() + try: + os.chdir(directory) + yield + finally: + os.chdir(current_directory) + + +def run_db_migrations(connection): + project_dir = Path(__file__).parent + + alembic_cfg = alembic.config.Config("alembic.ini") + alembic_cfg.attributes["configure_logger"] = False + alembic_cfg.attributes["connection"] = connection + logging.getLogger("alembic").setLevel(logging.CRITICAL) + + with change_dir(project_dir): + alembic.command.upgrade(alembic_cfg, "head") + + +async def run_async_db_migrations(): + async_engine = create_async_engine(make_db_url(), echo=False) + async with async_engine.begin() as conn: + await conn.run_sync(run_db_migrations) + + +def main(): + args = parse_args(sys.argv[1:]) + + log_format = ( + "%(relativeCreated)4f | %(levelname)s | %(message)s" + if args.profile + else "%(asctime)s | %(levelname)s %(name)s:%(lineno)s | %(message)s" + ) + # log_format = "[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s" + + handlers = setup_handlers(args, log_format) + 
logging.basicConfig( + level=args.loglevel, + format=log_format, + handlers=handlers, + ) + + logging.getLogger("aiosqlite").setLevel(settings.LOG_LEVEL) + logging.getLogger("sqlalchemy.engine").setLevel(settings.LOG_LEVEL) + + settings.update( + USE_JAILER=args.use_jailer, + PRINT_SYSTEM_LOGS=args.system_logs, + PREALLOC_VM_COUNT=args.prealloc_vm_count, + ALLOW_VM_NETWORKING=args.allow_vm_networking, + FAKE_DATA_PROGRAM=args.fake_data_program, + DEBUG_ASYNCIO=args.debug_asyncio, + FAKE_INSTANCE_BASE=args.fake_instance_base, + ) + + if args.run_fake_instance: + settings.USE_FAKE_INSTANCE_BASE = True + + if args.use_developer_ssh_keys: + settings.USE_DEVELOPER_SSH_KEYS = ALLOW_DEVELOPER_SSH_KEYS + + if sentry_sdk: + if settings.SENTRY_DSN: + sentry_sdk.init( + dsn=settings.SENTRY_DSN, + server_name=settings.DOMAIN_NAME, + # Set traces_sample_rate to 1.0 to capture 100% + # of transactions for performance monitoring. + # We recommend adjusting this value in production. + traces_sample_rate=1.0, + release=__version__, + ) + sentry_sdk.set_context( + "version", + { + "git": get_version_from_git(), + "apt": get_version_from_apt(), + }, + ) + else: + logger.debug("Sentry SDK found with no DSN configured.") + else: + logger.debug( + "Sentry SDK not found. \n" + "Use `pip install sentry-sdk` and configure SENTRY_DSN if you'd like to monitor errors." 
+ ) + + settings.setup() + if args.print_settings: + print(settings.display()) + + settings.check() + + logger.debug("Initialising the DB...") + # Check and create execution database + engine = metrics.setup_engine() + asyncio.run(metrics.create_tables(engine)) + # After creating it run the DB migrations + asyncio.run(run_async_db_migrations()) + logger.debug("DB up to date.") + + if args.benchmark > 0: + asyncio.run(benchmark(runs=args.benchmark), debug=args.debug_asyncio) + logger.info("Finished") + sys.exit(0) + elif args.do_not_run: + logger.info("Option --do-not-run, exiting") + elif args.run_test_instance: + asyncio.run(run_instances([ItemHash(settings.TEST_INSTANCE_ID)])) + logger.info("Finished") + sys.exit(0) + elif args.run_fake_instance: + asyncio.run(run_instances([ItemHash(settings.FAKE_INSTANCE_ID)])) + logger.info("Finished") + sys.exit(0) + else: + supervisor.run() diff --git a/src/aleph/vm/orchestrator/custom_logs.py b/src/aleph/vm/orchestrator/custom_logs.py new file mode 100644 index 000000000..c6a2a96e8 --- /dev/null +++ b/src/aleph/vm/orchestrator/custom_logs.py @@ -0,0 +1,53 @@ +import contextlib +import logging +from contextvars import ContextVar + +from aleph_message.models import ItemHash + +from aleph.vm.models import VmExecution + +ctx_current_execution: ContextVar[VmExecution | None] = ContextVar("current_execution") +ctx_current_execution_hash: ContextVar[ItemHash | None] = ContextVar("current_execution_hash") + + +@contextlib.contextmanager +def set_vm_for_logging(vm_hash): + token = ctx_current_execution_hash.set(vm_hash) + try: + yield + finally: + ctx_current_execution_hash.reset(token) + + +class InjectingFilter(logging.Filter): + """ + A filter which injects context-specific information into logs + """ + + def filter(self, record): + vm_hash = ctx_current_execution_hash.get(None) + if not vm_hash: + vm_execution: VmExecution | None = ctx_current_execution.get(None) + if vm_execution: + vm_hash = vm_execution.vm_hash + + if not 
vm_hash: + return False + + record.vm_hash = vm_hash + return True + + +def setup_handlers(args, log_format): + # Set up two custom handler, one that will add the VM information if present and the other print if not + execution_handler = logging.StreamHandler() + execution_handler.addFilter(InjectingFilter()) + execution_handler.setFormatter( + logging.Formatter("%(asctime)s | %(levelname)s %(name)s:%(lineno)s | {%(vm_hash)s} %(message)s ") + ) + non_execution_handler = logging.StreamHandler() + non_execution_handler.addFilter(lambda x: ctx_current_execution_hash.get(None) is None) + non_execution_handler.setFormatter( + logging.Formatter("%(asctime)s | %(levelname)s %(name)s:%(lineno)s | %(message)s ") + ) + return [non_execution_handler, execution_handler] diff --git a/src/aleph/vm/orchestrator/messages.py b/src/aleph/vm/orchestrator/messages.py new file mode 100644 index 000000000..f1826ba2c --- /dev/null +++ b/src/aleph/vm/orchestrator/messages.py @@ -0,0 +1,89 @@ +import asyncio +import copy + +from aiohttp import ClientConnectorError, ClientResponseError, ClientSession +from aiohttp.web_exceptions import HTTPNotFound, HTTPServiceUnavailable +from aleph_message.models import ExecutableMessage, ItemHash, MessageType +from aleph_message.status import MessageStatus + +from aleph.vm.conf import settings +from aleph.vm.storage import get_executable_message, get_latest_amend + + +async def try_get_message(ref: str) -> ExecutableMessage: + """Get the message or raise an aiohttp HTTP error""" + try: + return await get_executable_message(ref) + except ClientConnectorError as error: + raise HTTPServiceUnavailable(reason="Aleph Connector unavailable") from error + except ClientResponseError as error: + if error.status == HTTPNotFound.status_code: + raise HTTPNotFound(reason="Hash not found", text=f"Hash not found: {ref}") from error + else: + raise + + +async def get_latest_ref(item_hash: str) -> str: + try: + return await get_latest_amend(item_hash) + except 
ClientConnectorError as error: + raise HTTPServiceUnavailable(reason="Aleph Connector unavailable") from error + except ClientResponseError as error: + if error.status == HTTPNotFound.status_code: + raise HTTPNotFound(reason="Hash not found", text=f"Hash not found: {item_hash}") from error + else: + raise + + +async def update_with_latest_ref(obj): + """ + Update the reference `ref` inplace if a newer version is available. + + Useful to update references in parallel with asyncio.gather. + """ + if hasattr(obj, "use_latest") and obj.use_latest: + obj.ref = await get_latest_ref(obj.ref) + else: + return obj + + +async def update_message(message: ExecutableMessage): + if message.type == MessageType.program: + # Load amends + await asyncio.gather( + update_with_latest_ref(message.content.runtime), + update_with_latest_ref(message.content.code), + update_with_latest_ref(message.content.data), + *(update_with_latest_ref(volume) for volume in (message.content.volumes or [])), + ) + else: + assert message.type == MessageType.instance + await asyncio.gather( + update_with_latest_ref(message.content.rootfs.parent), + *(update_with_latest_ref(volume) for volume in (message.content.volumes or [])), + ) + + +async def load_updated_message( + ref: ItemHash, +) -> tuple[ExecutableMessage, ExecutableMessage]: + original_message = await try_get_message(ref) + message = copy.deepcopy(original_message) + await update_message(message) + return message, original_message + + +async def get_message_status(item_hash: ItemHash) -> MessageStatus: + """ + Fetch the status of an execution from the reference API server. + We use a normal API call to the CCN instead to use the connector because we want to get the updated status of the + message and bypass the messages cache. 
+ """ + async with ClientSession() as session: + url = f"{settings.API_SERVER}/api/v0/messages/{item_hash}" + resp = await session.get(url) + # Raise an error if the request failed + resp.raise_for_status() + + resp_data = await resp.json() + return resp_data["status"] diff --git a/src/aleph/vm/orchestrator/metrics.py b/src/aleph/vm/orchestrator/metrics.py new file mode 100644 index 000000000..6c9b8eea0 --- /dev/null +++ b/src/aleph/vm/orchestrator/metrics.py @@ -0,0 +1,128 @@ +import logging +from collections.abc import Iterable +from pathlib import Path +from typing import Any +from uuid import UUID + +from sqlalchemy import ( + JSON, + Boolean, + Column, + DateTime, + Float, + Integer, + String, + delete, + select, +) +from sqlalchemy.ext.asyncio import ( + AsyncEngine, + AsyncSession, + async_sessionmaker, + create_async_engine, +) + +try: + from sqlalchemy.orm import declarative_base +except ImportError: + from sqlalchemy.ext.declarative import declarative_base + +from aleph.vm.conf import make_db_url, settings + +AsyncSessionMaker: async_sessionmaker[AsyncSession] + +logger = logging.getLogger(__name__) + +Base: Any = declarative_base() + + +def setup_engine(): + global AsyncSessionMaker + engine = create_async_engine(make_db_url(), echo=False) + AsyncSessionMaker = async_sessionmaker(engine, expire_on_commit=False, class_=AsyncSession) + return engine + + +async def create_tables(engine: AsyncEngine): + async with engine.begin() as conn: + await conn.run_sync(Base.metadata.create_all) + + +class ExecutionRecord(Base): + __tablename__ = "executions" + + uuid = Column(String, primary_key=True) + vm_hash = Column(String, nullable=False) + vm_id = Column(Integer, nullable=True) + + time_defined = Column(DateTime, nullable=False) + time_prepared = Column(DateTime) + time_started = Column(DateTime) + time_stopping = Column(DateTime) + + cpu_time_user = Column(Float, nullable=True) + cpu_time_system = Column(Float, nullable=True) + + io_read_count = Column(Integer, 
nullable=True) + io_write_count = Column(Integer, nullable=True) + io_read_bytes = Column(Integer, nullable=True) + io_write_bytes = Column(Integer, nullable=True) + + vcpus = Column(Integer, nullable=False) + memory = Column(Integer, nullable=False) + network_tap = Column(String, nullable=True) + + message = Column(JSON, nullable=True) + original_message = Column(JSON, nullable=True) + persistent = Column(Boolean, nullable=True) + + gpus = Column(JSON, nullable=True) + + def __repr__(self): + return f"" + + def to_dict(self): + return {c.name: getattr(self, c.name) for c in self.__table__.c} + + +async def save_execution_data(execution_uuid: UUID, execution_data: str): + """Save the execution data in a file on disk""" + directory = Path(settings.EXECUTION_LOG_DIRECTORY) + directory.mkdir(exist_ok=True) + (directory / f"{execution_uuid}.json").write_text(execution_data) + + +async def save_record(record: ExecutionRecord): + """Record the resource usage in database""" + async with AsyncSessionMaker() as session: # Use AsyncSession in a context manager + session.add(record) + await session.commit() # Use await for commit + + +async def delete_record(execution_uuid: str): + """Delete the resource usage in database""" + async with AsyncSessionMaker() as session: + try: + statement = delete(ExecutionRecord).where(ExecutionRecord.uuid == execution_uuid) + await session.execute(statement) + await session.commit() + finally: + await session.close() + + +async def get_execution_records() -> Iterable[ExecutionRecord]: + """Get the execution records from the database.""" + async with AsyncSessionMaker() as session: # Use AsyncSession in a context manager + result = await session.execute(select(ExecutionRecord)) # Use execute for querying + executions = result.scalars().all() + await session.commit() + return executions + + +async def get_last_record_for_vm(vm_hash) -> ExecutionRecord | None: + """Get the execution records from the database.""" + async with AsyncSessionMaker() 
as session: # Use AsyncSession in a context manager + result = await session.execute( + select(ExecutionRecord).where(ExecutionRecord.vm_hash == vm_hash).limit(1) + ) # Use execute for querying + return result.scalar() diff --git a/src/aleph/vm/orchestrator/migrations/__init__.py b/src/aleph/vm/orchestrator/migrations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/orchestrator/migrations/env.py b/src/aleph/vm/orchestrator/migrations/env.py new file mode 100644 index 000000000..2cf116bb6 --- /dev/null +++ b/src/aleph/vm/orchestrator/migrations/env.py @@ -0,0 +1,69 @@ +from alembic import context +from sqlalchemy import create_engine + +from aleph.vm.conf import make_db_url + +# Auto-generate migrations +from aleph.vm.orchestrator.metrics import Base + +# # this is the Alembic Config object, which provides +# # access to the values within the .ini file in use. +# config = context.config +# +# # Interpret the config file for Python logging. +# # This line sets up loggers basically. +# if config.config_file_name is not None: +# fileConfig(config.config_file_name) + + +target_metadata = Base.metadata + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = make_db_url() + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode. 
+ + In this scenario we need to create an Engine + and associate a connection with the context. + + """ + connectable = create_engine(make_db_url()) + with connectable.connect() as connection: + context.configure(connection=connection, target_metadata=target_metadata) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/src/aleph/vm/orchestrator/migrations/script.py.mako b/src/aleph/vm/orchestrator/migrations/script.py.mako new file mode 100644 index 000000000..55df2863d --- /dev/null +++ b/src/aleph/vm/orchestrator/migrations/script.py.mako @@ -0,0 +1,24 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. +revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} +branch_labels = ${repr(branch_labels)} +depends_on = ${repr(depends_on)} + + +def upgrade() -> None: + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + ${downgrades if downgrades else "pass"} diff --git a/src/aleph/vm/orchestrator/migrations/versions/0001_bbb12a12372e_execution_records.py b/src/aleph/vm/orchestrator/migrations/versions/0001_bbb12a12372e_execution_records.py new file mode 100644 index 000000000..b10e8477d --- /dev/null +++ b/src/aleph/vm/orchestrator/migrations/versions/0001_bbb12a12372e_execution_records.py @@ -0,0 +1,63 @@ +"""execution records + +Revision ID: bbb12a12372e +Revises: +Create Date: 2022-09-28 18:52:16.431200 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+from sqlalchemy import create_engine +from sqlalchemy.engine import reflection + +from aleph.vm.conf import make_db_url + +revision = "bbb12a12372e" +down_revision = None +branch_labels = None +depends_on = None + + +def upgrade() -> None: + engine = create_engine(make_db_url()) + inspector = reflection.Inspector.from_engine(engine) + + # The table already exists on most CRNs. + tables = inspector.get_table_names() + if "records" not in tables: + op.create_table( + "records", + sa.Column("uuid", sa.String(), nullable=False), + sa.Column("vm_hash", sa.String(), nullable=False), + sa.Column("time_defined", sa.DateTime(), nullable=False), + sa.Column("time_prepared", sa.DateTime(), nullable=True), + sa.Column("time_started", sa.DateTime(), nullable=True), + sa.Column("time_stopping", sa.DateTime(), nullable=True), + sa.Column("cpu_time_user", sa.Float(), nullable=True), + sa.Column("cpu_time_system", sa.Float(), nullable=True), + sa.Column("io_read_count", sa.Integer(), nullable=True), + sa.Column("io_write_count", sa.Integer(), nullable=True), + sa.Column("io_read_bytes", sa.Integer(), nullable=True), + sa.Column("io_write_bytes", sa.Integer(), nullable=True), + sa.Column("vcpus", sa.Integer(), nullable=False), + sa.Column("memory", sa.Integer(), nullable=False), + sa.Column("network_tap", sa.String(), nullable=True), + sa.PrimaryKeyConstraint("uuid"), + ) + + # Support intermediate versions that have the records table + # but without the network_tap column + records_columns = [column["name"] for column in inspector.get_columns("records")] + if "network_tap" not in records_columns: + op.add_column("records", sa.Column("network_tap", sa.String(), nullable=True)) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_table("records") + # ### end Alembic commands ### diff --git a/src/aleph/vm/orchestrator/migrations/versions/0002_5c6ae643c69b_add_gpu_column_to_executions_table.py b/src/aleph/vm/orchestrator/migrations/versions/0002_5c6ae643c69b_add_gpu_column_to_executions_table.py new file mode 100644 index 000000000..4b739323b --- /dev/null +++ b/src/aleph/vm/orchestrator/migrations/versions/0002_5c6ae643c69b_add_gpu_column_to_executions_table.py @@ -0,0 +1,38 @@ +"""add gpu table + +Revision ID: 5c6ae643c69b +Revises: bbb12a12372e +Create Date: 2024-12-09 19:40:19.279735 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +from sqlalchemy import create_engine +from sqlalchemy.engine import reflection + +from aleph.vm.conf import make_db_url + +revision = "5c6ae643c69b" +down_revision = "bbb12a12372e" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + engine = create_engine(make_db_url()) + inspector = reflection.Inspector.from_engine(engine) + + # The table already exists on most CRNs. 
+ tables = inspector.get_table_names() + if "executions" in tables: + columns = inspector.get_columns("executions") + column_names = [c["name"] for c in columns] + if "gpus" not in column_names: + op.add_column("executions", sa.Column("gpus", sa.JSON(), nullable=True)) + + +def downgrade() -> None: + op.drop_column("executions", "gpus") diff --git a/src/aleph/vm/orchestrator/migrations/versions/__init__.py b/src/aleph/vm/orchestrator/migrations/versions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/aleph/vm/orchestrator/payment.py b/src/aleph/vm/orchestrator/payment.py new file mode 100644 index 000000000..f5a79bbca --- /dev/null +++ b/src/aleph/vm/orchestrator/payment.py @@ -0,0 +1,143 @@ +import asyncio +import logging +from collections.abc import Iterable +from decimal import Decimal + +import aiohttp +from aleph_message.models import ItemHash, PaymentType +from eth_typing import HexAddress +from eth_utils import from_wei +from superfluid import CFA_V1, Web3FlowInfo + +from aleph.vm.conf import settings +from aleph.vm.models import VmExecution +from aleph.vm.utils import to_normalized_address + +from .chain import ChainInfo, InvalidChainError, get_chain + +logger = logging.getLogger(__name__) + + +async def fetch_balance_of_address(address: str) -> Decimal: + """ + Get the balance of the user from the PyAleph API. 
+ + API Endpoint: + GET /api/v0/addresses/{address}/balance + + For more details, see the PyAleph API documentation: + https://github.com/aleph-im/pyaleph/blob/master/src/aleph/web/controllers/routes.py#L62 + """ + + async with aiohttp.ClientSession() as session: + url = f"{settings.API_SERVER}/api/v0/addresses/{address}/balance" + resp = await session.get(url) + + # Consider the balance as null if the address is not found + if resp.status == 404: + return Decimal(0) + + # Raise an error if the request failed + resp.raise_for_status() + + resp_data = await resp.json() + return resp_data["balance"] + + +async def fetch_execution_flow_price(item_hash: ItemHash) -> Decimal: + """Fetch the flow price of an execution from the reference API server.""" + async with aiohttp.ClientSession() as session: + url = f"{settings.API_SERVER}/api/v0/price/{item_hash}" + resp = await session.get(url) + # Raise an error if the request failed + resp.raise_for_status() + + resp_data = await resp.json() + required_flow: float = resp_data["required_tokens"] + payment_type: str | None = resp_data["payment_type"] + + if payment_type is None: + msg = "Payment type must be specified in the message" + raise ValueError(msg) + elif payment_type != PaymentType.superfluid: + msg = f"Payment type {payment_type} is not supported" + raise ValueError(msg) + + return Decimal(required_flow) + + +async def fetch_execution_hold_price(item_hash: ItemHash) -> Decimal: + """Fetch the hold price of an execution from the reference API server.""" + async with aiohttp.ClientSession() as session: + url = f"{settings.API_SERVER}/api/v0/price/{item_hash}" + resp = await session.get(url) + # Raise an error if the request failed + resp.raise_for_status() + + resp_data = await resp.json() + required_hold: float = resp_data["required_tokens"] + payment_type: str | None = resp_data["payment_type"] + + if payment_type not in (None, PaymentType.hold): + msg = f"Payment type {payment_type} is not supported" + raise 
def _normalized_address(label: str, address: str) -> HexAddress:
    """Normalize a blockchain address, raising InvalidAddressError on failure.

    `label` names the role of the address ("token", "sender", "receiver") and is
    only used to build the error message.
    """
    try:
        return to_normalized_address(address)
    except ValueError as error:
        msg = f"Invalid {label} address '{address}' - {error.args}"
        raise InvalidAddressError(msg) from error


async def get_stream(sender: str, receiver: str, chain: str) -> Decimal:
    """
    Get the stream of the user from the Superfluid API.
    See https://community.aleph.im/t/pay-as-you-go-using-superfluid/98/11

    Returns the current flow rate between `sender` and `receiver`, converted
    from wei to ether units, as a Decimal.

    Raises:
        InvalidChainError: if `chain` is not active for superfluid.
        InvalidAddressError: if the token, sender or receiver address is invalid.
    """
    chain_info: ChainInfo = get_chain(chain=chain)
    if not chain_info.active:
        msg = f"Chain : {chain} is not active for superfluid"
        raise InvalidChainError(msg)

    superfluid_instance = CFA_V1(chain_info.rpc, chain_info.chain_id)

    # DRY: the three addresses were previously validated by three identical
    # try/except blocks; the helper produces byte-identical error messages.
    super_token: HexAddress = _normalized_address("token", chain_info.super_token)
    sender_address: HexAddress = _normalized_address("sender", sender)
    receiver_address: HexAddress = _normalized_address("receiver", receiver)

    # The superfluid SDK performs a blocking web3 network call; run it in a
    # worker thread so the event loop is not blocked while waiting.
    # `get_running_loop` replaces the deprecated `get_event_loop` in coroutines.
    loop = asyncio.get_running_loop()
    flow_data: Web3FlowInfo = await loop.run_in_executor(
        None, superfluid_instance.get_flow, super_token, sender_address, receiver_address
    )
    # TODO: Implement and use the SDK to make the conversion
    stream = from_wei(flow_data["flowRate"], "ether")
    return Decimal(stream)
+""" + +import asyncio +import logging +from collections.abc import Hashable + +from aleph_message.models import AlephMessage, ChainRef, ItemHash + +logger = logging.getLogger(__name__) + + +class PubSub: + subscribers: dict[Hashable, set[asyncio.Queue[set]]] + + def __init__(self): + self.subscribers = {} + + async def subscribe(self, key): + queue: asyncio.Queue[AlephMessage] = asyncio.Queue() + self.subscribers.setdefault(key, set()).add(queue) + await queue.get() + + # Cleanup: remove the queue from the subscribers + subscriber = self.subscribers.get(key) + if subscriber: + subscriber.discard(queue) + # Remove keys with no remaining queue + if not self.subscribers.get(key): + self.subscribers.pop(key) + + async def msubscribe(self, *keys): + """Subscribe to multiple keys""" + keys = tuple(key for key in keys if key is not None) + logger.debug(f"msubscribe({keys})") + + queue: asyncio.Queue[AlephMessage] = asyncio.Queue() + + # Register the queue on all keys + for key in keys: + self.subscribers.setdefault(key, set()).add(queue) + + # Wait for any subscription + await queue.get() + + # Cleanup: remove the queue from the subscribers + for key in keys: + for subscriber in list(self.subscribers.values()): + subscriber.discard(queue) + # Remove keys with no remaining queue (empty set remaining) + if self.subscribers.get(key) == set(): + self.subscribers.pop(key) + + async def publish(self, key: ItemHash | str | ChainRef, value: AlephMessage): + for queue in self.subscribers.get(key, ()): + await queue.put(value) diff --git a/src/aleph/vm/orchestrator/reactor.py b/src/aleph/vm/orchestrator/reactor.py new file mode 100644 index 000000000..785f2c233 --- /dev/null +++ b/src/aleph/vm/orchestrator/reactor.py @@ -0,0 +1,77 @@ +import logging +from collections.abc import Coroutine + +from aleph_message.models import AlephMessage +from aleph_message.models.execution.environment import Subscription + +from aleph.vm.pool import VmPool +from aleph.vm.utils import 
class Reactor:
    """Dispatches incoming aleph.im messages to subscribed programs.

    Programs may declare message subscriptions (`content.on.message`); when a
    triggered message matches one of a registered program's subscriptions, the
    program is executed with the message as its event payload.
    """

    pubsub: PubSub
    pool: VmPool
    listeners: list[AlephMessage]

    def __init__(self, pubsub: PubSub, pool: VmPool):
        self.pubsub = pubsub
        self.pool = pool
        self.listeners = []

    async def trigger(self, message: AlephMessage):
        """Schedule an execution for every listener whose subscription matches `message`."""
        coroutines: list[Coroutine] = []

        for listener in self.listeners:
            if not listener.content.on.message:
                # FIX: this was an r"" raw string, so {listener.item_hash}
                # was logged literally instead of being interpolated.
                logger.warning(
                    f"Program with no subscription was registered in reactor listeners: {listener.item_hash}"
                )
                continue

            for subscription in listener.content.on.message:
                if subscription_matches(subscription, message):
                    vm_hash = listener.item_hash
                    event = message.json()
                    # Register the listener in the list of coroutines to run asynchronously:
                    coroutines.append(run_code_on_event(vm_hash, event, self.pubsub, pool=self.pool))
                    # One execution per listener, even if several subscriptions match.
                    break

        # Call all listeners asynchronously from the event loop:
        for coroutine in coroutines:
            create_task_log_exceptions(coroutine)

    def register(self, message: AlephMessage):
        """Add `message` to the listeners if it subscribes to message events."""
        if message.content.on.message:
            self.listeners.append(message)
        else:
            logger.debug(f"Program with no subscription cannot be registered in reactor listeners: {message.item_hash}")
@lru_cache
def get_machine_properties() -> MachineProperties:
    """Return static hardware properties: architecture, CPU vendor, SEV features.

    The result is cached because these values should not change while the
    supervisor is running. In the future, some properties may have to be
    fetched from within a VM.
    """
    cpu_info = cpuinfo.get_cpu_info()  # Slow call, hence the lru_cache above.

    # py-cpuinfo renamed its keys across versions; try the old name first and
    # fall back to the new one.
    architecture = cpu_info.get("raw_arch_string", cpu_info.get("arch_string_raw"))
    vendor = cpu_info.get("vendor_id", cpu_info.get("vendor_id_raw"))

    # Advertise only the AMD SEV feature levels actually supported by the host.
    sev_features = []
    if check_amd_sev_supported():
        sev_features.append("sev")
    if check_amd_sev_es_supported():
        sev_features.append("sev_es")
    if check_amd_sev_snp_supported():
        sev_features.append("sev_snp")

    cpu_properties = CpuProperties(
        architecture=architecture,
        vendor=vendor,
        features=sev_features,
    )
    return MachineProperties(cpu=cpu_properties)
period=UsagePeriod( + start_timestamp=period_start, + duration_seconds=60, + ), + properties=machine_properties, + gpu=get_machine_gpus(request), + ) + + return web.json_response(text=usage.json(exclude_none=True)) + + +@cors_allow_all +async def about_certificates(request: web.Request): + """Public endpoint to expose platform certificates for confidential computing.""" + + if not settings.ENABLE_CONFIDENTIAL_COMPUTING: + return web.HTTPBadRequest(reason="Confidential computing setting not enabled on that server") + + sev_client: SevClient = request.app["sev_client"] + + return web.FileResponse(await sev_client.get_certificates()) + + +class Allocation(BaseModel): + """An allocation is the set of resources that are currently allocated on this orchestrator. + It contains the item_hashes of all persistent VMs, instances, on-demand VMs and jobs. + """ + + persistent_vms: set[ItemHash] = Field(default_factory=set) + instances: set[ItemHash] = Field(default_factory=set) + on_demand_vms: set[ItemHash] | None = None + jobs: set[ItemHash] | None = None + + +class VMNotification(BaseModel): + """A notification to the orchestrator that a VM has been created or destroyed. + This is typically sent by a user that just created a VM in order to quickly ensure the creation of the VM. 
+ """ + + instance: ItemHash diff --git a/src/aleph/vm/orchestrator/run.py b/src/aleph/vm/orchestrator/run.py new file mode 100644 index 000000000..f82a8ae17 --- /dev/null +++ b/src/aleph/vm/orchestrator/run.py @@ -0,0 +1,287 @@ +import asyncio +import logging +from typing import Any + +import msgpack +from aiohttp import ClientResponseError, web +from aiohttp.web_exceptions import ( + HTTPBadGateway, + HTTPBadRequest, + HTTPInternalServerError, +) +from aleph_message.models import ItemHash +from msgpack import UnpackValueError +from multidict import CIMultiDict + +from aleph.vm.conf import settings +from aleph.vm.controllers.firecracker.program import ( + FileTooLargeError, + ResourceDownloadError, + VmSetupError, +) +from aleph.vm.hypervisors.firecracker.microvm import MicroVMFailedInitError +from aleph.vm.models import VmExecution +from aleph.vm.pool import VmPool +from aleph.vm.utils import HostNotFoundError + +from .messages import load_updated_message +from .pubsub import PubSub + +logger = logging.getLogger(__name__) + + +async def build_asgi_scope(path: str, request: web.Request) -> dict[str, Any]: + # ASGI mandates lowercase header names + headers = tuple((name.lower(), value) for name, value in request.raw_headers) + return { + "type": "http", + "path": path, + "method": request.method, + "query_string": request.query_string, + "headers": headers, + "body": await request.read(), + } + + +async def build_event_scope(event) -> dict[str, Any]: + """Build an ASGI scope for an event.""" + return { + "type": "aleph.message", + "body": event, + } + + +async def create_vm_execution(vm_hash: ItemHash, pool: VmPool, persistent: bool = False) -> VmExecution: + message, original_message = await load_updated_message(vm_hash) + pool.message_cache[vm_hash] = message + + logger.debug(f"Message: {message.json(indent=4, sort_keys=True, exclude_none=True)}") + + execution = await pool.create_a_vm( + vm_hash=vm_hash, + message=message.content, + 
async def create_vm_execution_or_raise_http_error(vm_hash: ItemHash, pool: VmPool) -> VmExecution:
    """Create a new VM execution for `vm_hash`, translating setup failures
    into aiohttp HTTP errors suitable for returning to the API caller.

    On most failures the partially-created VM is removed from the pool
    (`pool.forget_vm`) so that a later request can retry from scratch.
    """
    try:
        return await create_vm_execution(vm_hash=vm_hash, pool=pool)
    except ResourceDownloadError as error:
        # The code, runtime or data volume could not be fetched from the network.
        logger.exception(error)
        pool.forget_vm(vm_hash=vm_hash)
        raise HTTPBadRequest(reason="Code, runtime or data not available") from error
    except FileTooLargeError as error:
        # NOTE(review): unlike the other branches, this one neither logs the
        # error nor forgets the VM — confirm whether that is intentional.
        raise HTTPInternalServerError(reason=error.args[0]) from error
    except VmSetupError as error:
        logger.exception(error)
        pool.forget_vm(vm_hash=vm_hash)
        raise HTTPInternalServerError(reason="Error during vm initialisation") from error
    except MicroVMFailedInitError as error:
        logger.exception(error)
        pool.forget_vm(vm_hash=vm_hash)
        raise HTTPInternalServerError(reason="Error during runtime initialisation") from error
    except HostNotFoundError as error:
        logger.exception(error)
        pool.forget_vm(vm_hash=vm_hash)
        raise HTTPInternalServerError(reason="Host did not respond to ping") from error
    except ClientResponseError as error:
        logger.exception(error)
        # A 404 from the API server means the message hash itself is unknown.
        if error.status == 404:
            raise HTTPInternalServerError(reason=f"Item hash {vm_hash} not found") from error
        else:
            raise HTTPInternalServerError(reason=f"Error downloading {vm_hash}") from error
    except Exception as error:
        # Catch-all: keep the orchestrator alive and report a generic error.
        logger.exception(error)
        pool.forget_vm(vm_hash=vm_hash)
        raise HTTPInternalServerError(reason="Unhandled error during initialisation") from error
+ if execution and not execution.has_resources: + pool.forget_vm(execution.vm_hash) + execution = None + + if not execution: + execution = await create_vm_execution_or_raise_http_error(vm_hash=vm_hash, pool=pool) + + logger.debug(f"Using vm={execution.vm_id}") + + scope: dict = await build_asgi_scope(path, request) + + try: + await execution.becomes_ready() + result_raw: bytes = await execution.run_code(scope=scope) + + if result_raw == b"": + # Missing result from the init process of the virtual machine, not even an error message. + # It may have completely crashed. + + # Stop the virtual machine due to failing init. + # It will be restarted on a future request. + await execution.stop() + + return web.Response( + status=HTTPBadGateway.status_code, + reason="No response from VM", + text="VM did not respond and was shut down", + ) + + except asyncio.TimeoutError: + logger.warning(f"VM{execution.vm_id} did not respond within `resource.seconds`") + return web.HTTPGatewayTimeout(body="Program did not respond within `resource.seconds`") + except UnpackValueError as error: + logger.exception(error) + return web.Response(status=HTTPBadGateway.status_code, reason="Invalid response from VM") + + try: + result = msgpack.loads(result_raw, raw=False) + + logger.debug(f"Result from VM: <<<\n\n{str(result)[:1000]}\n\n>>>") + + if "traceback" in result: + # An error took place, the stacktrace of the error will be returned. + # TODO: Add an option for VM developers to prevent stacktraces from being exposed. + + # The Diagnostics VM checks for the proper handling of exceptions. + # This fills the logs with noisy stack traces, so we ignore this specific error. 
+ ignored_errors = ['raise CustomError("Whoops")', "main.CustomError: Whoops"] + + if settings.IGNORE_TRACEBACK_FROM_DIAGNOSTICS and any( + ignored_error in result["traceback"] for ignored_error in ignored_errors + ): + logger.debug('Ignored traceback from CustomError("Whoops")') + else: + logger.warning(result["traceback"]) + + return web.Response( + status=HTTPInternalServerError.status_code, + reason="Error in VM execution", + body=result["traceback"], + content_type="text/plain", + ) + + # HTTP Headers require specific data structure + headers = CIMultiDict([(key.decode().lower(), value.decode()) for key, value in result["headers"]["headers"]]) + if "content-length" not in headers: + headers["Content-Length".lower()] = str(len(result["body"]["body"])) + for header in ["Content-Encoding", "Transfer-Encoding", "Vary"]: + if header in headers: + del headers[header] + + headers.update( + { + "Aleph-Program-ItemHash": execution.vm_hash, + "Aleph-Program-Code-Ref": execution.message.code.ref, + # "Aleph-Compute-Vm-Id": str(execution.vm.vm_id), + } + ) + + return web.Response( + status=result["headers"]["status"], + body=result["body"]["body"], + headers=headers, + ) + except UnpackValueError as error: + logger.exception(error) + return web.Response(status=HTTPBadGateway.status_code, reason="Invalid response from VM") + finally: + if settings.REUSE_TIMEOUT > 0: + if settings.WATCH_FOR_UPDATES: + execution.start_watching_for_updates(pubsub=request.app["pubsub"]) + _ = execution.stop_after_timeout(timeout=settings.REUSE_TIMEOUT) + else: + await execution.stop() + pool.forget_vm(execution.vm_hash) + + +async def run_code_on_event(vm_hash: ItemHash, event, pubsub: PubSub, pool: VmPool): + """ + Execute code in response to an event. 
+ """ + + execution: VmExecution | None = pool.get_running_vm(vm_hash=vm_hash) + + if not execution: + execution = await create_vm_execution_or_raise_http_error(vm_hash=vm_hash, pool=pool) + + logger.debug(f"Using vm={execution.vm_id}") + + scope: dict = await build_event_scope(event) + + try: + await execution.becomes_ready() + result_raw: bytes = await execution.run_code(scope=scope) + except UnpackValueError as error: + logger.exception(error) + return web.Response(status=HTTPBadGateway.status_code, reason="Invalid response from VM") + + try: + result = msgpack.loads(result_raw, raw=False) + + logger.debug(f"Result from VM: <<<\n\n{str(result)[:1000]}\n\n>>>") + + if "traceback" in result: + logger.warning(result["traceback"]) + return web.Response( + status=HTTPInternalServerError.status_code, + reason="Error in VM execution", + body=result["traceback"], + content_type="text/plain", + ) + + logger.info(f"Result: {result['body']}") + return result["body"] + + except UnpackValueError as error: + logger.exception(error) + return web.Response(status=HTTPBadGateway.status_code, reason="Invalid response from VM") + finally: + if settings.REUSE_TIMEOUT > 0: + if settings.WATCH_FOR_UPDATES: + execution.start_watching_for_updates(pubsub=pubsub) + _ = execution.stop_after_timeout(timeout=settings.REUSE_TIMEOUT) + else: + await execution.stop() + + +async def start_persistent_vm(vm_hash: ItemHash, pubsub: PubSub | None, pool: VmPool) -> VmExecution: + execution: VmExecution | None = pool.get_running_vm(vm_hash=vm_hash) + + if not execution: + logger.info(f"Starting persistent virtual machine with id: {vm_hash}") + execution = await create_vm_execution(vm_hash=vm_hash, pool=pool, persistent=True) + else: + logger.info(f"{vm_hash} is already running") + + await execution.becomes_ready() + + # If the VM was already running in lambda mode, it should not expire + # as long as it is also scheduled as long-running + execution.cancel_expiration() + + if pubsub and 
settings.WATCH_FOR_UPDATES: + execution.start_watching_for_updates(pubsub=pubsub) + + return execution + + +async def stop_persistent_vm(vm_hash: ItemHash, pool: VmPool) -> VmExecution | None: + logger.info(f"Stopping persistent VM {vm_hash}") + execution = pool.get_running_vm(vm_hash) + + if execution: + await execution.stop() + + return execution diff --git a/src/aleph/vm/orchestrator/status.py b/src/aleph/vm/orchestrator/status.py new file mode 100644 index 000000000..07b21b33f --- /dev/null +++ b/src/aleph/vm/orchestrator/status.py @@ -0,0 +1,208 @@ +""" +Used to check that the example_fastapi program works as expected +in a deployed supervisor. +""" + +import logging +from typing import Any + +from aiohttp import ClientResponseError, ClientSession +from aiohttp.web_exceptions import HTTPBadGateway, HTTPInternalServerError, HTTPOk +from aleph_message.models import ItemHash + +from aleph.vm.conf import settings + +logger = logging.getLogger(__name__) + + +def assemble_vm_url(vm_id: ItemHash) -> str: + """Assemble the URL for a VM based on the host and port that the orchestrator is running on and the VM ID.""" + return f"http://{settings.SUPERVISOR_HOST}:{settings.SUPERVISOR_PORT}/vm/{vm_id}" + + +async def get_json_from_vm(session: ClientSession, vm_id: ItemHash, suffix: str) -> Any: + """Get JSON from a VM running locally.""" + vm_url = assemble_vm_url(vm_id) + url = f"{vm_url}{suffix}" + async with session.get(url) as resp: + resp.raise_for_status() + return await resp.json() + + +async def post_to_vm(session: ClientSession, vm_id: ItemHash, suffix: str, data: Any = None) -> Any: + """Post data to a VM running locally.""" + vm_url = assemble_vm_url(vm_id) + url = f"{vm_url}{suffix}" + async with session.post(url, json=data) as resp: + resp.raise_for_status() + return await resp.json() + + +async def check_index(session: ClientSession, vm_id: ItemHash) -> bool: + """Check that the index page of the VM is working.""" + try: + result: dict = await 
get_json_from_vm(session, vm_id, "/") + assert result["Example"] == "example_fastapi" + return True + except ClientResponseError: + return False + + +async def check_lifespan(session: ClientSession, vm_id: ItemHash) -> bool: + """Check that the lifespan endpoint of the VM is working.""" + try: + result: dict = await get_json_from_vm(session, vm_id, "/lifespan") + return result["Lifespan"] is True + except ClientResponseError: + return False + + +async def check_environ(session: ClientSession, vm_id: ItemHash) -> bool: + """Check that the environ endpoint of the VM returns the expected environment variables.""" + try: + result: dict = await get_json_from_vm(session, vm_id, "/environ") + assert "ALEPH_API_HOST" in result + assert "ALEPH_API_UNIX_SOCKET" in result + assert "ALEPH_REMOTE_CRYPTO_HOST" in result + assert "ALEPH_REMOTE_CRYPTO_UNIX_SOCKET" in result + assert "ALEPH_ADDRESS_TO_USE" in result + return True + except ClientResponseError: + return False + + +async def check_messages(session: ClientSession, vm_id: ItemHash) -> bool: + """Check that the messages endpoint of the VM returns a list of messages.""" + try: + result: dict = await get_json_from_vm(session, vm_id, "/messages") + assert "Messages" in result + assert "messages" in result["Messages"] + assert "item_hash" in result["Messages"]["messages"][0] + return True + except ClientResponseError: + return False + + +async def check_dns(session: ClientSession, vm_id: ItemHash) -> bool: + """Check that the DNS endpoint of the VM returns both IPv4 and IPv6 results.""" + try: + result: dict = await get_json_from_vm(session, vm_id, "/dns") + assert result["ipv4"] + assert result["ipv6"] + return True + except ClientResponseError: + return False + + +async def check_ipv4(session: ClientSession, vm_id: ItemHash) -> bool: + """Check that the VM has IPv4 connectivity.""" + try: + result: dict = await get_json_from_vm(session, vm_id, "/ip/4") + return result["result"] + except ClientResponseError: + return False 
async def check_cache(session: ClientSession, vm_id: ItemHash) -> bool:
    """Check that the VM can set and get a value in its cache."""
    try:
        result1: bool = await get_json_from_vm(session, vm_id, "/cache/set/a/42")
        assert result1 is True
        # The cache returns the stored value as a string ("42", not 42),
        # hence the `str` annotation and the string comparison below.
        result2: str = await get_json_from_vm(session, vm_id, "/cache/get/a")
        assert result2 == "42"
        keys: list[str] = await get_json_from_vm(session, vm_id, "/cache/keys")
        assert "a" in keys
        return True
    except ClientResponseError:
        return False
assemble_vm_url(vm_id) + try: + async with session.get(f"{vm_url}/raise") as resp: + text = await resp.text() + return resp.status == HTTPInternalServerError.status_code and "Traceback" in text + except ClientResponseError: + return False + + +async def check_crash_and_restart(session: ClientSession, vm_id: ItemHash) -> bool: + """Check that a crash in the VM would cause it to restart and work as expected.""" + # Crash the VM init. + vm_url = assemble_vm_url(vm_id) + async with session.get(f"{vm_url}/crash") as resp: + if resp.status != HTTPBadGateway.status_code: + return False + + # Try loading the index page. A new execution should be created. + try: + result: dict = await get_json_from_vm(session, vm_id, "/") + assert result["Example"] == "example_fastapi" + return True + + except ClientResponseError: + return False + + +async def check_get_a_message(session: ClientSession, vm_id: ItemHash) -> bool: + """Check that the VM can get a message from the aleph.im network.""" + try: + result: dict = await get_json_from_vm(session, vm_id, "/get_a_message") + return "item_hash" in result + except ClientResponseError: + return False + + +async def check_post_a_message(session: ClientSession, vm_id: ItemHash) -> bool: + """Check that the VM can post a message to the aleph.im network using a remote key present on the host.""" + try: + result: dict = await post_to_vm(session, vm_id, "/post_a_message") + return "item_hash" in result + except ClientResponseError: + return False + + +async def check_sign_a_message(session: ClientSession, vm_id: ItemHash) -> bool: + """Check that the VM can sign a message using a key local to the VM.""" + try: + result: dict = await post_to_vm(session, vm_id, "/sign_a_message") + return "item_hash" in result + except ClientResponseError: + return False diff --git a/src/aleph/vm/orchestrator/supervisor.py b/src/aleph/vm/orchestrator/supervisor.py new file mode 100644 index 000000000..ae6436291 --- /dev/null +++ 
b/src/aleph/vm/orchestrator/supervisor.py @@ -0,0 +1,198 @@ +""" +The VM Supervisor is in charge of executing code, starting and stopping VMs and provides +and API to launch these operations. + +At it's core, it is currently an asynchronous HTTP server using aiohttp, but this may +evolve in the future. +""" + +import asyncio +import logging +from collections.abc import Awaitable, Callable +from pathlib import Path +from secrets import token_urlsafe + +from aiohttp import web +from aiohttp_cors import ResourceOptions, setup + +from aleph.vm.conf import settings +from aleph.vm.pool import VmPool +from aleph.vm.sevclient import SevClient +from aleph.vm.version import __version__ + +from .resources import about_certificates, about_system_usage +from .tasks import ( + start_payment_monitoring_task, + start_watch_for_messages_task, + stop_balances_monitoring_task, + stop_watch_for_messages_task, +) +from .views import ( + about_config, + about_execution_records, + about_executions, + about_login, + list_executions, + notify_allocation, + run_code_from_hostname, + run_code_from_path, + status_check_fastapi, + status_check_fastapi_legacy, + status_check_host, + status_check_ipv6, + status_check_version, + status_public_config, + update_allocations, +) +from .views.operator import ( + operate_confidential_initialize, + operate_confidential_inject_secret, + operate_confidential_measurement, + operate_erase, + operate_expire, + operate_logs_json, + operate_reboot, + operate_stop, + stream_logs, +) + +logger = logging.getLogger(__name__) + + +@web.middleware +async def server_version_middleware( + request: web.Request, + handler: Callable[[web.Request], Awaitable[web.StreamResponse]], +) -> web.StreamResponse: + """Add the version of Aleph-VM in the HTTP headers of the responses.""" + resp: web.StreamResponse = await handler(request) + resp.headers.update( + {"Server": f"aleph-vm/{__version__}"}, + ) + return resp + + +async def http_not_found(request: web.Request): + 
"""Return a 404 error for unknown URLs.""" + return web.HTTPNotFound() + + +def setup_webapp(): + app = web.Application(middlewares=[server_version_middleware]) + cors = setup( + app, + defaults={ + "*": ResourceOptions( + allow_credentials=True, + expose_headers="*", + allow_headers="*", + ) + }, + ) + + # Routes that need CORS enabled + cors_routes = [ + # /about APIs return information about the VM Orchestrator + web.get("/about/login", about_login), + web.get("/about/executions/list", list_executions), + web.get("/about/executions/details", about_executions), + web.get("/about/executions/records", about_execution_records), + web.get("/about/usage/system", about_system_usage), + web.get("/about/certificates", about_certificates), + web.get("/about/config", about_config), + # /control APIs are used to control the VMs and access their logs + web.post("/control/allocation/notify", notify_allocation), + web.get("/control/machine/{ref}/stream_logs", stream_logs), + web.get("/control/machine/{ref}/logs", operate_logs_json), + web.post("/control/machine/{ref}/expire", operate_expire), + web.post("/control/machine/{ref}/stop", operate_stop), + web.post("/control/machine/{ref}/erase", operate_erase), + web.post("/control/machine/{ref}/reboot", operate_reboot), + web.post("/control/machine/{ref}/confidential/initialize", operate_confidential_initialize), + web.get("/control/machine/{ref}/confidential/measurement", operate_confidential_measurement), + web.post("/control/machine/{ref}/confidential/inject_secret", operate_confidential_inject_secret), + # /status APIs are used to check that the VM Orchestrator is running properly + web.get("/status/check/fastapi", status_check_fastapi), + web.get("/status/check/fastapi/legacy", status_check_fastapi_legacy), + web.get("/status/check/host", status_check_host), + web.get("/status/check/version", status_check_version), + web.get("/status/check/ipv6", status_check_ipv6), + web.get("/status/config", status_public_config), + ] + 
routes = app.add_routes(cors_routes) + for route in routes: + cors.add(route) + + # Routes that don't need CORS enabled + other_routes = [ + # /control APIs are used to control the VMs and access their logs + web.post("/control/allocations", update_allocations), + # Raise an HTTP Error 404 if attempting to access an unknown URL within these paths. + web.get("/about/{suffix:.*}", http_not_found), + web.get("/control/{suffix:.*}", http_not_found), + web.get("/status/{suffix:.*}", http_not_found), + # /static is used to serve static files + web.static("/static", Path(__file__).parent / "views/static"), + # /vm is used to launch VMs on-demand + web.route("*", "/vm/{ref}{suffix:.*}", run_code_from_path), + web.route("*", "/{suffix:.*}", run_code_from_hostname), + ] + app.add_routes(other_routes) + return app + + +async def stop_all_vms(app: web.Application): + pool: VmPool = app["vm_pool"] + await pool.stop() + + +def run(): + """Run the VM Supervisor.""" + settings.check() + + loop = asyncio.new_event_loop() + pool = VmPool(loop) + pool.setup() + + hostname = settings.DOMAIN_NAME + protocol = "http" if hostname == "localhost" else "https" + + # Require a random token to access /about APIs + secret_token = token_urlsafe(nbytes=32) + app = setup_webapp() + # Store app singletons. Note that app["pubsub"] will also be created. 
+ app["secret_token"] = secret_token + app["vm_pool"] = pool + + # Store sevctl app singleton only if confidential feature is enabled + if settings.ENABLE_CONFIDENTIAL_COMPUTING: + sev_client = SevClient(settings.CONFIDENTIAL_DIRECTORY, settings.SEV_CTL_PATH) + app["sev_client"] = sev_client + # TODO: Review and check sevctl first initialization steps, like (sevctl generate and sevctl provision) + + logger.info(f"Login to /about pages {protocol}://{hostname}/about/login?token={secret_token}") + + try: + if settings.WATCH_FOR_MESSAGES: + app.on_startup.append(start_watch_for_messages_task) + app.on_startup.append(start_payment_monitoring_task) + app.on_cleanup.append(stop_watch_for_messages_task) + app.on_cleanup.append(stop_balances_monitoring_task) + app.on_cleanup.append(stop_all_vms) + + logger.info("Loading existing executions ...") + asyncio.run(pool.load_persistent_executions()) + + logger.info(f"Starting the web server on http://{settings.SUPERVISOR_HOST}:{settings.SUPERVISOR_PORT}") + web.run_app(app, host=settings.SUPERVISOR_HOST, port=settings.SUPERVISOR_PORT) + except OSError as e: + if e.errno == 98: + logger.error( + f"Port {settings.SUPERVISOR_PORT} already in use. " + f"Please check that no other instance of Aleph-VM is running." 
+            )
+        else:
+            raise
+    finally:
+        if settings.ALLOW_VM_NETWORKING:
+            pool.teardown()
diff --git a/src/aleph/vm/orchestrator/tasks.py b/src/aleph/vm/orchestrator/tasks.py
new file mode 100644
index 000000000..2bf9c7413
--- /dev/null
+++ b/src/aleph/vm/orchestrator/tasks.py
@@ -0,0 +1,271 @@
+import asyncio
+import json
+import logging
+import math
+import time
+from collections.abc import AsyncIterable
+from decimal import Decimal
+from typing import TypeVar
+
+import aiohttp
+import pydantic
+from aiohttp import web
+from aleph_message.models import (
+    AlephMessage,
+    PaymentType,
+    ProgramMessage,
+    parse_message,
+)
+from aleph_message.status import MessageStatus
+from yarl import URL
+
+from aleph.vm.conf import settings
+from aleph.vm.orchestrator.utils import (
+    format_cost,
+    get_community_wallet_address,
+    is_after_community_wallet_start,
+)
+from aleph.vm.pool import VmPool
+from aleph.vm.utils import create_task_log_exceptions
+
+from .messages import get_message_status
+from .payment import (
+    compute_required_balance,
+    compute_required_flow,
+    fetch_balance_of_address,
+    get_stream,
+)
+from .pubsub import PubSub
+from .reactor import Reactor
+
+logger = logging.getLogger(__name__)
+
+Value = TypeVar("Value")
+COMMUNITY_STREAM_RATIO = Decimal(0.2)
+
+
+async def retry_generator(generator: AsyncIterable[Value], max_seconds: int = 8) -> AsyncIterable[Value]:
+    retry_delay = 0.1
+    while True:
+        async for value in generator:
+            yield value
+
+        await asyncio.sleep(retry_delay)
+        retry_delay = min(retry_delay * 2, max_seconds)
+
+
+async def subscribe_via_ws(url) -> AsyncIterable[AlephMessage]:
+    logger.debug("subscribe_via_ws()")
+    async with aiohttp.ClientSession() as session:
+        async with session.ws_connect(url) as ws:
+            logger.debug(f"Websocket connected on {url}")
+            async for msg in ws:
+                if msg.type == aiohttp.WSMsgType.TEXT:
+                    try:
+                        data = json.loads(msg.data)
+                    except json.JSONDecodeError:
+                        logger.error(
+                            f"Invalid JSON from websocket subscription 
{msg.data}", + exc_info=True, + ) + + # Chain confirmation messages are published in the WS subscription + # but do not contain the fields "item_type" or "content, hence they + # are not valid Messages. + if "item_type" not in data: + assert "content" not in data + assert "confirmation" in data + logger.info(f"Ignoring confirmation message '{data['item_hash']}'") + continue + + try: + yield parse_message(data) + except pydantic.error_wrappers.ValidationError as error: + item_hash = data.get("item_hash", "ITEM_HASH_NOT_FOUND") + logger.warning( + f"Invalid Aleph message: {item_hash} \n {error.json()}\n {error.raw_errors}", + exc_info=False, + ) + continue + except KeyError: + logger.exception( + f"Invalid Aleph message could not be parsed '{data}'", + exc_info=True, + ) + continue + except Exception: + logger.exception( + f"Unknown error when parsing Aleph message {data}", + exc_info=True, + ) + continue + elif msg.type == aiohttp.WSMsgType.ERROR: + break + + +async def watch_for_messages(dispatcher: PubSub, reactor: Reactor): + """Watch for new Aleph messages""" + logger.debug("watch_for_messages()") + url = URL(f"{settings.API_SERVER}/api/ws0/messages").with_query({"startDate": math.floor(time.time())}) + + async for message in retry_generator(subscribe_via_ws(url)): + # Dispatch update to running VMs + await dispatcher.publish(key=message.item_hash, value=message) + if hasattr(message.content, "ref") and message.content.ref: + await dispatcher.publish(key=message.content.ref, value=message) + + # Register new VM to run on future messages: + if isinstance(message, ProgramMessage): + if message.content.on.message: + reactor.register(message) + await reactor.trigger(message=message) + + +async def start_watch_for_messages_task(app: web.Application): + logger.debug("start_watch_for_messages_task()") + pubsub = PubSub() + pool: VmPool = app["vm_pool"] + reactor = Reactor(pubsub, pool) + + # Register an hardcoded initial program + # TODO: Register all programs with 
subscriptions + # sample_message, _ = await load_updated_message( + # ref=ItemHash("cad11970efe9b7478300fd04d7cc91c646ca0a792b9cc718650f86e1ccfac73e") + # ) + # if isinstance(sample_message, ProgramMessage): + # assert sample_message.content.on.message, sample_message + # reactor.register(sample_message) + + app["pubsub"] = pubsub + app["reactor"] = reactor + app["messages_listener"] = create_task_log_exceptions(watch_for_messages(pubsub, reactor)) + + +async def stop_watch_for_messages_task(app: web.Application): + app["messages_listener"].cancel() + try: + await app["messages_listener"] + except asyncio.CancelledError: + logger.debug("Task messages_listener is cancelled now") + + +async def monitor_payments(app: web.Application): + """Periodically checks and stops VMs if payment conditions are unmet, such as insufficient + wallet balance or payment stream coverage. Handles forgotten VMs, balance checks for the + "hold" tier, and stream flow validation for the "superfluid" tier to ensure compliance. + """ + pool: VmPool = app["vm_pool"] + while True: + await asyncio.sleep(settings.PAYMENT_MONITOR_INTERVAL) + # noinspection PyBroadException + try: + logger.debug("Monitoring balances task running") + await check_payment(pool) + logger.debug("Monitoring balances task ended") + except Exception as e: + # Catch all exceptions as to never stop the task. + logger.warning(f"check_payment failed {e}", exc_info=True) + + +async def check_payment(pool: VmPool): + """Ensures VMs are stopped if payment conditions are unmet, such as insufficient + funds in the wallet or inadequate payment stream coverage. Handles forgotten VMs + balance checks for the "hold" tier, and stream flow validation for the "superfluid" tier + stopping executions as needed to maintain compliance. 
+ """ + # Check if the executions continues existing or are forgotten before checking the payment + # this is actually the main workflow for properly stopping PAYG instances, a user agent would stop the payment stream + # and forget the instance message. Compared to just stopping or decreasing the payment stream as the CRN don't know + # which VM it affects. + for vm_hash in list(pool.executions.keys()): + message_status = await get_message_status(vm_hash) + if message_status != MessageStatus.PROCESSED: + logger.debug(f"Stopping {vm_hash} execution due to {message_status} message status") + await pool.stop_vm(vm_hash) + pool.forget_vm(vm_hash) + + # Check if the balance held in the wallet is sufficient holder tier resources (Not do it yet) + for sender, chains in pool.get_executions_by_sender(payment_type=PaymentType.hold).items(): + for chain, executions in chains.items(): + executions = [execution for execution in executions if execution.is_confidential] + balance = await fetch_balance_of_address(sender) + + # Stop executions until the required balance is reached + required_balance = await compute_required_balance(executions) + logger.debug(f"Required balance for Sender {sender} executions: {required_balance}") + # Stop executions until the required balance is reached + while executions and balance < (required_balance + settings.PAYMENT_BUFFER): + last_execution = executions.pop(-1) + logger.debug(f"Stopping {last_execution} due to insufficient balance") + await pool.stop_vm(last_execution.vm_hash) + required_balance = await compute_required_balance(executions) + community_wallet = await get_community_wallet_address() + if not community_wallet: + logger.error("Monitor payment ERROR: No community wallet set. 
Cannot check community payment")
+
+    # Check if the balance held in the wallet is sufficient for stream tier resources
+    for sender, chains in pool.get_executions_by_sender(payment_type=PaymentType.superfluid).items():
+        for chain, executions in chains.items():
+            try:
+                stream = await get_stream(sender=sender, receiver=settings.PAYMENT_RECEIVER_ADDRESS, chain=chain)
+
+                logger.debug(
+                    f"Stream flow from {sender} to {settings.PAYMENT_RECEIVER_ADDRESS} = {stream} {chain.value}"
+                )
+            except ValueError as error:
+                logger.error(f"Error found getting stream for chain {chain} and sender {sender}: {error}")
+                continue
+            try:
+                community_stream = await get_stream(sender=sender, receiver=community_wallet, chain=chain)
+                logger.debug(f"Stream flow from {sender} to {community_wallet} (community) : {community_stream} {chain}")
+
+            except ValueError as error:
+                logger.error(f"Error found getting stream for chain {chain} and sender {sender}: {error}")
+                continue
+
+            while executions:
+                executions_with_community = [
+                    execution
+                    for execution in executions
+                    if await is_after_community_wallet_start(execution.times.started_at)
+                ]
+
+                required_stream = await compute_required_flow(executions_with_community)
+                executions_without_community = [
+                    execution
+                    for execution in executions
+                    if not await is_after_community_wallet_start(execution.times.started_at)
+                ]
+                logger.info("flow community %s", executions_with_community)
+                logger.info("flow without community %s", executions_without_community)
+                required_stream_without_community = await compute_required_flow(executions_without_community)
+                # TODO, rounding should be done per executions to not have the extra accumulate before rounding
+                required_crn_stream = format_cost(
+                    required_stream * (1 - COMMUNITY_STREAM_RATIO) + required_stream_without_community
+                )
+                required_community_stream = format_cost(required_stream * COMMUNITY_STREAM_RATIO)
+                logger.debug(
+                    f"Stream for senders {sender} {len(executions)} executions. 
CRN : {stream} / {required_crn_stream}." + f"Community: {community_stream} / {required_community_stream}" + ) + # Can pay all executions + if (stream + settings.PAYMENT_BUFFER) > required_crn_stream and ( + community_stream + settings.PAYMENT_BUFFER + ) > required_community_stream: + break + # Stop executions until the required stream is reached + last_execution = executions.pop(-1) + logger.info(f"Stopping {last_execution} of {sender} due to insufficient stream") + await pool.stop_vm(last_execution.vm_hash) + + +async def start_payment_monitoring_task(app: web.Application): + app["payments_monitor"] = create_task_log_exceptions(monitor_payments(app), name="payment_monitor") + + +async def stop_balances_monitoring_task(app: web.Application): + app["payments_monitor"].cancel() + try: + await app["payments_monitor"] + except asyncio.CancelledError: + logger.debug("Task payments_monitor is cancelled now") diff --git a/src/aleph/vm/orchestrator/utils.py b/src/aleph/vm/orchestrator/utils.py new file mode 100644 index 000000000..0bd79ddf1 --- /dev/null +++ b/src/aleph/vm/orchestrator/utils.py @@ -0,0 +1,107 @@ +from datetime import datetime, timedelta, timezone +from decimal import ROUND_FLOOR, Decimal +from logging import getLogger +from typing import Any, TypedDict + +import aiohttp + +from aleph.vm.conf import settings + +logger = getLogger(__name__) + + +class AggregateSettingsDict(TypedDict): + compatible_gpus: list[Any] + community_wallet_address: str + community_wallet_timestamp: int + + +LAST_AGGREGATE_SETTINGS: AggregateSettingsDict | None = None +LAST_AGGREGATE_SETTINGS_FETCHED_AT: datetime | None = None +PRICE_PRECISION = 18 # Price precision + + +async def fetch_aggregate_settings() -> AggregateSettingsDict | None: + """ + Get the settings Aggregate dict from the PyAleph API Aggregate. 
+ + API Endpoint: + GET /api/v0/aggregates/{address}.json?keys=settings + + For more details, see the PyAleph API documentation: + https://github.com/aleph-im/pyaleph/blob/master/src/aleph/web/controllers/routes.py#L62 + """ + async with aiohttp.ClientSession() as session: + url = f"{settings.API_SERVER}/api/v0/aggregates/{settings.SETTINGS_AGGREGATE_ADDRESS}.json?keys=settings" + logger.info(f"Fetching settings aggregate from {url}") + resp = await session.get(url) + + # Raise an error if the request failed + resp.raise_for_status() + + resp_data = await resp.json() + return resp_data["data"]["settings"] + + +async def update_aggregate_settings(): + global LAST_AGGREGATE_SETTINGS # noqa: PLW0603 + global LAST_AGGREGATE_SETTINGS_FETCHED_AT # noqa: PLW0603 + + if ( + not LAST_AGGREGATE_SETTINGS + or LAST_AGGREGATE_SETTINGS_FETCHED_AT + and datetime.now(tz=timezone.utc) - LAST_AGGREGATE_SETTINGS_FETCHED_AT > timedelta(minutes=1) + ): + try: + aggregate = await fetch_aggregate_settings() + LAST_AGGREGATE_SETTINGS = aggregate + LAST_AGGREGATE_SETTINGS_FETCHED_AT = datetime.now(tz=timezone.utc) + + except Exception: + logger.exception("Failed to fetch aggregate settings") + + +async def get_aggregate_settings() -> AggregateSettingsDict | None: + """The settings aggregate is a special aggregate used to share some common settings for VM setup + + Ensure the cached version is up to date and return it""" + await update_aggregate_settings() + + if not LAST_AGGREGATE_SETTINGS: + logger.error("No setting aggregate") + return LAST_AGGREGATE_SETTINGS + + +async def get_community_wallet_address() -> str | None: + setting_aggr = await get_aggregate_settings() + return setting_aggr and setting_aggr.get("community_wallet_address") + + +async def get_community_wallet_start() -> datetime: + """Community wallet start time. + + After this timestamp. 
New PAYG must include a payment to the community wallet""" + setting_aggr = await get_aggregate_settings() + if setting_aggr is None or "community_wallet_timestamp" not in setting_aggr: + return datetime.now(tz=timezone.utc) + timestamp = setting_aggr["community_wallet_timestamp"] + start_datetime = datetime.fromtimestamp(timestamp, tz=timezone.utc) + return start_datetime + + +async def is_after_community_wallet_start(dt: datetime | None = None) -> bool: + """Community wallet start time""" + if not dt: + dt = datetime.now(tz=timezone.utc) + start_dt = await get_community_wallet_start() + return dt > start_dt + + +def format_cost(v: Decimal | str, p: int = PRICE_PRECISION) -> Decimal: + return Decimal(v).quantize(Decimal(1) / Decimal(10**p), ROUND_FLOOR) + + +def get_compatible_gpus() -> list[Any]: + if not LAST_AGGREGATE_SETTINGS: + return [] + return LAST_AGGREGATE_SETTINGS["compatible_gpus"] diff --git a/src/aleph/vm/orchestrator/views/__init__.py b/src/aleph/vm/orchestrator/views/__init__.py new file mode 100644 index 000000000..54a3d452d --- /dev/null +++ b/src/aleph/vm/orchestrator/views/__init__.py @@ -0,0 +1,614 @@ +import binascii +import logging +from decimal import Decimal +from hashlib import sha256 +from json import JSONDecodeError +from packaging.version import InvalidVersion, Version +from pathlib import Path +from secrets import compare_digest +from string import Template + +import aiodns +import aiohttp +from aiohttp import web +from aiohttp.web_exceptions import HTTPBadRequest, HTTPNotFound +from aleph_message.exceptions import UnknownHashError +from aleph_message.models import ItemHash, MessageType, PaymentType +from pydantic import ValidationError + +from aleph.vm.conf import settings +from aleph.vm.controllers.firecracker.executable import ( + ResourceDownloadError, + VmSetupError, +) +from aleph.vm.controllers.firecracker.program import FileTooLargeError +from aleph.vm.hypervisors.firecracker.microvm import MicroVMFailedInitError +from 
aleph.vm.orchestrator import payment, status +from aleph.vm.orchestrator.chain import STREAM_CHAINS +from aleph.vm.orchestrator.custom_logs import set_vm_for_logging +from aleph.vm.orchestrator.messages import try_get_message +from aleph.vm.orchestrator.metrics import get_execution_records +from aleph.vm.orchestrator.payment import ( + InvalidAddressError, + InvalidChainError, + fetch_execution_flow_price, + get_stream, +) +from aleph.vm.orchestrator.pubsub import PubSub +from aleph.vm.orchestrator.resources import Allocation, VMNotification +from aleph.vm.orchestrator.run import run_code_on_request, start_persistent_vm +from aleph.vm.orchestrator.tasks import COMMUNITY_STREAM_RATIO +from aleph.vm.orchestrator.utils import ( + format_cost, + get_community_wallet_address, + is_after_community_wallet_start, + update_aggregate_settings, +) +from aleph.vm.orchestrator.views.host_status import ( + check_dns_ipv4, + check_dns_ipv6, + check_domain_resolution_ipv4, + check_domain_resolution_ipv6, + check_host_egress_ipv4, + check_host_egress_ipv6, +) +from aleph.vm.pool import VmPool +from aleph.vm.utils import ( + HostNotFoundError, + b32_to_b16, + cors_allow_all, + dumps_for_json, + get_ref_from_dns, +) +from aleph.vm.version import __version__ + +logger = logging.getLogger(__name__) + + +async def run_code_from_path(request: web.Request) -> web.Response: + """Allow running an Aleph VM function from a URL path + + The path is expected to follow the scheme defined in `app.add_routes` below, + where the identifier of the message is named `ref`. 
+ """ + path = request.match_info["suffix"] + path = path if path.startswith("/") else f"/{path}" + + try: + message_ref = ItemHash(request.match_info["ref"]) + except UnknownHashError as e: + raise HTTPBadRequest( + reason="Invalid message reference", text=f"Invalid message reference: {request.match_info['ref']}" + ) from e + + pool: VmPool = request.app["vm_pool"] + with set_vm_for_logging(vm_hash=message_ref): + return await run_code_on_request(message_ref, path, pool, request) + + +async def run_code_from_hostname(request: web.Request) -> web.Response: + """Allow running an Aleph VM function from a hostname + + The first component of the hostname is used as identifier of the message defining the + Aleph VM function. + + Since hostname labels are limited to 63 characters and hex(sha256(...)) has a length of 64, + we expect the hash to be encoded in base32 instead of hexadecimal. Padding is added + automatically. + """ + if request.host.split(":")[0] == settings.DOMAIN_NAME and request.method == "GET" and request.path == "/": + # Serve the index page + return await index(request=request) + + path = request.match_info["suffix"] + path = path if path.startswith("/") else f"/{path}" + + message_ref_base32 = request.host.split(".")[0] + if settings.FAKE_DATA_PROGRAM: + message_ref = ItemHash("cafecafecafecafecafecafecafecafecafecafecafecafecafecafecafecafe") + else: + try: + message_ref = ItemHash(b32_to_b16(message_ref_base32).decode()) + logger.debug(f"Using base32 message id from hostname to obtain '{message_ref}") + except binascii.Error: + try: + message_ref = ItemHash(await get_ref_from_dns(domain=f"_aleph-id.{request.host}")) + logger.debug(f"Using DNS TXT record to obtain '{message_ref}'") + except aiodns.error.DNSError: + return HTTPNotFound(reason="Invalid message reference") + except UnknownHashError: + return HTTPNotFound(reason="Invalid message reference") + + pool = request.app["vm_pool"] + with set_vm_for_logging(vm_hash=message_ref): + return await 
run_code_on_request(message_ref, path, pool, request) + + +def authenticate_request(request: web.Request) -> None: + """Check that the token in the cookies matches the app's secret token.""" + if request.cookies.get("token") != request.app["secret_token"]: + raise web.HTTPUnauthorized(reason="Invalid token", text="401 Invalid token") + + +@cors_allow_all +async def about_login(request: web.Request) -> web.Response: + secret_token = request.app["secret_token"] + request_token = request.query.get("token") + + if request_token and secret_token and compare_digest(request_token, secret_token): + response = web.HTTPFound("/about/config") + response.cookies["token"] = request_token + return response + else: + return web.json_response({"success": False}, status=401) + + +@cors_allow_all +async def about_executions(request: web.Request) -> web.Response: + authenticate_request(request) + pool: VmPool = request.app["vm_pool"] + return web.json_response( + [dict(pool.executions.items())], + dumps=dumps_for_json, + ) + + +@cors_allow_all +async def list_executions(request: web.Request) -> web.Response: + pool: VmPool = request.app["vm_pool"] + return web.json_response( + { + item_hash: { + "networking": { + "ipv4": execution.vm.tap_interface.ip_network, + "ipv6": execution.vm.tap_interface.ipv6_network, + }, + } + for item_hash, execution in pool.executions.items() + if execution.is_running + }, + dumps=dumps_for_json, + ) + + +@cors_allow_all +async def about_config(request: web.Request) -> web.Response: + authenticate_request(request) + return web.json_response( + settings, + dumps=dumps_for_json, + ) + + +@cors_allow_all +async def about_execution_records(_: web.Request): + records = await get_execution_records() + return web.json_response(records, dumps=dumps_for_json) + + +async def index(request: web.Request): + assert request.method == "GET" + body = (Path(__file__).parent.absolute() / "templates/index.html").read_text() + s = Template(body) + body = s.substitute( + 
public_url=f"https://{settings.DOMAIN_NAME}/", + multiaddr_dns4=f"/dns4/{settings.DOMAIN_NAME}/tcp/443/https", + multiaddr_dns6=f"/dns6/{settings.DOMAIN_NAME}/tcp/443/https", + check_fastapi_vm_id=settings.CHECK_FASTAPI_VM_ID, + version=__version__, + ) + return web.Response(content_type="text/html", body=body) + + +@cors_allow_all +async def status_check_fastapi(request: web.Request, vm_id: ItemHash | None = None): + """Check that the FastAPI diagnostic VM runs correctly""" + + # Retro-compatibility mode ignores some of the newer checks. It is used to check the status of legacy VMs. + retro_compatibility: bool = ( + vm_id == settings.LEGACY_CHECK_FASTAPI_VM_ID + or request.rel_url.query.get("retro-compatibility", "false") == "true" + ) + # Default to the value in the settings. + fastapi_vm_id: ItemHash = vm_id or ItemHash(settings.CHECK_FASTAPI_VM_ID) + + try: + async with aiohttp.ClientSession() as session: + result = { + "index": await status.check_index(session, fastapi_vm_id), + "environ": await status.check_environ(session, fastapi_vm_id), + "messages": await status.check_messages(session, fastapi_vm_id), + # Using the remote account currently causes issues + # "post_a_message": await status.check_post_a_message(session, fastapi_vm_id), + # "sign_a_message": await status.check_sign_a_message(session, fastapi_vm_id), + "dns": await status.check_dns(session, fastapi_vm_id), + "ipv4": await status.check_ipv4(session, fastapi_vm_id), + "internet": await status.check_internet(session, fastapi_vm_id), + "cache": await status.check_cache(session, fastapi_vm_id), + "persistent_storage": await status.check_persistent_storage(session, fastapi_vm_id), + "error_handling": await status.check_error_raised(session, fastapi_vm_id), + } + if not retro_compatibility: + # These fields were added in the runtime running Debian 12. 
+ result = result | { + "get_a_message": await status.check_get_a_message(session, fastapi_vm_id), + "lifespan": await status.check_lifespan(session, fastapi_vm_id), + # IPv6 requires extra work from node operators and is not required yet. + # "ipv6": await status.check_ipv6(session), + } + + return web.json_response(result, status=200 if all(result.values()) else 503) + except aiohttp.ServerDisconnectedError as error: + return web.json_response({"error": f"Server disconnected: {error}"}, status=503) + + +@cors_allow_all +async def status_check_fastapi_legacy(request: web.Request): + """Check that the legacy FastAPI VM runs correctly""" + return await status_check_fastapi(request, vm_id=ItemHash(settings.LEGACY_CHECK_FASTAPI_VM_ID)) + + +@cors_allow_all +async def status_check_host(request: web.Request): + """Check that the platform is supported and configured correctly""" + + result = { + "ipv4": { + "egress": await check_host_egress_ipv4(), + "dns": await check_dns_ipv4(), + "domain": await check_domain_resolution_ipv4(), + }, + "ipv6": { + "egress": await check_host_egress_ipv6(), + "dns": await check_dns_ipv6(), + "domain": await check_domain_resolution_ipv6(), + }, + } + result_status = 200 if all(result["ipv4"].values()) and all(result["ipv6"].values()) else 503 + return web.json_response(result, status=result_status) + + +@cors_allow_all +async def status_check_ipv6(request: web.Request): + """Check that the platform has IPv6 egress connectivity""" + timeout = aiohttp.ClientTimeout(total=2) + async with aiohttp.ClientSession(timeout=timeout) as session: + try: + vm_ipv6 = await status.check_ipv6(session, vm_id=ItemHash(settings.CHECK_FASTAPI_VM_ID)) + except TimeoutError: + vm_ipv6 = False + + result = {"host": await check_host_egress_ipv6(), "vm": vm_ipv6} + return web.json_response(result) + + +@cors_allow_all +async def status_check_version(request: web.Request): + """Check if the software is running a version equal or newer than the given one""" + 
reference_str: str | None = request.query.get("reference") + if not reference_str: + raise web.HTTPBadRequest(text="Query field '?reference=` must be specified") + try: + reference = Version(reference_str) + except InvalidVersion as error: + raise web.HTTPBadRequest(text=error.args[0]) from error + + try: + current = Version(__version__) + except InvalidVersion as error: + raise web.HTTPServiceUnavailable(text=error.args[0]) from error + + if current >= reference: + return web.Response( + status=200, + text=f"Up-to-date: version {current} >= {reference}", + ) + else: + return web.HTTPForbidden(text=f"Outdated: version {current} < {reference}") + + +@cors_allow_all +async def status_public_config(request: web.Request): + """Expose the public fields from the configuration""" + + available_payments = { + str(chain_name): chain_info for chain_name, chain_info in STREAM_CHAINS.items() if chain_info.active + } + + return web.json_response( + { + "DOMAIN_NAME": settings.DOMAIN_NAME, + "version": __version__, + "references": { + "API_SERVER": settings.API_SERVER, + "CHECK_FASTAPI_VM_ID": settings.CHECK_FASTAPI_VM_ID, + "CONNECTOR_URL": settings.CONNECTOR_URL, + }, + "security": { + "USE_JAILER": settings.USE_JAILER, + "PRINT_SYSTEM_LOGS": settings.PRINT_SYSTEM_LOGS, + "WATCH_FOR_UPDATES": settings.WATCH_FOR_UPDATES, + "ALLOW_VM_NETWORKING": settings.ALLOW_VM_NETWORKING, + "USE_DEVELOPER_SSH_KEYS": bool(settings.USE_DEVELOPER_SSH_KEYS), + }, + "networking": { + "IPV6_ADDRESS_POOL": settings.IPV6_ADDRESS_POOL, + "IPV6_ALLOCATION_POLICY": str(settings.IPV6_ALLOCATION_POLICY), + "IPV6_SUBNET_PREFIX": settings.IPV6_SUBNET_PREFIX, + "IPV6_FORWARDING_ENABLED": settings.IPV6_FORWARDING_ENABLED, + "USE_NDP_PROXY": settings.USE_NDP_PROXY, + }, + "debug": { + "SENTRY_DSN_CONFIGURED": bool(settings.SENTRY_DSN), + "DEBUG_ASYNCIO": settings.DEBUG_ASYNCIO, + "EXECUTION_LOG_ENABLED": settings.EXECUTION_LOG_ENABLED, + }, + "payment": { + "PAYMENT_RECEIVER_ADDRESS": 
settings.PAYMENT_RECEIVER_ADDRESS, + "AVAILABLE_PAYMENTS": available_payments, + "PAYMENT_MONITOR_INTERVAL": settings.PAYMENT_MONITOR_INTERVAL, + }, + "computing": { + "ENABLE_QEMU_SUPPORT": settings.ENABLE_QEMU_SUPPORT, + "INSTANCE_DEFAULT_HYPERVISOR": settings.INSTANCE_DEFAULT_HYPERVISOR, + "ENABLE_CONFIDENTIAL_COMPUTING": settings.ENABLE_CONFIDENTIAL_COMPUTING, + "ENABLE_GPU_SUPPORT": settings.ENABLE_GPU_SUPPORT, + }, + }, + dumps=dumps_for_json, + ) + + +def authenticate_api_request(request: web.Request) -> bool: + """Authenticate an API request to update the VM allocations.""" + signature: bytes = request.headers.get("X-Auth-Signature", "").encode() + + if not signature: + raise web.HTTPUnauthorized(text="Authentication token is missing") + + # Use a simple authentication method: the hash of the signature should match the value in the settings + return sha256(signature).hexdigest() == settings.ALLOCATION_TOKEN_HASH + + +async def update_allocations(request: web.Request): + """Main entry for the start of persistence VM and instance, called by the Scheduler, + + + auth via the SETTINGS.ALLOCATION_TOKEN_HASH sent in header X-Auth-Signature. + Receive a list of vm and instance that should be present and then match that state by stopping and launching VMs + """ + if not authenticate_api_request(request): + return web.HTTPUnauthorized(text="Authentication token received is invalid") + + try: + data = await request.json() + allocation = Allocation.parse_obj(data) + except ValidationError as error: + return web.json_response(text=error.json(), status=web.HTTPBadRequest.status_code) + + pubsub: PubSub = request.app["pubsub"] + pool: VmPool = request.app["vm_pool"] + + # First free resources from persistent programs and instances that are not scheduled anymore. 
+ allocations = allocation.persistent_vms | allocation.instances + # Make a copy since the pool is modified + for execution in list(pool.get_persistent_executions()): + if execution.vm_hash not in allocations and execution.is_running and not execution.uses_payment_stream: + vm_type = "instance" if execution.is_instance else "persistent program" + logger.info("Stopping %s %s", vm_type, execution.vm_hash) + await pool.stop_vm(execution.vm_hash) + pool.forget_vm(execution.vm_hash) + + # Second start persistent VMs and instances sequentially to limit resource usage. + + # Exceptions that can be raised when starting a VM: + vm_creation_exceptions = ( + UnknownHashError, + ResourceDownloadError, + FileTooLargeError, + VmSetupError, + MicroVMFailedInitError, + HostNotFoundError, + HTTPNotFound, + ) + + scheduling_errors: dict[ItemHash, Exception] = {} + + # Schedule the start of persistent VMs: + for vm_hash in allocation.persistent_vms: + try: + logger.info(f"Starting long running VM '{vm_hash}'") + vm_hash = ItemHash(vm_hash) + await start_persistent_vm(vm_hash, pubsub, pool) + except vm_creation_exceptions as error: + logger.exception("Error while starting VM '%s': %s", vm_hash, error) + scheduling_errors[vm_hash] = error + except Exception as error: + # Handle unknown exception separately, to avoid leaking data + logger.exception("Unhandled Error while starting VM '%s': %s", vm_hash, error) + scheduling_errors[vm_hash] = Exception("Unhandled Error") + + # Schedule the start of instances: + for instance_hash in allocation.instances: + logger.info(f"Starting instance '{instance_hash}'") + instance_item_hash = ItemHash(instance_hash) + try: + await start_persistent_vm(instance_item_hash, pubsub, pool) + except vm_creation_exceptions as error: + logger.exception("Error while starting VM '%s': %s", instance_hash, error) + scheduling_errors[instance_item_hash] = error + except Exception as error: + # Handle unknown exception separately, to avoid leaking data + 
logger.exception("Unhandled Error while starting VM '%s': %s", instance_hash, error) + scheduling_errors[instance_hash] = Exception("Unhandled Error") + + # Log unsupported features + if allocation.on_demand_vms: + logger.warning("Not supported yet: 'allocation.on_demand_vms'") + if allocation.jobs: + logger.warning("Not supported yet: 'allocation.jobs'") + + failing = set(scheduling_errors.keys()) + successful = allocations - failing + + status_code: int + if not failing: + status_code = 200 # OK + elif not successful: + status_code = 503 # Service Unavailable + else: + status_code = 207 # Multi-Status + + return web.json_response( + data={ + "success": not failing, + "successful": list(successful), + "failing": list(failing), + "errors": {vm_hash: repr(error) for vm_hash, error in scheduling_errors.items()}, + }, + status=status_code, + ) + + +@cors_allow_all +async def notify_allocation(request: web.Request): + """Notify instance allocation, only used for Pay as you Go feature""" + await update_aggregate_settings() + try: + data = await request.json() + vm_notification = VMNotification.parse_obj(data) + except JSONDecodeError: + return web.HTTPBadRequest(reason="Body is not valid JSON") + except ValidationError as error: + return web.json_response(data=error.json(), status=web.HTTPBadRequest.status_code) + + pubsub: PubSub = request.app["pubsub"] + pool: VmPool = request.app["vm_pool"] + + item_hash: ItemHash = vm_notification.instance + message = await try_get_message(item_hash) + if message.type != MessageType.instance: + return web.HTTPBadRequest(reason="Message is not an instance") + + payment_type = message.content.payment and message.content.payment.type or PaymentType.hold + + is_confidential = message.content.environment.trusted_execution is not None + have_gpu = message.content.requirements and message.content.requirements.gpu is not None + + if payment_type == PaymentType.hold and (is_confidential or have_gpu): + # Log confidential and instances with 
GPU support + if is_confidential: + logger.debug(f"Confidential instance {item_hash} not using PAYG") + if have_gpu: + logger.debug(f"GPU Instance {item_hash} not using PAYG") + user_balance = await payment.fetch_balance_of_address(message.sender) + hold_price = await payment.fetch_execution_hold_price(item_hash) + logger.debug(f"Address {message.sender} Balance: {user_balance}, Price: {hold_price}") + if hold_price > user_balance: + return web.HTTPPaymentRequired( + reason="Insufficient balance", + text="Insufficient balance for this instance\n\n" + f"Required: {hold_price} token \n" + f"Current user balance: {user_balance}", + ) + elif payment_type == PaymentType.superfluid: + # Payment via PAYG + if message.content.payment.receiver != settings.PAYMENT_RECEIVER_ADDRESS: + return web.HTTPBadRequest(reason="Message is not for this instance") + + # Check that there is a payment stream for this instance + try: + active_flow: Decimal = await get_stream( + sender=message.sender, receiver=message.content.payment.receiver, chain=message.content.payment.chain + ) + except InvalidAddressError as error: + logger.warning(f"Invalid address {error}", exc_info=True) + return web.HTTPBadRequest(reason=f"Invalid address {error}") + except InvalidChainError as error: + logger.warning(f"Invalid chain {error}", exc_info=True) + return web.HTTPBadRequest(reason=f"Invalid Chain {error}") + + if not active_flow: + raise web.HTTPPaymentRequired(reason="Empty payment stream for this instance") + + required_flow: Decimal = await fetch_execution_flow_price(item_hash) + community_wallet = await get_community_wallet_address() + required_crn_stream: Decimal + required_community_stream: Decimal + if await is_after_community_wallet_start() and community_wallet: + required_crn_stream = format_cost(required_flow * (1 - COMMUNITY_STREAM_RATIO)) + required_community_stream = format_cost(required_flow * COMMUNITY_STREAM_RATIO) + else: # No community wallet payment + required_crn_stream = 
format_cost(required_flow) + required_community_stream = Decimal(0) + + if active_flow < (required_crn_stream - settings.PAYMENT_BUFFER): + active_flow_per_month = active_flow * 60 * 60 * 24 * (Decimal("30.41666666666923904761904784")) + required_flow_per_month = required_crn_stream * 60 * 60 * 24 * Decimal("30.41666666666923904761904784") + return web.HTTPPaymentRequired( + reason="Insufficient payment stream", + text="Insufficient payment stream for this instance\n\n" + f"Required: {required_flow_per_month} / month (flow = {required_crn_stream})\n" + f"Present: {active_flow_per_month} / month (flow = {active_flow})", + ) + + if community_wallet and required_community_stream: + community_flow: Decimal = await get_stream( + sender=message.sender, + receiver=community_wallet, + chain=message.content.payment.chain, + ) + if community_flow < (required_community_stream - settings.PAYMENT_BUFFER): + active_flow_per_month = community_flow * 60 * 60 * 24 * (Decimal("30.41666666666923904761904784")) + required_flow_per_month = ( + required_community_stream * 60 * 60 * 24 * Decimal("30.41666666666923904761904784") + ) + return web.HTTPPaymentRequired( + reason="Insufficient payment stream to community", + text="Insufficient payment stream for community \n\n" + f"Required: {required_flow_per_month} / month (flow = {required_community_stream})\n" + f"Present: {active_flow_per_month} / month (flow = {community_flow})\n" + f"Address: {community_wallet}", + ) + else: + return web.HTTPBadRequest(reason="Invalid payment method") + + # Exceptions that can be raised when starting a VM: + vm_creation_exceptions = ( + UnknownHashError, + ResourceDownloadError, + FileTooLargeError, + VmSetupError, + MicroVMFailedInitError, + HostNotFoundError, + ) + + scheduling_errors: dict[ItemHash, Exception] = {} + try: + logger.info(f"Starting persistent vm {item_hash} from notify_allocation") + await start_persistent_vm(item_hash, pubsub, pool) + successful = True + except vm_creation_exceptions 
as error: + logger.exception(error) + scheduling_errors[item_hash] = error + successful = False + + failing = set(scheduling_errors.keys()) + + status_code: int + if not failing: + status_code = 200 # OK + elif not successful: + status_code = 503 # Service Unavailable + else: + status_code = 207 # Multi-Status + + return web.json_response( + data={ + "success": not failing, + "successful": successful, + "failing": list(failing), + "errors": {vm_hash: repr(error) for vm_hash, error in scheduling_errors.items()}, + }, + status=status_code, + ) diff --git a/src/aleph/vm/orchestrator/views/authentication.py b/src/aleph/vm/orchestrator/views/authentication.py new file mode 100644 index 000000000..55ed624ef --- /dev/null +++ b/src/aleph/vm/orchestrator/views/authentication.py @@ -0,0 +1,297 @@ +"""Functions for authentications + +See /doc/operator_auth.md for the explanation of how the operator authentication works. + +Can be enabled on an endpoint using the @require_jwk_authentication decorator +""" + +# Keep datetime import as is as it allow patching in test +import datetime +import functools +import json +import logging +from collections.abc import Awaitable, Callable, Coroutine +from typing import Any, Literal + +import cryptography.exceptions +import pydantic +from aiohttp import web +from aleph_message.models import Chain +from eth_account import Account +from eth_account.messages import encode_defunct +from jwcrypto import jwk +from jwcrypto.jwa import JWA +from nacl.exceptions import BadSignatureError +from pydantic import BaseModel, ValidationError, root_validator, validator +from solathon.utils import verify_signature + +from aleph.vm.conf import settings + +logger = logging.getLogger(__name__) + + +def is_token_still_valid(datestr: str): + """ + Checks if a token has expired based on its expiry timestamp + """ + current_datetime = datetime.datetime.now(tz=datetime.timezone.utc) + expiry_datetime = datetime.datetime.fromisoformat(datestr.replace("Z", "+00:00")) 
+ + return expiry_datetime > current_datetime + + +def verify_eth_wallet_signature(signature, message, address): + """ + Verifies a signature issued by a wallet + """ + enc_msg = encode_defunct(hexstr=message) + computed_address = Account.recover_message(enc_msg, signature=signature) + return computed_address.lower() == address.lower() + + +def check_wallet_signature_or_raise(address, chain, payload, signature): + if chain == Chain.SOL: + try: + verify_signature(address, signature, payload.hex()) + except BadSignatureError: + msg = "Invalid signature" + raise ValueError(msg) + elif chain == "ETH": + if not verify_eth_wallet_signature(signature, payload.hex(), address): + msg = "Invalid signature" + raise ValueError(msg) + else: + raise ValueError("Unsupported chain") + + +class SignedPubKeyPayload(BaseModel): + """This payload is signed by the wallet of the user to authorize an ephemeral key to act on his behalf.""" + + pubkey: dict[str, Any] + # {'pubkey': {'alg': 'ES256', 'crv': 'P-256', 'ext': True, 'key_ops': ['verify'], 'kty': 'EC', + # 'x': '4blJBYpltvQLFgRvLE-2H7dsMr5O0ImHkgOnjUbG2AU', 'y': '5VHnq_hUSogZBbVgsXMs0CjrVfMy4Pa3Uv2BEBqfrN4'} + # alg: Literal["ECDSA"] + address: str + expires: str + chain: Chain = Chain.ETH + + def check_chain(self, v: Chain): + if v not in (Chain.ETH, Chain.SOL): + raise ValueError("Chain not supported") + return v + + @property + def json_web_key(self) -> jwk.JWK: + """Return the ephemeral public key as Json Web Key""" + return jwk.JWK(**self.pubkey) + + +class SignedPubKeyHeader(BaseModel): + signature: bytes + payload: bytes + + @validator("signature") + def signature_must_be_hex(cls, v: bytes) -> bytes: + """Convert the signature from hexadecimal to bytes""" + return bytes.fromhex(v.removeprefix(b"0x").decode()) + + @validator("payload") + def payload_must_be_hex(cls, v: bytes) -> bytes: + """Convert the payload from hexadecimal to bytes""" + return bytes.fromhex(v.decode()) + + @root_validator(pre=False, 
skip_on_failure=True) + def check_expiry(cls, values) -> dict[str, bytes]: + """Check that the token has not expired""" + payload: bytes = values["payload"] + content = SignedPubKeyPayload.parse_raw(payload) + if not is_token_still_valid(content.expires): + msg = "Token expired" + raise ValueError(msg) + return values + + @root_validator(pre=False, skip_on_failure=True) + def check_signature(cls, values) -> dict[str, bytes]: + """Check that the signature is valid""" + signature: list = values["signature"] + payload: bytes = values["payload"] + content = SignedPubKeyPayload.parse_raw(payload) + check_wallet_signature_or_raise(content.address, content.chain, payload, signature) + return values + + @property + def content(self) -> SignedPubKeyPayload: + """Return the content of the header""" + return SignedPubKeyPayload.parse_raw(self.payload) + + +class SignedOperationPayload(BaseModel): + time: datetime.datetime + method: Literal["POST"] | Literal["GET"] + domain: str + path: str + # body_sha256: str # disabled since there is no body + + @validator("time") + def time_is_current(cls, v: datetime.datetime) -> datetime.datetime: + """Check that the time is current and the payload is not a replay attack.""" + max_past = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(minutes=2) + max_future = datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(minutes=2) + if v < max_past: + msg = "Time is too far in the past" + raise ValueError(msg) + if v > max_future: + msg = "Time is too far in the future" + raise ValueError(msg) + return v + + +class SignedOperation(BaseModel): + """This payload is signed by the ephemeral key authorized above.""" + + signature: bytes + payload: bytes + + @validator("signature") + def signature_must_be_hex(cls, v) -> bytes: + """Convert the signature from hexadecimal to bytes""" + try: + return bytes.fromhex(v.removeprefix(b"0x").decode()) + except pydantic.ValidationError as error: + print(v) + 
logger.warning(v) + raise error + + @validator("payload") + def payload_must_be_hex(cls, v) -> bytes: + """Convert the payload from hexadecimal to bytes""" + v = bytes.fromhex(v.decode()) + _ = SignedOperationPayload.parse_raw(v) + return v + + @property + def content(self) -> SignedOperationPayload: + """Return the content of the header""" + return SignedOperationPayload.parse_raw(self.payload) + + +def get_signed_pubkey(request: web.Request) -> SignedPubKeyHeader: + """Get the ephemeral public key that is signed by the wallet from the request headers.""" + signed_pubkey_header = request.headers.get("X-SignedPubKey") + if not signed_pubkey_header: + raise web.HTTPBadRequest(reason="Missing X-SignedPubKey header") + + try: + return SignedPubKeyHeader.parse_raw(signed_pubkey_header) + except KeyError as error: + logger.debug(f"Missing X-SignedPubKey header: {error}") + raise web.HTTPBadRequest(reason="Invalid X-SignedPubKey fields") from error + except json.JSONDecodeError as error: + raise web.HTTPBadRequest(reason="Invalid X-SignedPubKey format") from error + except ValueError as errors: + logging.debug(errors) + for err in errors.args[0]: + if isinstance(err.exc, json.JSONDecodeError): + raise web.HTTPBadRequest(reason="Invalid X-SignedPubKey format") from errors + if str(err.exc) == "Token expired": + raise web.HTTPUnauthorized(reason="Token expired") from errors + if str(err.exc) == "Invalid signature": + raise web.HTTPUnauthorized(reason="Invalid signature") from errors + raise errors + + +def get_signed_operation(request: web.Request) -> SignedOperation: + """Get the signed operation public key that is signed by the ephemeral key from the request headers.""" + try: + signed_operation = request.headers["X-SignedOperation"] + return SignedOperation.parse_raw(signed_operation) + except KeyError as error: + raise web.HTTPBadRequest(reason="Missing X-SignedOperation header") from error + except json.JSONDecodeError as error: + raise 
web.HTTPBadRequest(reason="Invalid X-SignedOperation format") from error
+    except ValidationError as error:
+        logger.debug(f"Invalid X-SignedOperation fields: {error}")
+        raise web.HTTPBadRequest(reason="Invalid X-SignedOperation fields") from error
+
+
+def verify_signed_operation(signed_operation: SignedOperation, signed_pubkey: SignedPubKeyHeader) -> str:
+    """Verify that the operation is signed by the ephemeral key authorized by the wallet."""
+    pubkey = signed_pubkey.content.json_web_key
+
+    try:
+        JWA.signing_alg("ES256").verify(pubkey, signed_operation.payload, signed_operation.signature)
+        logger.debug("Signature verified")
+        return signed_pubkey.content.address
+    except cryptography.exceptions.InvalidSignature as e:
+        logger.debug("Failed to validate signature for operation: %s", e)
+        raise web.HTTPUnauthorized(reason="Signature could not be verified")
+
+
+async def authenticate_jwk(request: web.Request) -> str:
+    """Authenticate a request using the X-SignedPubKey and X-SignedOperation headers."""
+    signed_pubkey = get_signed_pubkey(request)
+
+    signed_operation = get_signed_operation(request)
+    if signed_operation.content.domain != settings.DOMAIN_NAME:
+        logger.debug(f"Invalid domain '{signed_operation.content.domain}' != '{settings.DOMAIN_NAME}'")
+        raise web.HTTPUnauthorized(reason="Invalid domain")
+    if signed_operation.content.path != request.path:
+        logger.debug(f"Invalid path '{signed_operation.content.path}' != '{request.path}'")
+        raise web.HTTPUnauthorized(reason="Invalid path")
+    if signed_operation.content.method != request.method:
+        logger.debug(f"Invalid method '{signed_operation.content.method}' != '{request.method}'")
+        raise web.HTTPUnauthorized(reason="Invalid method")
+    return verify_signed_operation(signed_operation, signed_pubkey)
+
+
+async def authenticate_websocket_message(message) -> str:
+    """Authenticate a websocket message since JS cannot configure headers on WebSockets."""
+    if not isinstance(message, dict):
+        raise 
Exception("Invalid format for auth packet, see /doc/operator_auth.md") + signed_pubkey = SignedPubKeyHeader.parse_obj(message["X-SignedPubKey"]) + signed_operation = SignedOperation.parse_obj(message["X-SignedOperation"]) + if signed_operation.content.domain != settings.DOMAIN_NAME: + logger.debug(f"Invalid domain '{signed_operation.content.domain}' != '{settings.DOMAIN_NAME}'") + raise web.HTTPUnauthorized(reason="Invalid domain") + return verify_signed_operation(signed_operation, signed_pubkey) + + +def require_jwk_authentication( + handler: Callable[[web.Request, str], Coroutine[Any, Any, web.StreamResponse]], +) -> Callable[[web.Request], Awaitable[web.StreamResponse]]: + """A decorator to enforce JWK-based authentication for HTTP requests. + + The decorator ensures that the incoming request includes valid authentication headers + (as per the VM owner authentication protocol) and provides the authenticated wallet address (`authenticated_sender`) + to the handler. The handler can then use this address to verify access to the requested resource. + + Args: + handler (Callable[[web.Request, str], Coroutine[Any, Any, web.StreamResponse]]): + The request handler function that will receive the `authenticated_sender` (the authenticated wallet address) + as an additional argument. + + Returns: + Callable[[web.Request], Awaitable[web.StreamResponse]]: + A wrapped handler that verifies the authentication and passes the wallet address to the handler. + + Note: + Refer to the "Authentication protocol for VM owner" documentation for detailed information on the authentication + headers and validation process. 
+ """ + + @functools.wraps(handler) + async def wrapper(request): + try: + authenticated_sender: str = await authenticate_jwk(request) + except web.HTTPException as e: + return web.json_response(data={"error": e.reason}, status=e.status) + except Exception as e: + # Unexpected make sure to log it + logging.exception(e) + raise + + # authenticated_sender is the authenticate wallet address of the requester (as a string) + response = await handler(request, authenticated_sender) + return response + + return wrapper diff --git a/src/aleph/vm/orchestrator/views/host_status.py b/src/aleph/vm/orchestrator/views/host_status.py new file mode 100644 index 000000000..605de9b62 --- /dev/null +++ b/src/aleph/vm/orchestrator/views/host_status.py @@ -0,0 +1,108 @@ +import logging +import socket +from collections.abc import Awaitable, Callable +from typing import Any + +import aiohttp + +from aleph.vm.conf import settings + +logger = logging.getLogger(__name__) + + +def return_false_on_timeout(func: Callable[..., Awaitable[Any]]) -> Callable[..., Awaitable[bool]]: + async def wrapper(*args: Any, **kwargs: Any) -> bool: + try: + return await func(*args, **kwargs) + except TimeoutError: + logger.warning(f"Timeout while checking {func.__name__}") + return False + + return wrapper + + +async def check_ip_connectivity(url: str, socket_family: socket.AddressFamily = socket.AF_UNSPEC) -> bool: + timeout = aiohttp.ClientTimeout(total=5) + async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(family=socket_family), timeout=timeout) as session: + try: + async with session.get(url) as resp: + # We expect the Quad9 endpoints to return a 404 error, but other endpoints may return a 200 + if resp.status not in (200, 404): + resp.raise_for_status() + return True + except aiohttp.ClientConnectorError: + return False + + +@return_false_on_timeout +async def check_host_egress_ipv4() -> bool: + """Check if the host has IPv4 connectivity.""" + return await 
check_ip_connectivity(settings.CONNECTIVITY_IPV4_URL) + + +@return_false_on_timeout +async def check_host_egress_ipv6() -> bool: + """Check if the host has IPv6 connectivity.""" + return await check_ip_connectivity(settings.CONNECTIVITY_IPV6_URL) + + +async def resolve_dns(hostname: str) -> tuple[str | None, str | None]: + """Resolve a hostname to an IPv4 and IPv6 address.""" + ipv4: str | None = None + ipv6: str | None = None + + info = socket.getaddrinfo(hostname, 80, proto=socket.IPPROTO_TCP) + if not info: + logger.error("DNS resolution failed") + + # Iterate over the results to find the IPv4 and IPv6 addresses they may not all be present. + # The function returns a list of 5-tuples with the following structure: + # (family, type, proto, canonname, sockaddr) + for info_tuple in info: + if info_tuple[0] == socket.AF_INET: + ipv4 = info_tuple[4][0] + elif info_tuple[0] == socket.AF_INET6: + ipv6 = info_tuple[4][0] + + if ipv4 and not ipv6: + logger.warning(f"DNS resolution for {hostname} returned only an IPv4 address") + elif ipv6 and not ipv4: + logger.warning(f"DNS resolution for {hostname} returned only an IPv6 address") + + return ipv4, ipv6 + + +async def check_dns_ipv4() -> bool: + """Check if DNS resolution is working via IPv4.""" + ipv4, _ = await resolve_dns(settings.CONNECTIVITY_DNS_HOSTNAME) + return bool(ipv4) + + +async def check_dns_ipv6() -> bool: + """Check if DNS resolution is working via IPv6.""" + _, ipv6 = await resolve_dns(settings.CONNECTIVITY_DNS_HOSTNAME) + return bool(ipv6) + + +async def check_domain_resolution_ipv4() -> bool: + """Check if the host's hostname resolves to an IPv4 address.""" + ipv4, _ = await resolve_dns(settings.DOMAIN_NAME) + return bool(ipv4) + + +async def check_domain_resolution_ipv6() -> bool: + """Check if the host's hostname resolves to an IPv6 address.""" + _, ipv6 = await resolve_dns(settings.DOMAIN_NAME) + return bool(ipv6) + + +@return_false_on_timeout +async def check_domain_ipv4() -> bool: + """Check if the 
host's hostname is accessible via IPv4.""" + return await check_ip_connectivity(settings.DOMAIN_NAME, socket.AF_INET) + + +@return_false_on_timeout +async def check_domain_ipv6() -> bool: + """Check if the host's hostname is accessible via IPv6.""" + return await check_ip_connectivity(settings.DOMAIN_NAME, socket.AF_INET6) diff --git a/src/aleph/vm/orchestrator/views/operator.py b/src/aleph/vm/orchestrator/views/operator.py new file mode 100644 index 000000000..b808e94fe --- /dev/null +++ b/src/aleph/vm/orchestrator/views/operator.py @@ -0,0 +1,423 @@ +import json +import logging +from datetime import timedelta +from http import HTTPStatus + +import aiohttp.web_exceptions +import pydantic +from aiohttp import web +from aiohttp.web_urldispatcher import UrlMappingMatchInfo +from aleph_message.exceptions import UnknownHashError +from aleph_message.models import ItemHash +from aleph_message.models.execution import BaseExecutableContent +from pydantic import BaseModel + +from aleph.vm.conf import settings +from aleph.vm.controllers.qemu.client import QemuVmClient +from aleph.vm.models import VmExecution +from aleph.vm.orchestrator import metrics +from aleph.vm.orchestrator.custom_logs import set_vm_for_logging +from aleph.vm.orchestrator.run import create_vm_execution_or_raise_http_error +from aleph.vm.orchestrator.views.authentication import ( + authenticate_websocket_message, + require_jwk_authentication, +) +from aleph.vm.pool import VmPool +from aleph.vm.utils import ( + cors_allow_all, + dumps_for_json, + get_message_executable_content, +) +from aleph.vm.utils.logs import get_past_vm_logs + +logger = logging.getLogger(__name__) + + +def get_itemhash_or_400(match_info: UrlMappingMatchInfo) -> ItemHash: + try: + ref = match_info["ref"] + except KeyError as error: + raise aiohttp.web_exceptions.HTTPBadRequest(body="Missing field: 'ref'") from error + try: + return ItemHash(ref) + except UnknownHashError as error: + raise 
aiohttp.web_exceptions.HTTPBadRequest(body=f"Invalid ref: '{ref}'") from error + + +def get_execution_or_404(ref: ItemHash, pool: VmPool) -> VmExecution: + """Return the execution corresponding to the ref or raise an HTTP 404 error.""" + # TODO: Check if this should be execution.message.address or execution.message.content.address? + execution = pool.executions.get(ref) + if execution: + return execution + else: + raise web.HTTPNotFound(body=f"No virtual machine with ref {ref}") + + +def is_sender_authorized(authenticated_sender: str, message: BaseExecutableContent) -> bool: + if authenticated_sender.lower() == message.address.lower(): + return True + else: + logger.debug(f"Unauthorized sender {authenticated_sender} is not {message.address}") + return False + + +@cors_allow_all +async def stream_logs(request: web.Request) -> web.StreamResponse: + """Stream the logs of a VM. + + The authentication method is slightly different because browsers do not + allow Javascript to set headers in WebSocket requests. 
+ """ + vm_hash = get_itemhash_or_400(request.match_info) + with set_vm_for_logging(vm_hash=vm_hash): + pool: VmPool = request.app["vm_pool"] + execution = get_execution_or_404(vm_hash, pool=pool) + + if execution.vm is None: + raise web.HTTPBadRequest(body=f"VM {vm_hash} is not running") + queue = None + try: + ws = web.WebSocketResponse() + logger.info(f"starting websocket: {request.path}") + await ws.prepare(request) + try: + await authenticate_websocket_for_vm_or_403(execution, vm_hash, ws) + await ws.send_json({"status": "connected"}) + + queue = execution.vm.get_log_queue() + + while True: + log_type, message = await queue.get() + assert log_type in ("stdout", "stderr") + logger.debug(message) + + await ws.send_json({"type": log_type, "message": message}) + queue.task_done() + + finally: + await ws.close() + logger.info(f"connection {ws} closed") + + finally: + if queue: + execution.vm.unregister_queue(queue) + + +@cors_allow_all +@require_jwk_authentication +async def operate_logs_json(request: web.Request, authenticated_sender: str) -> web.StreamResponse: + """Logs of a VM (not streaming) as json""" + vm_hash = get_itemhash_or_400(request.match_info) + with set_vm_for_logging(vm_hash=vm_hash): + # This endpoint allow logs for past executions, so we look into the database if any execution by that hash + # occurred, which we can then use to look for rights. 
We still check in the pool first, it is faster + pool: VmPool = request.app["vm_pool"] + execution = pool.executions.get(vm_hash) + if execution: + message = execution.message + else: + record = await metrics.get_last_record_for_vm(vm_hash=vm_hash) + if not record: + raise aiohttp.web_exceptions.HTTPNotFound(body="No execution found for this VM") + message = get_message_executable_content(json.loads(record.message)) + if not is_sender_authorized(authenticated_sender, message): + return web.Response(status=403, body="Unauthorized sender") + + _journal_stdout_name = f"vm-{vm_hash}-stdout" + _journal_stderr_name = f"vm-{vm_hash}-stderr" + + response = web.StreamResponse() + response.headers["Transfer-encoding"] = "chunked" + response.headers["Content-Type"] = "application/json" + await response.prepare(request) + await response.write(b"[") + + first = True + for entry in get_past_vm_logs(_journal_stdout_name, _journal_stderr_name): + if not first: + await response.write(b",\n") + first = False + log_type = "stdout" if entry["SYSLOG_IDENTIFIER"] == _journal_stdout_name else "stderr" + msg = { + "SYSLOG_IDENTIFIER": entry["SYSLOG_IDENTIFIER"], + "MESSAGE": entry["MESSAGE"], + "file": log_type, + "__REALTIME_TIMESTAMP": entry["__REALTIME_TIMESTAMP"], + } + await response.write(dumps_for_json(msg).encode()) + await response.write(b"]") + + await response.write_eof() + return response + + +async def authenticate_websocket_for_vm_or_403(execution: VmExecution, vm_hash: ItemHash, ws: web.WebSocketResponse): + """Authenticate a websocket connection. + + Web browsers do not allow setting headers in WebSocket requests, so the authentication + relies on the first message sent by the client. 
+ """ + try: + first_message = await ws.receive_json() + except TypeError as error: + logging.exception(error) + await ws.send_json({"status": "failed", "reason": str(error)}) + raise web.HTTPForbidden(body="Invalid auth package") + credentials = first_message["auth"] + + try: + authenticated_sender = await authenticate_websocket_message(credentials) + + if is_sender_authorized(authenticated_sender, execution.message): + logger.debug(f"Accepted request to access logs by {authenticated_sender} on {vm_hash}") + return True + except Exception as error: + # Error occurred (invalid auth packet or other + await ws.send_json({"status": "failed", "reason": str(error)}) + raise web.HTTPForbidden(body="Unauthorized sender") + + # Auth was valid but not the correct user + logger.debug(f"Denied request to access logs by {authenticated_sender} on {vm_hash}") + await ws.send_json({"status": "failed", "reason": "unauthorized sender"}) + raise web.HTTPForbidden(body="Unauthorized sender") + + +@cors_allow_all +@require_jwk_authentication +async def operate_expire(request: web.Request, authenticated_sender: str) -> web.Response: + """Stop the virtual machine, smoothly if possible. 
+ + A timeout may be specified to delay the action.""" + vm_hash = get_itemhash_or_400(request.match_info) + with set_vm_for_logging(vm_hash=vm_hash): + try: + timeout = float(ItemHash(request.match_info["timeout"])) + except (KeyError, ValueError) as error: + raise web.HTTPBadRequest(body="Invalid timeout duration") from error + if not 0 < timeout < timedelta(days=10).total_seconds(): + return web.HTTPBadRequest(body="Invalid timeout duration") + + pool: VmPool = request.app["vm_pool"] + execution = get_execution_or_404(vm_hash, pool=pool) + + if not is_sender_authorized(authenticated_sender, execution.message): + return web.Response(status=403, body="Unauthorized sender") + + logger.info(f"Expiring in {timeout} seconds: {execution.vm_hash}") + await execution.expire(timeout=timeout) + execution.persistent = False + + return web.Response(status=200, body=f"Expiring VM with ref {vm_hash} in {timeout} seconds") + + +@cors_allow_all +@require_jwk_authentication +async def operate_confidential_initialize(request: web.Request, authenticated_sender: str) -> web.Response: + """Start the confidential virtual machine if possible.""" + vm_hash = get_itemhash_or_400(request.match_info) + with set_vm_for_logging(vm_hash=vm_hash): + pool: VmPool = request.app["vm_pool"] + logger.debug(f"Iterating through running executions... 
{pool.executions}") + execution = get_execution_or_404(vm_hash, pool=pool) + + if not is_sender_authorized(authenticated_sender, execution.message): + return web.Response(status=403, body="Unauthorized sender") + + if execution.is_running: + return web.json_response( + {"code": "vm_running", "description": "Operation not allowed, instance already running"}, + status=HTTPStatus.BAD_REQUEST, + ) + if not execution.is_confidential: + return web.json_response( + {"code": "not_confidential", "description": "Instance is not a confidential instance"}, + status=HTTPStatus.BAD_REQUEST, + ) + + post = await request.post() + + vm_session_path = settings.CONFIDENTIAL_SESSION_DIRECTORY / vm_hash + vm_session_path.mkdir(exist_ok=True) + + session_file_content = post.get("session") + if not session_file_content: + return web.json_response( + {"code": "field_missing", "description": "Session field is missing"}, + status=HTTPStatus.BAD_REQUEST, + ) + + session_file_path = vm_session_path / "vm_session.b64" + session_file_path.write_bytes(session_file_content.file.read()) + + godh_file_content = post.get("godh") + if not godh_file_content: + return web.json_response( + {"code": "field_missing", "description": "godh field is missing. Please provide a GODH file"}, + status=HTTPStatus.BAD_REQUEST, + ) + + godh_file_path = vm_session_path / "vm_godh.b64" + godh_file_path.write_bytes(godh_file_content.file.read()) + + pool.systemd_manager.enable_and_start(execution.controller_service) + + return web.Response(status=200, body=f"Started VM with ref {vm_hash}") + + +@cors_allow_all +@require_jwk_authentication +async def operate_stop(request: web.Request, authenticated_sender: str) -> web.Response: + """Stop the virtual machine, smoothly if possible.""" + vm_hash = get_itemhash_or_400(request.match_info) + with set_vm_for_logging(vm_hash=vm_hash): + pool: VmPool = request.app["vm_pool"] + logger.debug(f"Iterating through running executions... 
{pool.executions}")
+        execution = get_execution_or_404(vm_hash, pool=pool)
+
+        if not is_sender_authorized(authenticated_sender, execution.message):
+            return web.Response(status=403, body="Unauthorized sender")
+
+        if execution.is_running:
+            logger.info(f"Stopping {execution.vm_hash}")
+            await pool.stop_vm(execution.vm_hash)
+            return web.Response(status=200, body=f"Stopped VM with ref {vm_hash}")
+        else:
+            return web.Response(status=200, body="Already stopped, nothing to do")
+
+
+@cors_allow_all
+@require_jwk_authentication
+async def operate_reboot(request: web.Request, authenticated_sender: str) -> web.Response:
+    """
+    Reboots the virtual machine, smoothly if possible.
+    """
+    vm_hash = get_itemhash_or_400(request.match_info)
+    with set_vm_for_logging(vm_hash=vm_hash):
+        pool: VmPool = request.app["vm_pool"]
+        execution = get_execution_or_404(vm_hash, pool=pool)
+
+        if not is_sender_authorized(authenticated_sender, execution.message):
+            return web.Response(status=403, body="Unauthorized sender")
+
+        if execution.is_running:
+            logger.info(f"Rebooting {execution.vm_hash}")
+            if execution.persistent:
+                pool.systemd_manager.restart(execution.controller_service)
+            else:
+                await pool.stop_vm(vm_hash)
+                pool.forget_vm(vm_hash)
+
+                await create_vm_execution_or_raise_http_error(vm_hash=vm_hash, pool=pool)
+            return web.Response(status=200, body=f"Rebooted VM with ref {vm_hash}")
+        else:
+            return web.Response(status=200, body=f"Starting VM (was not running) with ref {vm_hash}")
+
+
+@cors_allow_all
+@require_jwk_authentication
+async def operate_confidential_measurement(request: web.Request, authenticated_sender) -> web.Response:
+    """
+    Fetch the sev measurement for the VM
+    """
+    vm_hash = get_itemhash_or_400(request.match_info)
+    with set_vm_for_logging(vm_hash=vm_hash):
+        pool: VmPool = request.app["vm_pool"]
+        execution = 
get_execution_or_404(vm_hash, pool=pool) + + if not is_sender_authorized(authenticated_sender, execution.message): + return web.Response(status=403, body="Unauthorized sender") + + if not execution.is_running: + raise web.HTTPForbidden(body="Operation not running") + vm_client = QemuVmClient(execution.vm) + vm_sev_info = vm_client.query_sev_info() + launch_measure = vm_client.query_launch_measure() + + return web.json_response( + data={"sev_info": vm_sev_info, "launch_measure": launch_measure}, + status=200, + dumps=dumps_for_json, + ) + + +class InjectSecretParams(BaseModel): + """ + packet_header: as base64 string + secret : encrypted secret table as base64 string + """ + + packet_header: str + secret: str + + +@cors_allow_all +@require_jwk_authentication +async def operate_confidential_inject_secret(request: web.Request, authenticated_sender) -> web.Response: + """ + Send secret to the VM and start it + """ + try: + data = await request.json() + params = InjectSecretParams.parse_obj(data) + except json.JSONDecodeError: + return web.HTTPBadRequest(reason="Body is not valid JSON") + except pydantic.ValidationError as error: + return web.json_response(data=error.json(), status=web.HTTPBadRequest.status_code) + + vm_hash = get_itemhash_or_400(request.match_info) + with set_vm_for_logging(vm_hash=vm_hash): + pool: VmPool = request.app["vm_pool"] + execution = get_execution_or_404(vm_hash, pool=pool) + if not is_sender_authorized(authenticated_sender, execution.message): + return web.Response(status=403, body="Unauthorized sender") + + # if not execution.is_running: + # raise web.HTTPForbidden(body="Operation not running") + vm_client = QemuVmClient(execution.vm) + vm_client.inject_secret(params.packet_header, params.secret) + vm_client.continue_execution() + + status = vm_client.query_status() + print(status["status"] != "running") + + return web.json_response( + data={"status": status}, + status=200, + dumps=dumps_for_json, + ) + + +@cors_allow_all 
+@require_jwk_authentication +async def operate_erase(request: web.Request, authenticated_sender: str) -> web.Response: + """Delete all data stored by a virtual machine. + Stop the virtual machine first if needed. + """ + vm_hash = get_itemhash_or_400(request.match_info) + with set_vm_for_logging(vm_hash=vm_hash): + pool: VmPool = request.app["vm_pool"] + execution = get_execution_or_404(vm_hash, pool=pool) + + if not is_sender_authorized(authenticated_sender, execution.message): + return web.Response(status=403, body="Unauthorized sender") + + logger.info(f"Erasing {execution.vm_hash}") + + # Stop the VM + await pool.stop_vm(execution.vm_hash) + if execution.vm_hash in pool.executions: + logger.warning(f"VM {execution.vm_hash} was not stopped properly, forgetting it anyway") + pool.forget_vm(execution.vm_hash) + + # Delete all data + if execution.resources is not None: + for volume in execution.resources.volumes: + if not volume.read_only: + logger.info(f"Deleting volume {volume.path_on_host}") + volume.path_on_host.unlink() + + return web.Response(status=200, body=f"Erased VM with ref {vm_hash}") diff --git a/src/aleph/vm/orchestrator/views/static/aleph-cloud-v1.svg b/src/aleph/vm/orchestrator/views/static/aleph-cloud-v1.svg new file mode 100644 index 000000000..c32715bdc --- /dev/null +++ b/src/aleph/vm/orchestrator/views/static/aleph-cloud-v1.svg @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/src/aleph/vm/orchestrator/views/static/aleph-cloud-v2.svg b/src/aleph/vm/orchestrator/views/static/aleph-cloud-v2.svg new file mode 100644 index 000000000..f6b39e621 --- /dev/null +++ b/src/aleph/vm/orchestrator/views/static/aleph-cloud-v2.svg @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/src/aleph/vm/orchestrator/views/static/helpers.js b/src/aleph/vm/orchestrator/views/static/helpers.js new file mode 100644 index 000000000..f7e9925dd --- /dev/null +++ b/src/aleph/vm/orchestrator/views/static/helpers.js @@ -0,0 +1,169 @@ + +// Add 
optional "legacy" argument to this function +async function fetchFastapiCheckStatus (legacy = false) { + const path = legacy ? '/status/check/fastapi/legacy' : '/status/check/fastapi'; + const q = await fetch(path); + let res = { + status: q.status, + details: [] + } + if(q.ok){ + res.status = " is working properly ✅"; + } + else { + switch(Number(q.status)){ + case 503: + res.status = " is not working properly ❌"; + res.details = await q.json(); + break; + case 500: + res.status = " ❌ Failed"; + break; + default: + res.status = q.status; + } + } + + return res; +} + +async function fetchHostCheckStatus () { + const q = await fetch('/status/check/host'); + let res = { + status: q.status, + details: [] + } + if(q.ok){ + res.status = " is working properly ✅"; + } + else { + switch(Number(q.status)){ + case 503: + res.status = " is not working properly ❌"; + res.details = await q.json(); + break; + case 500: + res.status = " ❌ Failed"; + break; + default: + res.status = q.status; + } + } + + return res; +} + +async function fetchHostSystemUsage () { + const q = await fetch('/about/usage/system'); + let res = { + status: q.status, + details: [] + } + if(q.ok){ + const answer = await q.json(); + const gpu_devices = answer.gpu.devices; + if (gpu_devices.length <= 0) { + res.status = "No GPUs detected"; + }else{ + res.status = "
      "; + for (const gpu_device of gpu_devices){ + let compatible_str = " is compatible ✅"; + if (!gpu_device.compatible) { + compatible_str = " isn't compatible ❌"; + } + res.status += "
    • " + gpu_device.vendor + " | " + gpu_device.device_name + "" + compatible_str + "
    • "; + } + res.status += "
    "; + } + } + else { + switch(Number(q.status)){ + case 500: + res.status = "Getting Node usage failed ❌"; + break; + default: + res.status = q.status; + } + } + + return res; +} + +function objectToString (obj) { + return Object.entries(obj).reduce((acc, [k, v]) => acc + `
  • ${k}: ${v}
  • \n`, ''); +} + +const buildQueryParams = (params) => Object.entries(params).reduce((acc, [k, v]) => acc + `${k}=${v}&`, '?').slice(0, -1); + +const isLatestRelease = async () => { + const q = await fetch('https://api.github.com/repos/aleph-im/aleph-vm/releases/latest'); + if(q.ok){ + const res = await q.json(); + return res.tag_name + } + throw new Error('Failed to fetch latest release'); +} + +const buildMetricViewset = (metricsMsg, hostname, metricsResult) => { + const thisNode = metricsMsg.content.metrics.crn.find(node => node.url === hostname) + // Fixes a bug if a node has no metrics for the given timeframe + if(thisNode){ + const factory = keyName => ({ time: thisNode.measured_at, value: thisNode[keyName] * 100 }) + const keys = ['base_latency', 'base_latency_ipv4', 'diagnostic_vm_latency', 'full_check_latency'] + keys.map(key => metricsResult[key].push(factory(key))) + } +} + +async function* fetchLatestMetrics (hostname, fromDate) { + const defaultWindowSize = 50; + const API_URL = 'https://api2.aleph.im/api/v0/posts.json'; + + const data = { + base_latency: [], + base_latency_ipv4: [], + diagnostic_vm_latency: [], + full_check_latency: [], + } + + const qp = { + startDate: fromDate / 1000 | 0, + types: 'aleph-network-metrics', + pagination: defaultWindowSize + } + const count = await fetch(API_URL + buildQueryParams({...qp, pagination: 1})); + if(!count.ok) + throw new Error('Failed to fetch metrics'); + const countRes = await count.json(); + const totalDataPoints = countRes.pagination_total; + + if(totalDataPoints === 0) + throw new Error('No metrics found'); + + if(!countRes?.posts[0]?.content?.metrics?.crn?.find(node => node.url === hostname)) + throw new Error('Hostname not found in metrics'); + + + const totalPages = Math.ceil(totalDataPoints / qp.pagination); + let currentPage = 0; + let retries = 0; + const RETRY_THRESHOLD = 5; + + while(currentPage < totalPages){ + if(retries > RETRY_THRESHOLD) + throw new Error('Network error: too many 
retries') + + const q = await fetch(API_URL + buildQueryParams({...qp, page: currentPage + 1})); + if(q.ok){ + const res = await q.json(); + res.posts.map(post => buildMetricViewset(post, hostname, data)); + currentPage++; + yield { + progress: currentPage / totalPages, + data + }; + } + else{ + retries++; + } + } +} diff --git a/src/aleph/vm/orchestrator/views/static/lightweight-charts.standalone.production.js b/src/aleph/vm/orchestrator/views/static/lightweight-charts.standalone.production.js new file mode 100644 index 000000000..73c825b46 --- /dev/null +++ b/src/aleph/vm/orchestrator/views/static/lightweight-charts.standalone.production.js @@ -0,0 +1,7 @@ +/*! + * @license + * TradingView Lightweight Charts™ v4.1.1 + * Copyright (c) 2023 TradingView, Inc. + * Licensed under Apache License 2.0 https://www.apache.org/licenses/LICENSE-2.0 + */ +!function(){"use strict";const t={upColor:"#26a69a",downColor:"#ef5350",wickVisible:!0,borderVisible:!0,borderColor:"#378658",borderUpColor:"#26a69a",borderDownColor:"#ef5350",wickColor:"#737375",wickUpColor:"#26a69a",wickDownColor:"#ef5350"},i={upColor:"#26a69a",downColor:"#ef5350",openVisible:!0,thinBars:!0},n={color:"#2196f3",lineStyle:0,lineWidth:3,lineType:0,lineVisible:!0,crosshairMarkerVisible:!0,crosshairMarkerRadius:4,crosshairMarkerBorderColor:"",crosshairMarkerBorderWidth:2,crosshairMarkerBackgroundColor:"",lastPriceAnimation:0,pointMarkersVisible:!1},s={topColor:"rgba( 46, 220, 135, 0.4)",bottomColor:"rgba( 40, 221, 100, 0)",invertFilledArea:!1,lineColor:"#33D778",lineStyle:0,lineWidth:3,lineType:0,lineVisible:!0,crosshairMarkerVisible:!0,crosshairMarkerRadius:4,crosshairMarkerBorderColor:"",crosshairMarkerBorderWidth:2,crosshairMarkerBackgroundColor:"",lastPriceAnimation:0,pointMarkersVisible:!1},e={baseValue:{type:"price",price:0},topFillColor1:"rgba(38, 166, 154, 0.28)",topFillColor2:"rgba(38, 166, 154, 0.05)",topLineColor:"rgba(38, 166, 154, 1)",bottomFillColor1:"rgba(239, 83, 80, 
0.05)",bottomFillColor2:"rgba(239, 83, 80, 0.28)",bottomLineColor:"rgba(239, 83, 80, 1)",lineWidth:3,lineStyle:0,lineType:0,lineVisible:!0,crosshairMarkerVisible:!0,crosshairMarkerRadius:4,crosshairMarkerBorderColor:"",crosshairMarkerBorderWidth:2,crosshairMarkerBackgroundColor:"",lastPriceAnimation:0,pointMarkersVisible:!1},r={color:"#26a69a",base:0},h={color:"#2196f3"},l={title:"",visible:!0,lastValueVisible:!0,priceLineVisible:!0,priceLineSource:0,priceLineWidth:1,priceLineColor:"",priceLineStyle:2,baseLineVisible:!0,baseLineWidth:1,baseLineColor:"#B2B5BE",baseLineStyle:0,priceFormat:{type:"price",precision:2,minMove:.01}};var a,o;function _(t,i){const n={0:[],1:[t.lineWidth,t.lineWidth],2:[2*t.lineWidth,2*t.lineWidth],3:[6*t.lineWidth,6*t.lineWidth],4:[t.lineWidth,4*t.lineWidth]}[i];t.setLineDash(n)}function u(t,i,n,s){t.beginPath();const e=t.lineWidth%2?.5:0;t.moveTo(n,i+e),t.lineTo(s,i+e),t.stroke()}function c(t,i){if(!t)throw new Error("Assertion failed"+(i?": "+i:""))}function d(t){if(void 0===t)throw new Error("Value is undefined");return t}function f(t){if(null===t)throw new Error("Value is null");return t}function v(t){return f(d(t))}!function(t){t[t.Simple=0]="Simple",t[t.WithSteps=1]="WithSteps",t[t.Curved=2]="Curved"}(a||(a={})),function(t){t[t.Solid=0]="Solid",t[t.Dotted=1]="Dotted",t[t.Dashed=2]="Dashed",t[t.LargeDashed=3]="LargeDashed",t[t.SparseDotted=4]="SparseDotted"}(o||(o={}));const 
p={khaki:"#f0e68c",azure:"#f0ffff",aliceblue:"#f0f8ff",ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gainsboro:"#dcdcdc",gray:"#808080",green:"#008000",honeydew:"#f0fff0",floralwhite:"#fffaf0",lightblue:"#add8e6",lightcoral:"#f08080",lemonchiffon:"#fffacd",hotpink:"#ff69b4",lightyellow:"#ffffe0",greenyellow:"#adff2f",lightgoldenrodyellow:"#fafad2",limegreen:"#32cd32",linen:"#faf0e6",lightcyan:"#e0ffff",magenta:"#f0f",maroon:"#800000",olive:"#808000",orange:"#ffa500",oldlace:"#fdf5e6",mediumblue:"#0000cd",transparent:"#0000",lime:"#0f0",lightpink:"#ffb6c1",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",midnightblue:"#191970",orchid:"#da70d6",mediumorchid:"#ba55d3",mediumturquoise:"#48d1cc",orangered:"#ff4500",royalblue:"#4169e1",powderblue:"#b0e0e6",red:"#f00",coral:"#ff7f50",turquoise:"#40e0d0",white:"#fff",whitesmoke:"#f5f5f5",wheat:"#f5deb3",teal:"#008080",steelblue:"#4682b4",bisque:"#ffe4c4",aquamarine:"#7fffd4",aqua:"#0ff",sienna:"#a0522d",silver:"#c0c0c0",springgreen:"#00ff7f",antiquewhite:"#faebd7",burlywood:"#deb887",brown:"#a52a2a",beige:"#f5f5dc",chocolate:"#d2691e",chartreuse:"#7fff00",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cadetblue:"#5f9ea0",tomato:"#ff6347",fuchsia:"#f0f",blue:"#00f",salmon:"#fa8072",blanchedalmond:"#ffebcd",slateblue:"#6a5acd",slategray:"#708090",thistle:"#d8bfd8",tan:"#d2b48c",cyan:"#0ff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",blueviolet:"#8a2be2",black:"#000",darkmagenta:"#8b008b",darkslateblue:"#483d8b",darkkhaki:"#bdb76b",darkorchid:"#9932cc",darkorange:"#ff8c00",darkgreen:"#006400",darkred:"#8b0000",dodgerblue:"#1e90ff",darkslategray:"#2f4f4f",dimgray:"#696969",deepskyblue:"#00bfff",firebrick:"#b22222",forestgreen:"#228b22",indigo:"#4b0082",ivory:"#fffff0",lavenderblush:"#fff0f5",feldspar:"#d19275",indianred:"#cd5c5c",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightskyblue:"#87cefa",lightslategray:"#789",lightslateblue:"#8470ff",snow:"#fffafa",lightseagreen:"#20b2aa"
,lightsalmon:"#ffa07a",darksalmon:"#e9967a",darkviolet:"#9400d3",mediumpurple:"#9370d8",mediumaquamarine:"#66cdaa",skyblue:"#87ceeb",lavender:"#e6e6fa",lightsteelblue:"#b0c4de",mediumvioletred:"#c71585",mintcream:"#f5fffa",navajowhite:"#ffdead",navy:"#000080",olivedrab:"#6b8e23",palevioletred:"#d87093",violetred:"#d02090",yellow:"#ff0",yellowgreen:"#9acd32",lawngreen:"#7cfc00",pink:"#ffc0cb",paleturquoise:"#afeeee",palegoldenrod:"#eee8aa",darkolivegreen:"#556b2f",darkseagreen:"#8fbc8f",darkturquoise:"#00ced1",peachpuff:"#ffdab9",deeppink:"#ff1493",violet:"#ee82ee",palegreen:"#98fb98",mediumseagreen:"#3cb371",peru:"#cd853f",saddlebrown:"#8b4513",sandybrown:"#f4a460",rosybrown:"#bc8f8f",purple:"#800080",seagreen:"#2e8b57",seashell:"#fff5ee",papayawhip:"#ffefd5",mediumslateblue:"#7b68ee",plum:"#dda0dd",mediumspringgreen:"#00fa9a"};function m(t){return t<0?0:t>255?255:Math.round(t)||0}function b(t){return t<=0||t>0?t<0?0:t>1?1:Math.round(1e4*t)/1e4:0}const w=/^#([0-9a-f])([0-9a-f])([0-9a-f])([0-9a-f])?$/i,g=/^#([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})?$/i,M=/^rgb\(\s*(-?\d{1,10})\s*,\s*(-?\d{1,10})\s*,\s*(-?\d{1,10})\s*\)$/,x=/^rgba\(\s*(-?\d{1,10})\s*,\s*(-?\d{1,10})\s*,\s*(-?\d{1,10})\s*,\s*(-?[\d]{0,10}(?:\.\d+)?)\s*\)$/;function S(t){(t=t.toLowerCase())in p&&(t=p[t]);{const i=x.exec(t)||M.exec(t);if(i)return[m(parseInt(i[1],10)),m(parseInt(i[2],10)),m(parseInt(i[3],10)),b(i.length<5?1:parseFloat(i[4]))]}{const i=g.exec(t);if(i)return[m(parseInt(i[1],16)),m(parseInt(i[2],16)),m(parseInt(i[3],16)),1]}{const i=w.exec(t);if(i)return[m(17*parseInt(i[1],16)),m(17*parseInt(i[2],16)),m(17*parseInt(i[3],16)),1]}throw new Error(`Cannot parse color: ${t}`)}function y(t){const i=S(t);return{t:`rgb(${i[0]}, ${i[1]}, ${i[2]})`,i:(n=i,.199*n[0]+.687*n[1]+.114*n[2]>160?"black":"white")};var n}class k{constructor(){this.h=[]}l(t,i,n){const s={o:t,_:i,u:!0===n};this.h.push(s)}v(t){const 
i=this.h.findIndex((i=>t===i.o));i>-1&&this.h.splice(i,1)}p(t){this.h=this.h.filter((i=>i._!==t))}m(t,i,n){const s=[...this.h];this.h=this.h.filter((t=>!t.u)),s.forEach((s=>s.o(t,i,n)))}M(){return this.h.length>0}S(){this.h=[]}}function C(t,...i){for(const n of i)for(const i in n)void 0!==n[i]&&("object"!=typeof n[i]||void 0===t[i]||Array.isArray(n[i])?t[i]=n[i]:C(t[i],n[i]));return t}function T(t){return"number"==typeof t&&isFinite(t)}function P(t){return"number"==typeof t&&t%1==0}function R(t){return"string"==typeof t}function D(t){return"boolean"==typeof t}function O(t){const i=t;if(!i||"object"!=typeof i)return i;let n,s,e;for(s in n=Array.isArray(i)?[]:{},i)i.hasOwnProperty(s)&&(e=i[s],n[s]=e&&"object"==typeof e?O(e):e);return n}function A(t){return null!==t}function B(t){return null===t?void 0:t}const V="-apple-system, BlinkMacSystemFont, 'Trebuchet MS', Roboto, Ubuntu, sans-serif";function z(t,i,n){return void 0===i&&(i=V),`${n=void 0!==n?`${n} `:""}${t}px ${i}`}class E{constructor(t){this.k={C:1,T:5,P:NaN,R:"",D:"",O:"",A:"",B:0,V:0,I:0,L:0,N:0},this.F=t}W(){const t=this.k,i=this.j(),n=this.H();return t.P===i&&t.D===n||(t.P=i,t.D=n,t.R=z(i,n),t.L=2.5/12*i,t.B=t.L,t.V=i/12*t.T,t.I=i/12*t.T,t.N=0),t.O=this.$(),t.A=this.U(),this.k}$(){return this.F.W().layout.textColor}U(){return this.F.q()}j(){return this.F.W().layout.fontSize}H(){return this.F.W().layout.fontFamily}}class I{constructor(){this.Y=[]}X(t){this.Y=t}K(t,i,n){this.Y.forEach((s=>{s.K(t,i,n)}))}}class L{K(t,i,n){t.useMediaCoordinateSpace((t=>this.Z(t,i,n)))}G(t,i,n){t.useMediaCoordinateSpace((t=>this.J(t,i,n)))}J(t,i,n){}}class N extends L{constructor(){super(...arguments),this.tt=null}it(t){this.tt=t}Z({context:t}){if(null===this.tt||null===this.tt.nt)return;const i=this.tt.nt,n=this.tt,s=s=>{t.beginPath();for(let e=i.to-1;e>=i.from;--e){const i=n.st[e];t.moveTo(i.et,i.rt),t.arc(i.et,i.rt,s,0,2*Math.PI)}t.fill()};n.ht>0&&(t.fillStyle=n.lt,s(n.ot+n.ht)),t.fillStyle=n._t,s(n.ot)}}function 
F(){return{st:[{et:0,rt:0,ut:0,ct:0}],_t:"",lt:"",ot:0,ht:0,nt:null}}const W={from:0,to:1};class j{constructor(t,i){this.dt=new I,this.ft=[],this.vt=[],this.bt=!0,this.F=t,this.wt=i,this.dt.X(this.ft)}gt(t){const i=this.F.Mt();i.length!==this.ft.length&&(this.vt=i.map(F),this.ft=this.vt.map((t=>{const i=new N;return i.it(t),i})),this.dt.X(this.ft)),this.bt=!0}xt(){return this.bt&&(this.St(),this.bt=!1),this.dt}St(){const t=2===this.wt.W().mode,i=this.F.Mt(),n=this.wt.yt(),s=this.F.kt();i.forEach(((i,e)=>{var r;const h=this.vt[e],l=i.Ct(n);if(t||null===l||!i.Tt())return void(h.nt=null);const a=f(i.Pt());h._t=l.Rt,h.ot=l.ot,h.ht=l.Dt,h.st[0].ct=l.ct,h.st[0].rt=i.At().Ot(l.ct,a.Bt),h.lt=null!==(r=l.Vt)&&void 0!==r?r:this.F.zt(h.st[0].rt/i.At().Et()),h.st[0].ut=n,h.st[0].et=s.It(n),h.nt=W}))}}class H{K(t,i,n){t.useBitmapCoordinateSpace((t=>this.Z(t,i,n)))}}class $ extends H{constructor(t){super(),this.Lt=t}Z({context:t,bitmapSize:i,horizontalPixelRatio:n,verticalPixelRatio:s}){if(null===this.Lt)return;const e=this.Lt.Nt.Tt,r=this.Lt.Ft.Tt;if(!e&&!r)return;const h=Math.round(this.Lt.et*n),l=Math.round(this.Lt.rt*s);t.lineCap="butt",e&&h>=0&&(t.lineWidth=Math.floor(this.Lt.Nt.ht*n),t.strokeStyle=this.Lt.Nt.O,t.fillStyle=this.Lt.Nt.O,_(t,this.Lt.Nt.Wt),function(t,i,n,s){t.beginPath();const e=t.lineWidth%2?.5:0;t.moveTo(i+e,n),t.lineTo(i+e,s),t.stroke()}(t,h,0,i.height)),r&&l>=0&&(t.lineWidth=Math.floor(this.Lt.Ft.ht*s),t.strokeStyle=this.Lt.Ft.O,t.fillStyle=this.Lt.Ft.O,_(t,this.Lt.Ft.Wt),u(t,l,0,i.width))}}class U{constructor(t){this.bt=!0,this.jt={Nt:{ht:1,Wt:0,O:"",Tt:!1},Ft:{ht:1,Wt:0,O:"",Tt:!1},et:0,rt:0},this.Ht=new $(this.jt),this.$t=t}gt(){this.bt=!0}xt(){return this.bt&&(this.St(),this.bt=!1),this.Ht}St(){const t=this.$t.Tt(),i=f(this.$t.Ut()),n=i.qt().W().crosshair,s=this.jt;if(2===n.mode)return 
s.Ft.Tt=!1,void(s.Nt.Tt=!1);s.Ft.Tt=t&&this.$t.Yt(i),s.Nt.Tt=t&&this.$t.Xt(),s.Ft.ht=n.horzLine.width,s.Ft.Wt=n.horzLine.style,s.Ft.O=n.horzLine.color,s.Nt.ht=n.vertLine.width,s.Nt.Wt=n.vertLine.style,s.Nt.O=n.vertLine.color,s.et=this.$t.Kt(),s.rt=this.$t.Zt()}}function q(t,i,n,s,e,r){t.fillRect(i+r,n,s-2*r,r),t.fillRect(i+r,n+e-r,s-2*r,r),t.fillRect(i,n,r,e),t.fillRect(i+s-r,n,r,e)}function Y(t,i,n,s,e,r){t.save(),t.globalCompositeOperation="copy",t.fillStyle=r,t.fillRect(i,n,s,e),t.restore()}function X(t,i){return t.map((t=>0===t?t:t+i))}function K(t,i,n,s,e,r){t.beginPath(),t.lineTo(i+s-r[1],n),0!==r[1]&&t.arcTo(i+s,n,i+s,n+r[1],r[1]),t.lineTo(i+s,n+e-r[2]),0!==r[2]&&t.arcTo(i+s,n+e,i+s-r[2],n+e,r[2]),t.lineTo(i+r[3],n+e),0!==r[3]&&t.arcTo(i,n+e,i,n+e-r[3],r[3]),t.lineTo(i,n+r[0]),0!==r[0]&&t.arcTo(i,n,i+r[0],n,r[0])}function Z(t,i,n,s,e,r,h=0,l=[0,0,0,0],a=""){if(t.save(),!h||!a||a===r)return K(t,i,n,s,e,l),t.fillStyle=r,t.fill(),void t.restore();const o=h/2;if("transparent"!==r){K(t,i+h,n+h,s-2*h,e-2*h,X(l,-h)),t.fillStyle=r,t.fill()}if("transparent"!==a){K(t,i+o,n+o,s-h,e-h,X(l,-o)),t.lineWidth=h,t.strokeStyle=a,t.closePath(),t.stroke()}t.restore()}function G(t,i,n,s,e,r,h){t.save(),t.globalCompositeOperation="copy";const l=t.createLinearGradient(0,0,0,e);l.addColorStop(0,r),l.addColorStop(1,h),t.fillStyle=l,t.fillRect(i,n,s,e),t.restore()}class J{constructor(t,i){this.it(t,i)}it(t,i){this.Lt=t,this.Gt=i}Et(t,i){return this.Lt.Tt?t.P+t.L+t.B:0}K(t,i,n,s){if(!this.Lt.Tt||0===this.Lt.Jt.length)return;const e=this.Lt.O,r=this.Gt.t,h=t.useBitmapCoordinateSpace((t=>{const h=t.context;h.font=i.R;const l=this.Qt(t,i,n,s),a=l.ti,o=(t,i)=>{l.ii?Z(h,a.ni,a.si,a.ei,a.ri,t,a.hi,[a.ot,0,0,a.ot],i):Z(h,a.li,a.si,a.ei,a.ri,t,a.hi,[0,a.ot,a.ot,0],i)};return 
o(r,"transparent"),this.Lt.ai&&(h.fillStyle=e,h.fillRect(a.li,a.oi,a._i-a.li,a.ui)),o("transparent",r),this.Lt.ci&&(h.fillStyle=i.A,h.fillRect(l.ii?a.di-a.hi:0,a.si,a.hi,a.fi-a.si)),l}));t.useMediaCoordinateSpace((({context:t})=>{const n=h.vi;t.font=i.R,t.textAlign=h.ii?"right":"left",t.textBaseline="middle",t.fillStyle=e,t.fillText(this.Lt.Jt,n.pi,(n.si+n.fi)/2+n.mi)}))}Qt(t,i,n,s){var e;const{context:r,bitmapSize:h,mediaSize:l,horizontalPixelRatio:a,verticalPixelRatio:o}=t,_=this.Lt.ai||!this.Lt.bi?i.T:0,u=this.Lt.wi?i.C:0,c=i.L+this.Gt.gi,d=i.B+this.Gt.Mi,f=i.V,v=i.I,p=this.Lt.Jt,m=i.P,b=n.xi(r,p),w=Math.ceil(n.Si(r,p)),g=m+c+d,M=i.C+f+v+w+_,x=Math.max(1,Math.floor(o));let S=Math.round(g*o);S%2!=x%2&&(S+=1);const y=u>0?Math.max(1,Math.floor(u*a)):0,k=Math.round(M*a),C=Math.round(_*a),T=null!==(e=this.Gt.yi)&&void 0!==e?e:this.Gt.ki,P=Math.round(T*o)-Math.floor(.5*o),R=Math.floor(P+x/2-S/2),D=R+S,O="right"===s,A=O?l.width-u:u,B=O?h.width-y:y;let V,z,E;return O?(V=B-k,z=B-C,E=A-_-f-u):(V=B+k,z=B+C,E=A+_+f),{ii:O,ti:{si:R,oi:P,fi:D,ei:k,ri:S,ot:2*a,hi:y,ni:V,li:B,_i:z,ui:x,di:h.width},vi:{si:R/o,fi:D/o,pi:E,mi:b}}}}class Q{constructor(t){this.Ci={ki:0,t:"#000",Mi:0,gi:0},this.Ti={Jt:"",Tt:!1,ai:!0,bi:!1,Vt:"",O:"#FFF",ci:!1,wi:!1},this.Pi={Jt:"",Tt:!1,ai:!1,bi:!0,Vt:"",O:"#FFF",ci:!0,wi:!0},this.bt=!0,this.Ri=new(t||J)(this.Ti,this.Ci),this.Di=new(t||J)(this.Pi,this.Ci)}Jt(){return this.Oi(),this.Ti.Jt}ki(){return this.Oi(),this.Ci.ki}gt(){this.bt=!0}Et(t,i=!1){return Math.max(this.Ri.Et(t,i),this.Di.Et(t,i))}Ai(){return this.Ci.yi||0}Bi(t){this.Ci.yi=t}Vi(){return this.Oi(),this.Ti.Tt||this.Pi.Tt}zi(){return this.Oi(),this.Ti.Tt}xt(t){return this.Oi(),this.Ti.ai=this.Ti.ai&&t.W().ticksVisible,this.Pi.ai=this.Pi.ai&&t.W().ticksVisible,this.Ri.it(this.Ti,this.Ci),this.Di.it(this.Pi,this.Ci),this.Ri}Ei(){return this.Oi(),this.Ri.it(this.Ti,this.Ci),this.Di.it(this.Pi,this.Ci),this.Di}Oi(){this.bt&&(this.Ti.ai=!0,this.Pi.ai=!1,this.Ii(this.Ti,this.Pi,this.Ci))}}class 
tt extends Q{constructor(t,i,n){super(),this.$t=t,this.Li=i,this.Ni=n}Ii(t,i,n){if(t.Tt=!1,2===this.$t.W().mode)return;const s=this.$t.W().horzLine;if(!s.labelVisible)return;const e=this.Li.Pt();if(!this.$t.Tt()||this.Li.Fi()||null===e)return;const r=y(s.labelBackgroundColor);n.t=r.t,t.O=r.i;const h=2/12*this.Li.P();n.gi=h,n.Mi=h;const l=this.Ni(this.Li);n.ki=l.ki,t.Jt=this.Li.Wi(l.ct,e),t.Tt=!0}}const it=/[1-9]/g;class nt{constructor(){this.Lt=null}it(t){this.Lt=t}K(t,i){if(null===this.Lt||!1===this.Lt.Tt||0===this.Lt.Jt.length)return;const n=t.useMediaCoordinateSpace((({context:t})=>(t.font=i.R,Math.round(i.ji.Si(t,f(this.Lt).Jt,it)))));if(n<=0)return;const s=i.Hi,e=n+2*s,r=e/2,h=this.Lt.$i;let l=this.Lt.ki,a=Math.floor(l-r)+.5;a<0?(l+=Math.abs(0-a),a=Math.floor(l-r)+.5):a+e>h&&(l-=Math.abs(h-(a+e)),a=Math.floor(l-r)+.5);const o=a+e,_=Math.ceil(0+i.C+i.T+i.L+i.P+i.B);t.useBitmapCoordinateSpace((({context:t,horizontalPixelRatio:n,verticalPixelRatio:s})=>{const e=f(this.Lt);t.fillStyle=e.t;const r=Math.round(a*n),h=Math.round(0*s),l=Math.round(o*n),u=Math.round(_*s),c=Math.round(2*n);if(t.beginPath(),t.moveTo(r,h),t.lineTo(r,u-c),t.arcTo(r,u,r+c,u,c),t.lineTo(l-c,u),t.arcTo(l,u,l,u-c,c),t.lineTo(l,h),t.fill(),e.ai){const r=Math.round(e.ki*n),l=h,a=Math.round((l+i.T)*s);t.fillStyle=e.O;const o=Math.max(1,Math.floor(n)),_=Math.floor(.5*n);t.fillRect(r-_,l,o,a-l)}})),t.useMediaCoordinateSpace((({context:t})=>{const n=f(this.Lt),e=0+i.C+i.T+i.L+i.P/2;t.font=i.R,t.textAlign="left",t.textBaseline="middle",t.fillStyle=n.O;const r=i.ji.xi(t,"Apr0");t.translate(a+s,e+r),t.fillText(n.Jt,0,0)}))}}class st{constructor(t,i,n){this.bt=!0,this.Ht=new nt,this.jt={Tt:!1,t:"#4c525e",O:"white",Jt:"",$i:0,ki:NaN,ai:!0},this.wt=t,this.Ui=i,this.Ni=n}gt(){this.bt=!0}xt(){return this.bt&&(this.St(),this.bt=!1),this.Ht.it(this.jt),this.Ht}St(){const t=this.jt;if(t.Tt=!1,2===this.wt.W().mode)return;const i=this.wt.W().vertLine;if(!i.labelVisible)return;const 
n=this.Ui.kt();if(n.Fi())return;t.$i=n.$i();const s=this.Ni();if(null===s)return;t.ki=s.ki;const e=n.qi(this.wt.yt());t.Jt=n.Yi(f(e)),t.Tt=!0;const r=y(i.labelBackgroundColor);t.t=r.t,t.O=r.i,t.ai=n.W().ticksVisible}}class et{constructor(){this.Xi=null,this.Ki=0}Zi(){return this.Ki}Gi(t){this.Ki=t}At(){return this.Xi}Ji(t){this.Xi=t}Qi(t){return[]}tn(){return[]}Tt(){return!0}}var rt;!function(t){t[t.Normal=0]="Normal",t[t.Magnet=1]="Magnet",t[t.Hidden=2]="Hidden"}(rt||(rt={}));class ht extends et{constructor(t,i){super(),this.nn=null,this.sn=NaN,this.en=0,this.rn=!0,this.hn=new Map,this.ln=!1,this.an=NaN,this.on=NaN,this._n=NaN,this.un=NaN,this.Ui=t,this.cn=i,this.dn=new j(t,this);this.fn=((t,i)=>n=>{const s=i(),e=t();if(n===f(this.nn).vn())return{ct:e,ki:s};{const t=f(n.Pt());return{ct:n.pn(s,t),ki:s}}})((()=>this.sn),(()=>this.on));const n=((t,i)=>()=>{const n=this.Ui.kt().mn(t()),s=i();return n&&Number.isFinite(s)?{ut:n,ki:s}:null})((()=>this.en),(()=>this.Kt()));this.bn=new st(this,t,n),this.wn=new U(this)}W(){return this.cn}gn(t,i){this._n=t,this.un=i}Mn(){this._n=NaN,this.un=NaN}xn(){return this._n}Sn(){return this.un}yn(t,i,n){this.ln||(this.ln=!0),this.rn=!0,this.kn(t,i,n)}yt(){return this.en}Kt(){return this.an}Zt(){return this.on}Tt(){return this.rn}Cn(){this.rn=!1,this.Tn(),this.sn=NaN,this.an=NaN,this.on=NaN,this.nn=null,this.Mn()}Pn(t){return null!==this.nn?[this.wn,this.dn]:[]}Yt(t){return t===this.nn&&this.cn.horzLine.visible}Xt(){return this.cn.vertLine.visible}Rn(t,i){this.rn&&this.nn===t||this.hn.clear();const n=[];return this.nn===t&&n.push(this.Dn(this.hn,i,this.fn)),n}tn(){return this.rn?[this.bn]:[]}Ut(){return this.nn}On(){this.wn.gt(),this.hn.forEach((t=>t.gt())),this.bn.gt(),this.dn.gt()}An(t){return t&&!t.vn().Fi()?t.vn():null}kn(t,i,n){this.Bn(t,i,n)&&this.On()}Bn(t,i,n){const s=this.an,e=this.on,r=this.sn,h=this.en,l=this.nn,a=this.An(n);this.en=t,this.an=isNaN(t)?NaN:this.Ui.kt().It(t),this.nn=n;const o=null!==a?a.Pt():null;return 
null!==a&&null!==o?(this.sn=i,this.on=a.Ot(i,o)):(this.sn=NaN,this.on=NaN),s!==this.an||e!==this.on||h!==this.en||r!==this.sn||l!==this.nn}Tn(){const t=this.Ui.Mt().map((t=>t.zn().Vn())).filter(A),i=0===t.length?null:Math.max(...t);this.en=null!==i?i:NaN}Dn(t,i,n){let s=t.get(i);return void 0===s&&(s=new tt(this,i,n),t.set(i,s)),s}}function lt(t){return"left"===t||"right"===t}class at{constructor(t){this.En=new Map,this.In=[],this.Ln=t}Nn(t,i){const n=function(t,i){return void 0===t?i:{Fn:Math.max(t.Fn,i.Fn),Wn:t.Wn||i.Wn}}(this.En.get(t),i);this.En.set(t,n)}jn(){return this.Ln}Hn(t){const i=this.En.get(t);return void 0===i?{Fn:this.Ln}:{Fn:Math.max(this.Ln,i.Fn),Wn:i.Wn}}$n(){this.Un(),this.In=[{qn:0}]}Yn(t){this.Un(),this.In=[{qn:1,Bt:t}]}Xn(t){this.Kn(),this.In.push({qn:5,Bt:t})}Un(){this.Kn(),this.In.push({qn:6})}Zn(){this.Un(),this.In=[{qn:4}]}Gn(t){this.Un(),this.In.push({qn:2,Bt:t})}Jn(t){this.Un(),this.In.push({qn:3,Bt:t})}Qn(){return this.In}ts(t){for(const i of t.In)this.ns(i);this.Ln=Math.max(this.Ln,t.Ln),t.En.forEach(((t,i)=>{this.Nn(i,t)}))}static ss(){return new at(2)}static es(){return new at(3)}ns(t){switch(t.qn){case 0:this.$n();break;case 1:this.Yn(t.Bt);break;case 2:this.Gn(t.Bt);break;case 3:this.Jn(t.Bt);break;case 4:this.Zn();break;case 5:this.Xn(t.Bt);break;case 6:this.Kn()}}Kn(){const t=this.In.findIndex((t=>5===t.qn));-1!==t&&this.In.splice(t,1)}}const ot=".";function _t(t,i){if(!T(t))return"n/a";if(!P(i))throw new TypeError("invalid length");if(i<0||i>16)throw new TypeError("invalid length");if(0===i)return t.toString();return("0000000000000000"+t.toString()).slice(-i)}class ut{constructor(t,i){if(i||(i=1),T(t)&&P(t)||(t=100),t<0)throw new TypeError("invalid base");this.Li=t,this.rs=i,this.hs()}format(t){const i=t<0?"−":"";return t=Math.abs(t),i+this.ls(t)}hs(){if(this.os=0,this.Li>0&&this.rs>0){let t=this.Li;for(;t>1;)t/=10,this.os++}}ls(t){const i=this.Li/this.rs;let n=Math.floor(t),s="";const e=void 0!==this.os?this.os:NaN;if(i>1){let 
r=+(Math.round(t*i)-n*i).toFixed(this.os);r>=i&&(r-=i,n+=1),s=ot+_t(+r.toFixed(this.os)*this.rs,e)}else n=Math.round(n*i)/i,e>0&&(s=ot+_t(0,e));return n.toFixed(0)+s}}class ct extends ut{constructor(t=100){super(t)}format(t){return`${super.format(t)}%`}}class dt{constructor(t){this._s=t}format(t){let i="";return t<0&&(i="-",t=-t),t<995?i+this.us(t):t<999995?i+this.us(t/1e3)+"K":t<999999995?(t=1e3*Math.round(t/1e3),i+this.us(t/1e6)+"M"):(t=1e6*Math.round(t/1e6),i+this.us(t/1e9)+"B")}us(t){let i;const n=Math.pow(10,this._s);return i=(t=Math.round(t*n)/n)>=1e-15&&t<1?t.toFixed(this._s).replace(/\.?0+$/,""):String(t),i.replace(/(\.[1-9]*)0+$/,((t,i)=>i))}}function ft(t,i,n,s,e,r,h){if(0===i.length||s.from>=i.length||s.to<=0)return;const{context:l,horizontalPixelRatio:a,verticalPixelRatio:o}=t,_=i[s.from];let u=r(t,_),c=_;if(s.to-s.from<2){const i=e/2;l.beginPath();const n={et:_.et-i,rt:_.rt},s={et:_.et+i,rt:_.rt};l.moveTo(n.et*a,n.rt*o),l.lineTo(s.et*a,s.rt*o),h(t,u,n,s)}else{const e=(i,n)=>{h(t,u,c,n),l.beginPath(),u=i,c=n};let d=c;l.beginPath(),l.moveTo(_.et*a,_.rt*o);for(let h=s.from+1;h=s.from;--n){const s=i[n];if(s){const i=e(t,s);i!==a&&(l.beginPath(),null!==a&&l.fill(),l.fillStyle=i,a=i);const n=Math.round(s.et*r)+o,u=s.rt*h;l.moveTo(n,u),l.arc(n,u,_,0,2*Math.PI)}}l.fill()}(t,i,l,n,o)}}class Pt extends Tt{Ds(t,i){return i._t}}function Rt(t,i,n,s,e=0,r=i.length){let h=r-e;for(;0>1,l=e+r;s(i[l],n)===t?(e=l+1,h-=r+1):h=r}return e}const Dt=Rt.bind(null,!0),Ot=Rt.bind(null,!1);function At(t,i){return t.ut0&&r=s&&(l=r-1),h>0&&hObject.assign(Object.assign({},t),this.Is.$s().Hs(t.ut))))}Us(){this.Es=null}Fs(){this.Bs&&(this.qs(),this.Bs=!1),this.Vs&&(this.js(),this.Vs=!1),this.As&&(this.Ys(),this.As=!1)}Ys(){const t=this.Is.At(),i=this.Ls.kt();if(this.Us(),i.Fi()||t.Fi())return;const n=i.Xs();if(null===n)return;if(0===this.Is.zn().Ks())return;const s=this.Is.Pt();null!==s&&(this.Es=Vt(this.zs,n,this.Ns),this.Zs(t,i,s.Bt),this.Gs())}}class Et extends 
zt{constructor(t,i){super(t,i,!0)}Zs(t,i,n){i.Js(this.zs,B(this.Es)),t.Qs(this.zs,n,B(this.Es))}te(t,i){return{ut:t,ct:i,et:NaN,rt:NaN}}qs(){const t=this.Is.$s();this.zs=this.Is.zn().ie().map((i=>{const n=i.Bt[3];return this.ne(i.se,n,t)}))}}class It extends Et{constructor(t,i){super(t,i),this.Ws=new I,this.ee=new kt,this.re=new Pt,this.Ws.X([this.ee,this.re])}ne(t,i,n){return Object.assign(Object.assign({},this.te(t,i)),n.Hs(t))}Gs(){const t=this.Is.W();this.ee.it({ds:t.lineType,st:this.zs,Wt:t.lineStyle,ht:t.lineWidth,fs:null,vs:t.invertFilledArea,nt:this.Es,cs:this.Ls.kt().he()}),this.re.it({ds:t.lineVisible?t.lineType:void 0,st:this.zs,Wt:t.lineStyle,ht:t.lineWidth,nt:this.Es,cs:this.Ls.kt().he(),Rs:t.pointMarkersVisible?t.pointMarkersRadius||t.lineWidth/2+2:void 0})}}class Lt extends H{constructor(){super(...arguments),this.Lt=null,this.le=0,this.ae=0}it(t){this.Lt=t}Z({context:t,horizontalPixelRatio:i,verticalPixelRatio:n}){if(null===this.Lt||0===this.Lt.zn.length||null===this.Lt.nt)return;if(this.le=this.oe(i),this.le>=2){Math.max(1,Math.floor(i))%2!=this.le%2&&this.le--}this.ae=this.Lt._e?Math.min(this.le,Math.floor(i)):this.le;let s=null;const e=this.ae<=this.le&&this.Lt.he>=Math.floor(1.5*i);for(let r=this.Lt.nt.from;rf+p-1&&(e=f+p-1,s=e-_+1),t.fillRect(i,s,o-i,e-s+1)}const i=a+m;let s=Math.max(f,Math.round(h.pe*n)-l),e=s+_-1;e>f+p-1&&(e=f+p-1,s=e-_+1),t.fillRect(u+1,s,i-u,e-s+1)}}}oe(t){const i=Math.floor(t);return Math.max(i,Math.floor(function(t,i){return Math.floor(.3*t*i)}(f(this.Lt).he,t)))}}class Nt extends zt{constructor(t,i){super(t,i,!1)}Zs(t,i,n){i.Js(this.zs,B(this.Es)),t.me(this.zs,n,B(this.Es))}be(t,i,n){return{ut:t,we:i.Bt[0],ge:i.Bt[1],Me:i.Bt[2],xe:i.Bt[3],et:NaN,ve:NaN,ce:NaN,de:NaN,pe:NaN}}qs(){const t=this.Is.$s();this.zs=this.Is.zn().ie().map((i=>this.ne(i.se,i,t)))}}class Ft extends Nt{constructor(){super(...arguments),this.Ws=new Lt}ne(t,i,n){return Object.assign(Object.assign({},this.be(t,i,n)),n.Hs(t))}Gs(){const 
t=this.Is.W();this.Ws.it({zn:this.zs,he:this.Ls.kt().he(),fe:t.openVisible,_e:t.thinBars,nt:this.Es})}}class Wt extends gt{constructor(){super(...arguments),this.Cs=new yt}ps(t,i){const n=this.tt;return this.Cs.bs(t,{gs:i.Se,Ms:i.ye,xs:i.ke,Ss:i.Ce,ys:t.bitmapSize.height,fs:n.fs})}}class jt extends Tt{constructor(){super(...arguments),this.Te=new yt}Ds(t,i){const n=this.tt;return this.Te.bs(t,{gs:i.Pe,Ms:i.Pe,xs:i.Re,Ss:i.Re,ys:t.bitmapSize.height,fs:n.fs})}}class Ht extends Et{constructor(t,i){super(t,i),this.Ws=new I,this.De=new Wt,this.Oe=new jt,this.Ws.X([this.De,this.Oe])}ne(t,i,n){return Object.assign(Object.assign({},this.te(t,i)),n.Hs(t))}Gs(){const t=this.Is.Pt();if(null===t)return;const i=this.Is.W(),n=this.Is.At().Ot(i.baseValue.price,t.Bt),s=this.Ls.kt().he();this.De.it({st:this.zs,ht:i.lineWidth,Wt:i.lineStyle,ds:i.lineType,fs:n,vs:!1,nt:this.Es,cs:s}),this.Oe.it({st:this.zs,ht:i.lineWidth,Wt:i.lineStyle,ds:i.lineVisible?i.lineType:void 0,Rs:i.pointMarkersVisible?i.pointMarkersRadius||i.lineWidth/2+2:void 0,fs:n,nt:this.Es,cs:s})}}class $t extends H{constructor(){super(...arguments),this.Lt=null,this.le=0}it(t){this.Lt=t}Z(t){if(null===this.Lt||0===this.Lt.zn.length||null===this.Lt.nt)return;const{horizontalPixelRatio:i}=t;if(this.le=function(t,i){if(t>=2.5&&t<=4)return Math.floor(3*i);const n=1-.2*Math.atan(Math.max(4,t)-4)/(.5*Math.PI),s=Math.floor(t*n*i),e=Math.floor(t*i),r=Math.min(s,e);return Math.max(Math.floor(i),r)}(this.Lt.he,i),this.le>=2){Math.floor(i)%2!=this.le%2&&this.le--}const n=this.Lt.zn;this.Lt.Ae&&this.Be(t,n,this.Lt.nt),this.Lt.ci&&this.Ve(t,n,this.Lt.nt);const s=this.ze(i);(!this.Lt.ci||this.le>2*s)&&this.Ee(t,n,this.Lt.nt)}Be(t,i,n){if(null===this.Lt)return;const{context:s,horizontalPixelRatio:e,verticalPixelRatio:r}=t;let h="",l=Math.min(Math.floor(e),Math.floor(this.Lt.he*e));l=Math.max(Math.floor(e),Math.min(l,this.le));const a=Math.floor(.5*l);let o=null;for(let t=n.from;t2*l)q(s,o,u,_-o+1,c-u+1,l);else{const 
t=_-o+1;s.fillRect(o,u,t,c-u+1)}a=_}}Ee(t,i,n){if(null===this.Lt)return;const{context:s,horizontalPixelRatio:e,verticalPixelRatio:r}=t;let h="";const l=this.ze(e);for(let t=n.from;to||s.fillRect(_,a,u-_+1,o-a+1)}}}class Ut extends Nt{constructor(){super(...arguments),this.Ws=new $t}ne(t,i,n){return Object.assign(Object.assign({},this.be(t,i,n)),n.Hs(t))}Gs(){const t=this.Is.W();this.Ws.it({zn:this.zs,he:this.Ls.kt().he(),Ae:t.wickVisible,ci:t.borderVisible,nt:this.Es})}}class qt{constructor(t,i){this.Ne=t,this.Li=i}K(t,i,n){this.Ne.draw(t,this.Li,i,n)}}class Yt extends zt{constructor(t,i,n){super(t,i,!1),this.wn=n,this.Ws=new qt(this.wn.renderer(),(i=>{const n=t.Pt();return null===n?null:t.At().Ot(i,n.Bt)}))}Fe(t){return this.wn.priceValueBuilder(t)}We(t){return this.wn.isWhitespace(t)}qs(){const t=this.Is.$s();this.zs=this.Is.zn().ie().map((i=>Object.assign(Object.assign({ut:i.se,et:NaN},t.Hs(i.se)),{je:i.He})))}Zs(t,i){i.Js(this.zs,B(this.Es))}Gs(){this.wn.update({bars:this.zs.map(Xt),barSpacing:this.Ls.kt().he(),visibleRange:this.Es},this.Is.W())}}function Xt(t){return{x:t.et,time:t.ut,originalData:t.je,barColor:t.ue}}class Kt extends H{constructor(){super(...arguments),this.Lt=null,this.$e=[]}it(t){this.Lt=t,this.$e=[]}Z({context:t,horizontalPixelRatio:i,verticalPixelRatio:n}){if(null===this.Lt||0===this.Lt.st.length||null===this.Lt.nt)return;this.$e.length||this.Ue(i);const s=Math.max(1,Math.floor(n)),e=Math.round(this.Lt.qe*n)-Math.floor(s/2),r=e+s;for(let i=this.Lt.nt.from;is.Xe?s.di=n.Os-i-1:n.Os=s.di+i+1))}let s=Math.ceil(this.Lt.he*t);for(let t=this.Lt.nt.from;t0&&s<4)for(let t=this.Lt.nt.from;ts&&(i.Ye>i.Xe?i.di-=1:i.Os+=1)}}}class Zt extends Et{constructor(){super(...arguments),this.Ws=new Kt}ne(t,i,n){return Object.assign(Object.assign({},this.te(t,i)),n.Hs(t))}Gs(){const t={st:this.zs,he:this.Ls.kt().he(),nt:this.Es,qe:this.Is.At().Ot(this.Is.W().base,f(this.Is.Pt()).Bt)};this.Ws.it(t)}}class Gt extends Et{constructor(){super(...arguments),this.Ws=new 
Pt}ne(t,i,n){return Object.assign(Object.assign({},this.te(t,i)),n.Hs(t))}Gs(){const t=this.Is.W(),i={st:this.zs,Wt:t.lineStyle,ds:t.lineVisible?t.lineType:void 0,ht:t.lineWidth,Rs:t.pointMarkersVisible?t.pointMarkersRadius||t.lineWidth/2+2:void 0,nt:this.Es,cs:this.Ls.kt().he()};this.Ws.it(i)}}const Jt=/[2-9]/g;class Qt{constructor(t=50){this.Ke=0,this.Ze=1,this.Ge=1,this.Je={},this.Qe=new Map,this.tr=t}ir(){this.Ke=0,this.Qe.clear(),this.Ze=1,this.Ge=1,this.Je={}}Si(t,i,n){return this.nr(t,i,n).width}xi(t,i,n){const s=this.nr(t,i,n);return((s.actualBoundingBoxAscent||0)-(s.actualBoundingBoxDescent||0))/2}nr(t,i,n){const s=n||Jt,e=String(i).replace(s,"0");if(this.Qe.has(e))return d(this.Qe.get(e)).sr;if(this.Ke===this.tr){const t=this.Je[this.Ge];delete this.Je[this.Ge],this.Qe.delete(t),this.Ge++,this.Ke--}t.save(),t.textBaseline="middle";const r=t.measureText(e);return t.restore(),0===r.width&&i.length||(this.Qe.set(e,{sr:r,er:this.Ze}),this.Je[this.Ze]=e,this.Ke++,this.Ze++),r}}class ti{constructor(t){this.rr=null,this.k=null,this.hr="right",this.lr=t}ar(t,i,n){this.rr=t,this.k=i,this.hr=n}K(t){null!==this.k&&null!==this.rr&&this.rr.K(t,this.k,this.lr,this.hr)}}class ii{constructor(t,i,n){this._r=t,this.lr=new Qt(50),this.ur=i,this.F=n,this.j=-1,this.Ht=new ti(this.lr)}xt(){const t=this.F.cr(this.ur);if(null===t)return null;const i=t.dr(this.ur)?t.vr():this.ur.At();if(null===i)return null;const n=t.pr(i);if("overlay"===n)return null;const s=this.F.mr();return s.P!==this.j&&(this.j=s.P,this.lr.ir()),this.Ht.ar(this._r.Ei(),s,n),this.Ht}}class ni extends H{constructor(){super(...arguments),this.Lt=null}it(t){this.Lt=t}br(t,i){var n;if(!(null===(n=this.Lt)||void 0===n?void 0:n.Tt))return null;const{rt:s,ht:e,wr:r}=this.Lt;return i>=s-e-7&&i<=s+e+7?{gr:this.Lt,wr:r}:null}Z({context:t,bitmapSize:i,horizontalPixelRatio:n,verticalPixelRatio:s}){if(null===this.Lt)return;if(!1===this.Lt.Tt)return;const 
e=Math.round(this.Lt.rt*s);e<0||e>i.height||(t.lineCap="butt",t.strokeStyle=this.Lt.O,t.lineWidth=Math.floor(this.Lt.ht*n),_(t,this.Lt.Wt),u(t,e,0,i.width))}}class si{constructor(t){this.Mr={rt:0,O:"rgba(0, 0, 0, 0)",ht:1,Wt:0,Tt:!1},this.Sr=new ni,this.bt=!0,this.Is=t,this.Ls=t.qt(),this.Sr.it(this.Mr)}gt(){this.bt=!0}xt(){return this.Is.Tt()?(this.bt&&(this.yr(),this.bt=!1),this.Sr):null}}class ei extends si{constructor(t){super(t)}yr(){this.Mr.Tt=!1;const t=this.Is.At(),i=t.kr().kr;if(2!==i&&3!==i)return;const n=this.Is.W();if(!n.baseLineVisible||!this.Is.Tt())return;const s=this.Is.Pt();null!==s&&(this.Mr.Tt=!0,this.Mr.rt=t.Ot(s.Bt,s.Bt),this.Mr.O=n.baseLineColor,this.Mr.ht=n.baseLineWidth,this.Mr.Wt=n.baseLineStyle)}}class ri extends H{constructor(){super(...arguments),this.Lt=null}it(t){this.Lt=t}He(){return this.Lt}Z({context:t,horizontalPixelRatio:i,verticalPixelRatio:n}){const s=this.Lt;if(null===s)return;const e=Math.max(1,Math.floor(i)),r=e%2/2,h=Math.round(s.Xe.x*i)+r,l=s.Xe.y*n;t.fillStyle=s.Cr,t.beginPath();const a=Math.max(2,1.5*s.Tr)*i;t.arc(h,l,a,0,2*Math.PI,!1),t.fill(),t.fillStyle=s.Pr,t.beginPath(),t.arc(h,l,s.ot*i,0,2*Math.PI,!1),t.fill(),t.lineWidth=e,t.strokeStyle=s.Rr,t.beginPath(),t.arc(h,l,s.ot*i+e/2,0,2*Math.PI,!1),t.stroke()}}const hi=[{Dr:0,Or:.25,Ar:4,Br:10,Vr:.25,zr:0,Er:.4,Ir:.8},{Dr:.25,Or:.525,Ar:10,Br:14,Vr:0,zr:0,Er:.8,Ir:0},{Dr:.525,Or:1,Ar:14,Br:14,Vr:0,zr:0,Er:0,Ir:0}];function li(t,i,n,s){return function(t,i){if("transparent"===t)return t;const n=S(t),s=n[3];return`rgba(${n[0]}, ${n[1]}, ${n[2]}, ${i*s})`}(t,n+(s-n)*i)}function ai(t,i){const n=t%2600/2600;let s;for(const t of hi)if(n>=t.Dr&&n<=t.Or){s=t;break}c(void 0!==s,"Last price animation internal logic error");const e=(n-s.Dr)/(s.Or-s.Dr);return{Pr:li(i,e,s.Vr,s.zr),Rr:li(i,e,s.Er,s.Ir),ot:(r=e,h=s.Ar,l=s.Br,h+(l-h)*r)};var r,h,l}class oi{constructor(t){this.Ht=new 
ri,this.bt=!0,this.Lr=!0,this.Nr=performance.now(),this.Fr=this.Nr-1,this.Wr=t}jr(){this.Fr=this.Nr-1,this.gt()}Hr(){if(this.gt(),2===this.Wr.W().lastPriceAnimation){const t=performance.now(),i=this.Fr-t;if(i>0)return void(i<650&&(this.Fr+=2600));this.Nr=t,this.Fr=t+2600}}gt(){this.bt=!0}$r(){this.Lr=!0}Tt(){return 0!==this.Wr.W().lastPriceAnimation}Ur(){switch(this.Wr.W().lastPriceAnimation){case 0:return!1;case 1:return!0;case 2:return performance.now()<=this.Fr}}xt(){return this.bt?(this.St(),this.bt=!1,this.Lr=!1):this.Lr&&(this.qr(),this.Lr=!1),this.Ht}St(){this.Ht.it(null);const t=this.Wr.qt().kt(),i=t.Xs(),n=this.Wr.Pt();if(null===i||null===n)return;const s=this.Wr.Yr(!0);if(s.Xr||!i.Kr(s.se))return;const e={x:t.It(s.se),y:this.Wr.At().Ot(s.ct,n.Bt)},r=s.O,h=this.Wr.W().lineWidth,l=ai(this.Zr(),r);this.Ht.it({Cr:r,Tr:h,Pr:l.Pr,Rr:l.Rr,ot:l.ot,Xe:e})}qr(){const t=this.Ht.He();if(null!==t){const i=ai(this.Zr(),t.Cr);t.Pr=i.Pr,t.Rr=i.Rr,t.ot=i.ot}}Zr(){return this.Ur()?performance.now()-this.Nr:2599}}function _i(t,i){return St(Math.min(Math.max(t,12),30)*i)}function ui(t,i){switch(t){case"arrowDown":case"arrowUp":return _i(i,1);case"circle":return _i(i,.8);case"square":return _i(i,.7)}}function ci(t){return function(t){const i=Math.ceil(t);return i%2!=0?i-1:i}(_i(t,1))}function di(t){return Math.max(_i(t,.1),3)}function fi(t,i,n,s,e){const r=ui("square",n),h=(r-1)/2,l=t-h,a=i-h;return s>=l&&s<=l+r&&e>=a&&e<=a+r}function vi(t,i,n,s,e){const r=(ui("arrowUp",e)-1)/2,h=(St(e/2)-1)/2;i.beginPath(),t?(i.moveTo(n-r,s),i.lineTo(n,s-r),i.lineTo(n+r,s),i.lineTo(n+h,s),i.lineTo(n+h,s+r),i.lineTo(n-h,s+r),i.lineTo(n-h,s)):(i.moveTo(n-r,s),i.lineTo(n,s+r),i.lineTo(n+r,s),i.lineTo(n+h,s),i.lineTo(n+h,s-r),i.lineTo(n-h,s-r),i.lineTo(n-h,s)),i.fill()}function pi(t,i,n,s,e,r){return fi(i,n,s,e,r)}class mi extends L{constructor(){super(...arguments),this.Lt=null,this.lr=new 
Qt,this.j=-1,this.H="",this.Gr=""}it(t){this.Lt=t}ar(t,i){this.j===t&&this.H===i||(this.j=t,this.H=i,this.Gr=z(t,i),this.lr.ir())}br(t,i){if(null===this.Lt||null===this.Lt.nt)return null;for(let n=this.Lt.nt.from;n=t&&e<=t+n&&r>=i-h&&r<=i+h}(t.Jt.et,t.Jt.rt,t.Jt.$i,t.Jt.Et,i,n))||function(t,i,n){if(0===t.Ks)return!1;switch(t.th){case"arrowDown":case"arrowUp":return pi(0,t.et,t.rt,t.Ks,i,n);case"circle":return function(t,i,n,s,e){const r=2+ui("circle",n)/2,h=t-s,l=i-e;return Math.sqrt(h*h+l*l)<=r}(t.et,t.rt,t.Ks,i,n);case"square":return fi(t.et,t.rt,t.Ks,i,n)}}(t,i,n)}function gi(t,i,n,s,e,r,h,l,a){const o=T(n)?n:n.xe,_=T(n)?n:n.ge,u=T(n)?n:n.Me,c=T(i.size)?Math.max(i.size,0):1,d=ci(l.he())*c,f=d/2;switch(t.Ks=d,i.position){case"inBar":return t.rt=h.Ot(o,a),void(void 0!==t.Jt&&(t.Jt.rt=t.rt+f+r+.6*e));case"aboveBar":return t.rt=h.Ot(_,a)-f-s.ih,void 0!==t.Jt&&(t.Jt.rt=t.rt-f-.6*e,s.ih+=1.2*e),void(s.ih+=d+r);case"belowBar":return t.rt=h.Ot(u,a)+f+s.nh,void 0!==t.Jt&&(t.Jt.rt=t.rt+f+r+.6*e,s.nh+=1.2*e),void(s.nh+=d+r)}i.position}class Mi{constructor(t,i){this.bt=!0,this.sh=!0,this.eh=!0,this.rh=null,this.Ht=new mi,this.Wr=t,this.Ui=i,this.Lt={st:[],nt:null}}gt(t){this.bt=!0,this.eh=!0,"data"===t&&(this.sh=!0)}xt(t){if(!this.Wr.Tt())return null;this.bt&&this.hh();const i=this.Ui.W().layout;return this.Ht.ar(i.fontSize,i.fontFamily),this.Ht.it(this.Lt),this.Ht}lh(){if(this.eh){if(this.Wr.ah().length>0){const t=this.Ui.kt().he(),i=di(t),n=1.5*ci(t)+2*i;this.rh={above:n,below:n}}else this.rh=null;this.eh=!1}return this.rh}hh(){const t=this.Wr.At(),i=this.Ui.kt(),n=this.Wr.ah();this.sh&&(this.Lt.st=n.map((t=>({ut:t.time,et:0,rt:0,Ks:0,th:t.shape,O:t.color,Jr:t.Jr,wr:t.id,Jt:void 0}))),this.sh=!1);const s=this.Ui.W().layout;this.Lt.nt=null;const e=i.Xs();if(null===e)return;const r=this.Wr.Pt();if(null===r)return;if(0===this.Lt.st.length)return;let h=NaN;const l=di(i.he()),a={ih:l,nh:l};this.Lt.nt=Vt(this.Lt.st,e,!0);for(let 
e=this.Lt.nt.from;e0&&(_.Jt={Qr:o.text,et:0,rt:0,$i:0,Et:0});const u=this.Wr.oh(o.time);null!==u&&gi(_,o,u,a,s.fontSize,l,t,i,r.Bt)}this.bt=!1}}class xi extends si{constructor(t){super(t)}yr(){const t=this.Mr;t.Tt=!1;const i=this.Is.W();if(!i.priceLineVisible||!this.Is.Tt())return;const n=this.Is.Yr(0===i.priceLineSource);n.Xr||(t.Tt=!0,t.rt=n.ki,t.O=this.Is._h(n.O),t.ht=i.priceLineWidth,t.Wt=i.priceLineStyle)}}class Si extends Q{constructor(t){super(),this.$t=t}Ii(t,i,n){t.Tt=!1,i.Tt=!1;const s=this.$t;if(!s.Tt())return;const e=s.W(),r=e.lastValueVisible,h=""!==s.uh(),l=0===e.seriesLastValueMode,a=s.Yr(!1);if(a.Xr)return;r&&(t.Jt=this.dh(a,r,l),t.Tt=0!==t.Jt.length),(h||l)&&(i.Jt=this.fh(a,r,h,l),i.Tt=i.Jt.length>0);const o=s._h(a.O),_=y(o);n.t=_.t,n.ki=a.ki,i.Vt=s.qt().zt(a.ki/s.At().Et()),t.Vt=o,t.O=_.i,i.O=_.i}fh(t,i,n,s){let e="";const r=this.$t.uh();return n&&0!==r.length&&(e+=`${r} `),i&&s&&(e+=this.$t.At().ph()?t.mh:t.bh),e.trim()}dh(t,i,n){return i?n?this.$t.At().ph()?t.bh:t.mh:t.Jt:""}}function yi(t,i,n,s){const e=Number.isFinite(i),r=Number.isFinite(n);return e&&r?t(i,n):e||r?e?i:n:s}class ki{constructor(t,i){this.wh=t,this.gh=i}Mh(t){return null!==t&&(this.wh===t.wh&&this.gh===t.gh)}xh(){return new ki(this.wh,this.gh)}Sh(){return this.wh}yh(){return this.gh}kh(){return this.gh-this.wh}Fi(){return this.gh===this.wh||Number.isNaN(this.gh)||Number.isNaN(this.wh)}ts(t){return null===t?this:new ki(yi(Math.min,this.Sh(),t.Sh(),-1/0),yi(Math.max,this.yh(),t.yh(),1/0))}Ch(t){if(!T(t))return;if(0===this.gh-this.wh)return;const i=.5*(this.gh+this.wh);let n=this.gh-i,s=this.wh-i;n*=t,s*=t,this.gh=i+n,this.wh=i+s}Th(t){T(t)&&(this.gh+=t,this.wh+=t)}Ph(){return{minValue:this.wh,maxValue:this.gh}}static Rh(t){return null===t?null:new ki(t.minValue,t.maxValue)}}class Ci{constructor(t,i){this.Dh=t,this.Oh=i||null}Ah(){return this.Dh}Bh(){return this.Oh}Ph(){return null===this.Dh?null:{priceRange:this.Dh.Ph(),margins:this.Oh||void 0}}static Rh(t){return 
null===t?null:new Ci(ki.Rh(t.priceRange),t.margins)}}class Ti extends si{constructor(t,i){super(t),this.Vh=i}yr(){const t=this.Mr;t.Tt=!1;const i=this.Vh.W();if(!this.Is.Tt()||!i.lineVisible)return;const n=this.Vh.zh();null!==n&&(t.Tt=!0,t.rt=n,t.O=i.color,t.ht=i.lineWidth,t.Wt=i.lineStyle,t.wr=this.Vh.W().id)}}class Pi extends Q{constructor(t,i){super(),this.Wr=t,this.Vh=i}Ii(t,i,n){t.Tt=!1,i.Tt=!1;const s=this.Vh.W(),e=s.axisLabelVisible,r=""!==s.title,h=this.Wr;if(!e||!h.Tt())return;const l=this.Vh.zh();if(null===l)return;r&&(i.Jt=s.title,i.Tt=!0),i.Vt=h.qt().zt(l/h.At().Et()),t.Jt=this.Eh(s.price),t.Tt=!0;const a=y(s.axisLabelColor||s.color);n.t=a.t;const o=s.axisLabelTextColor||a.i;t.O=o,i.O=o,n.ki=l}Eh(t){const i=this.Wr.Pt();return null===i?"":this.Wr.At().Wi(t,i.Bt)}}class Ri{constructor(t,i){this.Wr=t,this.cn=i,this.Ih=new Ti(t,this),this._r=new Pi(t,this),this.Lh=new ii(this._r,t,t.qt())}Nh(t){C(this.cn,t),this.gt(),this.Wr.qt().Fh()}W(){return this.cn}Wh(){return this.Ih}jh(){return this.Lh}Hh(){return this._r}gt(){this.Ih.gt(),this._r.gt()}zh(){const t=this.Wr,i=t.At();if(t.qt().kt().Fi()||i.Fi())return null;const n=t.Pt();return null===n?null:i.Ot(this.cn.price,n.Bt)}}class Di extends et{constructor(t){super(),this.Ui=t}qt(){return this.Ui}}const Oi={Bar:(t,i,n,s)=>{var e;const r=i.upColor,h=i.downColor,l=f(t(n,s)),a=v(l.Bt[0])<=v(l.Bt[3]);return{ue:null!==(e=l.O)&&void 0!==e?e:a?r:h}},Candlestick:(t,i,n,s)=>{var e,r,h;const l=i.upColor,a=i.downColor,o=i.borderUpColor,_=i.borderDownColor,u=i.wickUpColor,c=i.wickDownColor,d=f(t(n,s)),p=v(d.Bt[0])<=v(d.Bt[3]);return{ue:null!==(e=d.O)&&void 0!==e?e:p?l:a,Le:null!==(r=d.Vt)&&void 0!==r?r:p?o:_,Ie:null!==(h=d.$h)&&void 0!==h?h:p?u:c}},Custom:(t,i,n,s)=>{var e;return{ue:null!==(e=f(t(n,s)).O)&&void 0!==e?e:i.color}},Area:(t,i,n,s)=>{var e,r,h,l;const a=f(t(n,s));return{ue:null!==(e=a._t)&&void 0!==e?e:i.lineColor,_t:null!==(r=a._t)&&void 0!==r?r:i.lineColor,Ts:null!==(h=a.Ts)&&void 
0!==h?h:i.topColor,Ps:null!==(l=a.Ps)&&void 0!==l?l:i.bottomColor}},Baseline:(t,i,n,s)=>{var e,r,h,l,a,o;const _=f(t(n,s));return{ue:_.Bt[3]>=i.baseValue.price?i.topLineColor:i.bottomLineColor,Pe:null!==(e=_.Pe)&&void 0!==e?e:i.topLineColor,Re:null!==(r=_.Re)&&void 0!==r?r:i.bottomLineColor,Se:null!==(h=_.Se)&&void 0!==h?h:i.topFillColor1,ye:null!==(l=_.ye)&&void 0!==l?l:i.topFillColor2,ke:null!==(a=_.ke)&&void 0!==a?a:i.bottomFillColor1,Ce:null!==(o=_.Ce)&&void 0!==o?o:i.bottomFillColor2}},Line:(t,i,n,s)=>{var e,r;const h=f(t(n,s));return{ue:null!==(e=h.O)&&void 0!==e?e:i.color,_t:null!==(r=h.O)&&void 0!==r?r:i.color}},Histogram:(t,i,n,s)=>{var e;return{ue:null!==(e=f(t(n,s)).O)&&void 0!==e?e:i.color}}};class Ai{constructor(t){this.Uh=(t,i)=>void 0!==i?i.Bt:this.Wr.zn().qh(t),this.Wr=t,this.Yh=Oi[t.Xh()]}Hs(t,i){return this.Yh(this.Uh,this.Wr.W(),t,i)}}var Bi;!function(t){t[t.NearestLeft=-1]="NearestLeft",t[t.None=0]="None",t[t.NearestRight=1]="NearestRight"}(Bi||(Bi={}));const Vi=30;class zi{constructor(){this.Kh=[],this.Zh=new Map,this.Gh=new Map}Jh(){return this.Ks()>0?this.Kh[this.Kh.length-1]:null}Qh(){return this.Ks()>0?this.tl(0):null}Vn(){return this.Ks()>0?this.tl(this.Kh.length-1):null}Ks(){return this.Kh.length}Fi(){return 0===this.Ks()}Kr(t){return null!==this.il(t,0)}qh(t){return this.nl(t)}nl(t,i=0){const n=this.il(t,i);return null===n?null:Object.assign(Object.assign({},this.sl(n)),{se:this.tl(n)})}ie(){return this.Kh}el(t,i,n){if(this.Fi())return null;let s=null;for(const e of n){s=Ei(s,this.rl(t,i,e))}return s}it(t){this.Gh.clear(),this.Zh.clear(),this.Kh=t}tl(t){return this.Kh[t].se}sl(t){return this.Kh[t]}il(t,i){const n=this.hl(t);if(null===n&&0!==i)switch(i){case-1:return this.ll(t);case 1:return this.al(t);default:throw new TypeError("Unknown search mode")}return n}ll(t){let i=this.ol(t);return i>0&&(i-=1),i!==this.Kh.length&&this.tl(i)t.set.se>i))}ul(t,i,n){let s=null;for(let e=t;es.dl&&(s.dl=t)))}return s}rl(t,i,n){if(this.Fi())return 
null;let s=null;const e=f(this.Qh()),r=f(this.Vn()),h=Math.max(t,e),l=Math.min(i,r),a=Math.ceil(h/Vi)*Vi,o=Math.max(a,Math.floor(l/Vi)*Vi);{const t=this.ol(h),e=this._l(Math.min(l,a,i));s=Ei(s,this.ul(t,e,n))}let _=this.Zh.get(n);void 0===_&&(_=new Map,this.Zh.set(n,_));for(let t=Math.max(a+1,h);tnew Li(t)));return this.gl={vl:e,pl:r},r}tn(){var t,i,n,s;const e=null!==(n=null===(i=(t=this.kl).timeAxisViews)||void 0===i?void 0:i.call(t))&&void 0!==n?n:[];if((null===(s=this.Ml)||void 0===s?void 0:s.vl)===e)return this.Ml.pl;const r=this.Wr.qt().kt(),h=e.map((t=>new Fi(t,r)));return this.Ml={vl:e,pl:h},h}Rn(){var t,i,n,s;const e=null!==(n=null===(i=(t=this.kl).priceAxisViews)||void 0===i?void 0:i.call(t))&&void 0!==n?n:[];if((null===(s=this.xl)||void 0===s?void 0:s.vl)===e)return this.xl.pl;const r=this.Wr.At(),h=e.map((t=>new Wi(t,r)));return this.xl={vl:e,pl:h},h}Tl(){var t,i,n,s;const e=null!==(n=null===(i=(t=this.kl).priceAxisPaneViews)||void 0===i?void 0:i.call(t))&&void 0!==n?n:[];if((null===(s=this.Sl)||void 0===s?void 0:s.vl)===e)return this.Sl.pl;const r=e.map((t=>new Li(t)));return this.Sl={vl:e,pl:r},r}Pl(){var t,i,n,s;const e=null!==(n=null===(i=(t=this.kl).timeAxisPaneViews)||void 0===i?void 0:i.call(t))&&void 0!==n?n:[];if((null===(s=this.yl)||void 0===s?void 0:s.vl)===e)return this.yl.pl;const r=e.map((t=>new Li(t)));return this.yl={vl:e,pl:r},r}Rl(t,i){var n,s,e;return null!==(e=null===(s=(n=this.kl).autoscaleInfo)||void 0===s?void 0:s.call(n,t,i))&&void 0!==e?e:null}br(t,i){var n,s,e;return null!==(e=null===(s=(n=this.kl).hitTest)||void 0===s?void 0:s.call(n,t,i))&&void 0!==e?e:null}}function Hi(t,i,n,s){t.forEach((t=>{i(t).forEach((t=>{t.ml()===n&&s.push(t)}))}))}function $i(t){return t.Pn()}function Ui(t){return t.Tl()}function qi(t){return t.Pl()}class Yi extends Di{constructor(t,i,n,s,e){super(t),this.Lt=new zi,this.Ih=new xi(this),this.Dl=[],this.Ol=new 
ei(this),this.Al=null,this.Bl=null,this.Vl=[],this.zl=[],this.El=null,this.Il=[],this.cn=i,this.Ll=n;const r=new Si(this);this.hn=[r],this.Lh=new ii(r,this,t),"Area"!==n&&"Line"!==n&&"Baseline"!==n||(this.Al=new oi(this)),this.Nl(),this.Fl(e)}S(){null!==this.El&&clearTimeout(this.El)}_h(t){return this.cn.priceLineColor||t}Yr(t){const i={Xr:!0},n=this.At();if(this.qt().kt().Fi()||n.Fi()||this.Lt.Fi())return i;const s=this.qt().kt().Xs(),e=this.Pt();if(null===s||null===e)return i;let r,h;if(t){const t=this.Lt.Jh();if(null===t)return i;r=t,h=t.se}else{const t=this.Lt.nl(s.di(),-1);if(null===t)return i;if(r=this.Lt.qh(t.se),null===r)return i;h=t.se}const l=r.Bt[3],a=this.$s().Hs(h,{Bt:r}),o=n.Ot(l,e.Bt);return{Xr:!1,ct:l,Jt:n.Wi(l,e.Bt),mh:n.Wl(l),bh:n.jl(l,e.Bt),O:a.ue,ki:o,se:h}}$s(){return null!==this.Bl||(this.Bl=new Ai(this)),this.Bl}W(){return this.cn}Nh(t){const i=t.priceScaleId;void 0!==i&&i!==this.cn.priceScaleId&&this.qt().Hl(this,i),C(this.cn,t),void 0!==t.priceFormat&&(this.Nl(),this.qt().$l()),this.qt().Ul(this),this.qt().ql(),this.wn.gt("options")}it(t,i){this.Lt.it(t),this.Yl(),this.wn.gt("data"),this.dn.gt("data"),null!==this.Al&&(i&&i.Xl?this.Al.Hr():0===t.length&&this.Al.jr());const n=this.qt().cr(this);this.qt().Kl(n),this.qt().Ul(this),this.qt().ql(),this.qt().Fh()}Zl(t){this.Vl=t,this.Yl();const i=this.qt().cr(this);this.dn.gt("data"),this.qt().Kl(i),this.qt().Ul(this),this.qt().ql(),this.qt().Fh()}Gl(){return this.Vl}ah(){return this.zl}Jl(t){const i=new Ri(this,t);return this.Dl.push(i),this.qt().Ul(this),i}Ql(t){const i=this.Dl.indexOf(t);-1!==i&&this.Dl.splice(i,1),this.qt().Ul(this)}Xh(){return this.Ll}Pt(){const t=this.ta();return null===t?null:{Bt:t.Bt[3],ia:t.ut}}ta(){const t=this.qt().kt().Xs();if(null===t)return null;const i=t.Os();return this.Lt.nl(i,1)}zn(){return this.Lt}oh(t){const i=this.Lt.qh(t);return 
null===i?null:"Bar"===this.Ll||"Candlestick"===this.Ll||"Custom"===this.Ll?{we:i.Bt[0],ge:i.Bt[1],Me:i.Bt[2],xe:i.Bt[3]}:i.Bt[3]}na(t){const i=[];Hi(this.Il,$i,"top",i);const n=this.Al;return null!==n&&n.Tt()?(null===this.El&&n.Ur()&&(this.El=setTimeout((()=>{this.El=null,this.qt().sa()}),0)),n.$r(),i.push(n),i):i}Pn(){const t=[];this.ea()||t.push(this.Ol),t.push(this.wn,this.Ih,this.dn);const i=this.Dl.map((t=>t.Wh()));return t.push(...i),Hi(this.Il,$i,"normal",t),t}ra(){return this.ha($i,"bottom")}la(t){return this.ha(Ui,t)}aa(t){return this.ha(qi,t)}oa(t,i){return this.Il.map((n=>n.br(t,i))).filter((t=>null!==t))}Qi(t){return[this.Lh,...this.Dl.map((t=>t.jh()))]}Rn(t,i){if(i!==this.Xi&&!this.ea())return[];const n=[...this.hn];for(const t of this.Dl)n.push(t.Hh());return this.Il.forEach((t=>{n.push(...t.Rn())})),n}tn(){const t=[];return this.Il.forEach((i=>{t.push(...i.tn())})),t}Rl(t,i){if(void 0!==this.cn.autoscaleInfoProvider){const n=this.cn.autoscaleInfoProvider((()=>{const n=this._a(t,i);return null===n?null:n.Ph()}));return Ci.Rh(n)}return this._a(t,i)}ua(){return this.cn.priceFormat.minMove}ca(){return this.da}On(){var t;this.wn.gt(),this.dn.gt();for(const t of this.hn)t.gt();for(const t of this.Dl)t.gt();this.Ih.gt(),this.Ol.gt(),null===(t=this.Al)||void 0===t||t.gt(),this.Il.forEach((t=>t.On()))}At(){return f(super.At())}Ct(t){if(!(("Line"===this.Ll||"Area"===this.Ll||"Baseline"===this.Ll)&&this.cn.crosshairMarkerVisible))return null;const i=this.Lt.qh(t);if(null===i)return null;return{ct:i.Bt[3],ot:this.fa(),Vt:this.va(),Dt:this.pa(),Rt:this.ma(t)}}uh(){return this.cn.title}Tt(){return this.cn.visible}ba(t){this.Il.push(new ji(t,this))}wa(t){this.Il=this.Il.filter((i=>i.Cl()!==t))}ga(){if(this.wn instanceof Yt!=!1)return t=>this.wn.Fe(t)}Ma(){if(this.wn instanceof Yt!=!1)return t=>this.wn.We(t)}ea(){return!lt(this.At().xa())}_a(t,i){if(!P(t)||!P(i)||this.Lt.Fi())return null;const 
n="Line"===this.Ll||"Area"===this.Ll||"Baseline"===this.Ll||"Histogram"===this.Ll?[3]:[2,1],s=this.Lt.el(t,i,n);let e=null!==s?new ki(s.cl,s.dl):null;if("Histogram"===this.Xh()){const t=this.cn.base,i=new ki(t,t);e=null!==e?e.ts(i):i}let r=this.dn.lh();return this.Il.forEach((n=>{const s=n.Rl(t,i);if(null==s?void 0:s.priceRange){const t=new ki(s.priceRange.minValue,s.priceRange.maxValue);e=null!==e?e.ts(t):t}var h,l,a,o;(null==s?void 0:s.margins)&&(h=r,l=s.margins,r={above:Math.max(null!==(a=null==h?void 0:h.above)&&void 0!==a?a:0,l.above),below:Math.max(null!==(o=null==h?void 0:h.below)&&void 0!==o?o:0,l.below)})})),new Ci(e,r)}fa(){switch(this.Ll){case"Line":case"Area":case"Baseline":return this.cn.crosshairMarkerRadius}return 0}va(){switch(this.Ll){case"Line":case"Area":case"Baseline":{const t=this.cn.crosshairMarkerBorderColor;if(0!==t.length)return t}}return null}pa(){switch(this.Ll){case"Line":case"Area":case"Baseline":return this.cn.crosshairMarkerBorderWidth}return 0}ma(t){switch(this.Ll){case"Line":case"Area":case"Baseline":{const t=this.cn.crosshairMarkerBackgroundColor;if(0!==t.length)return t}}return this.$s().Hs(t).ue}Nl(){switch(this.cn.priceFormat.type){case"custom":this.da={format:this.cn.priceFormat.formatter};break;case"volume":this.da=new dt(this.cn.priceFormat.precision);break;case"percent":this.da=new ct(this.cn.priceFormat.precision);break;default:{const t=Math.pow(10,this.cn.priceFormat.precision);this.da=new ut(t,this.cn.priceFormat.minMove*t)}}null!==this.Xi&&this.Xi.Sa()}Yl(){const t=this.qt().kt();if(!t.ya()||this.Lt.Fi())return void(this.zl=[]);const i=f(this.Lt.Qh());this.zl=this.Vl.map(((n,s)=>{const e=f(t.ka(n.time,!0)),r=et instanceof Yi)).reduce(((t,s)=>{if(n.dr(s)||!s.Tt())return t;const e=s.At(),r=s.zn();if(e.Fi()||!r.Kr(i))return t;const h=r.qh(i);if(null===h)return t;const l=v(s.Pt());return t.concat([e.Ot(h.Bt[3],l.Bt)])}),[]);if(0===l.length)return s;l.sort(((t,i)=>Math.abs(t-h)-Math.abs(i-h)));const a=l[0];return 
s=e.pn(a,r),s}}class Ki extends H{constructor(){super(...arguments),this.Lt=null}it(t){this.Lt=t}Z({context:t,bitmapSize:i,horizontalPixelRatio:n,verticalPixelRatio:s}){if(null===this.Lt)return;const e=Math.max(1,Math.floor(n));t.lineWidth=e,function(t,i){t.save(),t.lineWidth%2&&t.translate(.5,.5),i(),t.restore()}(t,(()=>{const r=f(this.Lt);if(r.Pa){t.strokeStyle=r.Ra,_(t,r.Da),t.beginPath();for(const s of r.Oa){const r=Math.round(s.Aa*n);t.moveTo(r,-e),t.lineTo(r,i.height+e)}t.stroke()}if(r.Ba){t.strokeStyle=r.Va,_(t,r.za),t.beginPath();for(const n of r.Ea){const r=Math.round(n.Aa*s);t.moveTo(-e,r),t.lineTo(i.width+e,r)}t.stroke()}}))}}class Zi{constructor(t){this.Ht=new Ki,this.bt=!0,this.nn=t}gt(){this.bt=!0}xt(){if(this.bt){const t=this.nn.qt().W().grid,i={Ba:t.horzLines.visible,Pa:t.vertLines.visible,Va:t.horzLines.color,Ra:t.vertLines.color,za:t.horzLines.style,Da:t.vertLines.style,Ea:this.nn.vn().Ia(),Oa:(this.nn.qt().kt().Ia()||[]).map((t=>({Aa:t.coord})))};this.Ht.it(i),this.bt=!1}return this.Ht}}class Gi{constructor(t){this.wn=new Zi(t)}Wh(){return this.wn}}const Ji={La:4,Na:1e-4};function Qi(t,i){const n=100*(t-i)/i;return i<0?-n:n}function tn(t,i){const n=Qi(t.Sh(),i),s=Qi(t.yh(),i);return new ki(n,s)}function nn(t,i){const n=100*(t-i)/i+100;return i<0?-n:n}function sn(t,i){const n=nn(t.Sh(),i),s=nn(t.yh(),i);return new ki(n,s)}function en(t,i){const n=Math.abs(t);if(n<1e-15)return 0;const s=Math.log10(n+i.Na)+i.La;return t<0?-s:s}function rn(t,i){const n=Math.abs(t);if(n<1e-15)return 0;const s=Math.pow(10,n-i.La)-i.Na;return t<0?-s:s}function hn(t,i){if(null===t)return null;const n=en(t.Sh(),i),s=en(t.yh(),i);return new ki(n,s)}function ln(t,i){if(null===t)return null;const n=rn(t.Sh(),i),s=rn(t.yh(),i);return new ki(n,s)}function an(t){if(null===t)return Ji;const i=Math.abs(t.yh()-t.Sh());if(i>=1||i<1e-15)return Ji;const n=Math.ceil(Math.abs(Math.log10(i))),s=Ji.La+n;return{La:s,Na:1/Math.pow(10,s)}}class 
on{constructor(t,i){if(this.Fa=t,this.Wa=i,function(t){if(t<0)return!1;for(let i=t;i>1;i/=10)if(i%10!=0)return!1;return!0}(this.Fa))this.ja=[2,2.5,2];else{this.ja=[];for(let t=this.Fa;1!==t;){if(t%2==0)this.ja.push(2),t/=2;else{if(t%5!=0)throw new Error("unexpected base");this.ja.push(2,2.5),t/=5}if(this.ja.length>100)throw new Error("something wrong with base")}}}Ha(t,i,n){const s=0===this.Fa?0:1/this.Fa;let e=Math.pow(10,Math.max(0,Math.ceil(Math.log10(t-i)))),r=0,h=this.Wa[0];for(;;){const t=xt(e,s,1e-14)&&e>s+1e-14,i=xt(e,n*h,1e-14),l=xt(e,1,1e-14);if(!(t&&i&&l))break;e/=h,h=this.Wa[++r%this.Wa.length]}if(e<=s+1e-14&&(e=s),e=Math.max(1,e),this.ja.length>0&&(l=e,a=1,o=1e-14,Math.abs(l-a)s+1e-14;)e/=h,h=this.ja[++r%this.ja.length];var l,a,o;return e}}class _n{constructor(t,i,n,s){this.$a=[],this.Li=t,this.Fa=i,this.Ua=n,this.qa=s}Ha(t,i){if(t=o?1:-1;let d=null,f=0;for(let n=a-u;n>o;n-=_){const s=this.qa(n,i,!0);null!==d&&Math.abs(s-d)l||(ff(t.Zi())-f(i.Zi())))}var cn;!function(t){t[t.Normal=0]="Normal",t[t.Logarithmic=1]="Logarithmic",t[t.Percentage=2]="Percentage",t[t.IndexedTo100=3]="IndexedTo100"}(cn||(cn={}));const dn=new ct,fn=new ut(100,1);class vn{constructor(t,i,n,s){this.Qa=0,this.io=null,this.Dh=null,this.no=null,this.so={eo:!1,ro:null},this.ho=0,this.lo=0,this.ao=new k,this.oo=new k,this._o=[],this.uo=null,this.co=null,this.do=null,this.fo=null,this.da=fn,this.vo=an(null),this.po=t,this.cn=i,this.mo=n,this.bo=s,this.wo=new _n(this,100,this.Mo.bind(this),this.xo.bind(this))}xa(){return this.po}W(){return this.cn}Nh(t){if(C(this.cn,t),this.Sa(),void 0!==t.mode&&this.So({kr:t.mode}),void 0!==t.scaleMargins){const i=d(t.scaleMargins.top),n=d(t.scaleMargins.bottom);if(i<0||i>1)throw new Error(`Invalid top margin - expect value between 0 and 1, given=${i}`);if(n<0||n>1||i+n>1)throw new Error(`Invalid bottom margin - expect value between 0 and 1, given=${n}`);if(i+n>1)throw new Error(`Invalid margins - sum of margins must be less than 1, 
given=${i+n}`);this.yo(),this.co=null}}ko(){return this.cn.autoScale}Ja(){return 1===this.cn.mode}ph(){return 2===this.cn.mode}Co(){return 3===this.cn.mode}kr(){return{Wn:this.cn.autoScale,To:this.cn.invertScale,kr:this.cn.mode}}So(t){const i=this.kr();let n=null;void 0!==t.Wn&&(this.cn.autoScale=t.Wn),void 0!==t.kr&&(this.cn.mode=t.kr,2!==t.kr&&3!==t.kr||(this.cn.autoScale=!0),this.so.eo=!1),1===i.kr&&t.kr!==i.kr&&(!function(t,i){if(null===t)return!1;const n=rn(t.Sh(),i),s=rn(t.yh(),i);return isFinite(n)&&isFinite(s)}(this.Dh,this.vo)?this.cn.autoScale=!0:(n=ln(this.Dh,this.vo),null!==n&&this.Po(n))),1===t.kr&&t.kr!==i.kr&&(n=hn(this.Dh,this.vo),null!==n&&this.Po(n));const s=i.kr!==this.cn.mode;s&&(2===i.kr||this.ph())&&this.Sa(),s&&(3===i.kr||this.Co())&&this.Sa(),void 0!==t.To&&i.To!==t.To&&(this.cn.invertScale=t.To,this.Ro()),this.oo.m(i,this.kr())}Do(){return this.oo}P(){return this.mo.fontSize}Et(){return this.Qa}Oo(t){this.Qa!==t&&(this.Qa=t,this.yo(),this.co=null)}Ao(){if(this.io)return this.io;const t=this.Et()-this.Bo()-this.Vo();return this.io=t,t}Ah(){return this.zo(),this.Dh}Po(t,i){const n=this.Dh;(i||null===n&&null!==t||null!==n&&!n.Mh(t))&&(this.co=null,this.Dh=t)}Fi(){return this.zo(),0===this.Qa||!this.Dh||this.Dh.Fi()}Eo(t){return this.To()?t:this.Et()-1-t}Ot(t,i){return this.ph()?t=Qi(t,i):this.Co()&&(t=nn(t,i)),this.xo(t,i)}Qs(t,i,n){this.zo();const s=this.Vo(),e=f(this.Ah()),r=e.Sh(),h=e.yh(),l=this.Ao()-1,a=this.To(),o=l/(h-r),_=void 0===n?0:n.from,u=void 0===n?t.length:n.to,c=this.Io();for(let n=_;nt.On()))}Sa(){this.co=null;const t=this.Jo();let i=100;null!==t&&(i=Math.round(1/t.ua())),this.da=fn,this.ph()?(this.da=dn,i=100):this.Co()?(this.da=new ut(100,1),i=100):null!==t&&(this.da=t.ca()),this.wo=new _n(this,i,this.Mo.bind(this),this.xo.bind(this)),this.wo.Xa()}Wo(){this.uo=null}Jo(){return this._o[0]||null}Bo(){return this.To()?this.cn.scaleMargins.bottom*this.Et()+this.lo:this.cn.scaleMargins.top*this.Et()+this.ho}Vo(){return 
this.To()?this.cn.scaleMargins.top*this.Et()+this.ho:this.cn.scaleMargins.bottom*this.Et()+this.lo}zo(){this.so.eo||(this.so.eo=!0,this.i_())}yo(){this.io=null}xo(t,i){if(this.zo(),this.Fi())return 0;t=this.Ja()&&t?en(t,this.vo):t;const n=f(this.Ah()),s=this.Vo()+(this.Ao()-1)*(t-n.Sh())/n.kh();return this.Eo(s)}Mo(t,i){if(this.zo(),this.Fi())return 0;const n=this.Eo(t),s=f(this.Ah()),e=s.Sh()+s.kh()*((n-this.Vo())/(this.Ao()-1));return this.Ja()?rn(e,this.vo):e}Ro(){this.co=null,this.wo.Xa()}i_(){const t=this.so.ro;if(null===t)return;let i=null;const n=this.Qo();let s=0,e=0;for(const r of n){if(!r.Tt())continue;const n=r.Pt();if(null===n)continue;const h=r.Rl(t.Os(),t.di());let l=h&&h.Ah();if(null!==l){switch(this.cn.mode){case 1:l=hn(l,this.vo);break;case 2:l=tn(l,n.Bt);break;case 3:l=sn(l,n.Bt)}if(i=null===i?l:i.ts(f(l)),null!==h){const t=h.Bh();null!==t&&(s=Math.max(s,t.above),e=Math.max(s,t.below))}}}if(s===this.ho&&e===this.lo||(this.ho=s,this.lo=e,this.co=null,this.yo()),null!==i){if(i.Sh()===i.yh()){const t=this.Jo(),n=5*(null===t||this.ph()||this.Co()?1:t.ua());this.Ja()&&(i=ln(i,this.vo)),i=new ki(i.Sh()-n,i.yh()+n),this.Ja()&&(i=hn(i,this.vo))}if(this.Ja()){const t=ln(i,this.vo),n=an(t);if(r=n,h=this.vo,r.La!==h.La||r.Na!==h.Na){const s=null!==this.no?ln(this.no,this.vo):null;this.vo=n,i=hn(t,n),null!==s&&(this.no=hn(s,n))}}this.Po(i)}else null===this.Dh&&(this.Po(new ki(-.5,.5)),this.vo=an(null));var r,h;this.so.eo=!0}Io(){return this.ph()?Qi:this.Co()?nn:this.Ja()?t=>en(t,this.vo):null}n_(t,i,n){return void 0===i?(void 0===n&&(n=this.ca()),n.format(t)):i(t)}Eh(t,i){return this.n_(t,this.bo.priceFormatter,i)}Go(t,i){return this.n_(t,this.bo.percentageFormatter,i)}}class pn{constructor(t,i){this._o=[],this.s_=new Map,this.Qa=0,this.e_=0,this.r_=1e3,this.uo=null,this.h_=new k,this.wl=t,this.Ui=i,this.l_=new Gi(this);const 
n=i.W();this.a_=this.o_("left",n.leftPriceScale),this.__=this.o_("right",n.rightPriceScale),this.a_.Do().l(this.u_.bind(this,this.a_),this),this.__.Do().l(this.u_.bind(this,this.__),this),this.c_(n)}c_(t){if(t.leftPriceScale&&this.a_.Nh(t.leftPriceScale),t.rightPriceScale&&this.__.Nh(t.rightPriceScale),t.localization&&(this.a_.Sa(),this.__.Sa()),t.overlayPriceScales){const i=Array.from(this.s_.values());for(const n of i){const i=f(n[0].At());i.Nh(t.overlayPriceScales),t.localization&&i.Sa()}}}d_(t){switch(t){case"left":return this.a_;case"right":return this.__}return this.s_.has(t)?d(this.s_.get(t))[0].At():null}S(){this.qt().f_().p(this),this.a_.Do().p(this),this.__.Do().p(this),this._o.forEach((t=>{t.S&&t.S()})),this.h_.m()}v_(){return this.r_}p_(t){this.r_=t}qt(){return this.Ui}$i(){return this.e_}Et(){return this.Qa}m_(t){this.e_=t,this.b_()}Oo(t){this.Qa=t,this.a_.Oo(t),this.__.Oo(t),this._o.forEach((i=>{if(this.dr(i)){const n=i.At();null!==n&&n.Oo(t)}})),this.b_()}Ta(){return this._o}dr(t){const i=t.At();return null===i||this.a_!==i&&this.__!==i}Fo(t,i,n){const s=void 0!==n?n:this.g_().w_+1;this.M_(t,i,s)}jo(t){const i=this._o.indexOf(t);c(-1!==i,"removeDataSource: invalid data source"),this._o.splice(i,1);const n=f(t.At()).xa();if(this.s_.has(n)){const i=d(this.s_.get(n)),s=i.indexOf(t);-1!==s&&(i.splice(s,1),0===i.length&&this.s_.delete(n))}const s=t.At();s&&s.Ta().indexOf(t)>=0&&s.jo(t),null!==s&&(s.Wo(),this.x_(s)),this.uo=null}pr(t){return t===this.a_?"left":t===this.__?"right":"overlay"}S_(){return this.a_}y_(){return this.__}k_(t,i){t.Uo(i)}C_(t,i){t.qo(i),this.b_()}T_(t){t.Yo()}P_(t,i){t.Xo(i)}R_(t,i){t.Ko(i),this.b_()}D_(t){t.Zo()}b_(){this._o.forEach((t=>{t.On()}))}vn(){let t=null;return this.Ui.W().rightPriceScale.visible&&0!==this.__.Ta().length?t=this.__:this.Ui.W().leftPriceScale.visible&&0!==this.a_.Ta().length?t=this.a_:0!==this._o.length&&(t=this._o[0].At()),null===t&&(t=this.__),t}vr(){let t=null;return 
this.Ui.W().rightPriceScale.visible?t=this.__:this.Ui.W().leftPriceScale.visible&&(t=this.a_),t}x_(t){null!==t&&t.ko()&&this.O_(t)}A_(t){const i=this.wl.Xs();t.So({Wn:!0}),null!==i&&t.t_(i),this.b_()}B_(){this.O_(this.a_),this.O_(this.__)}V_(){this.x_(this.a_),this.x_(this.__),this._o.forEach((t=>{this.dr(t)&&this.x_(t.At())})),this.b_(),this.Ui.Fh()}No(){return null===this.uo&&(this.uo=un(this._o)),this.uo}z_(){return this.h_}E_(){return this.l_}O_(t){const i=t.Qo();if(i&&i.length>0&&!this.wl.Fi()){const i=this.wl.Xs();null!==i&&t.t_(i)}t.On()}g_(){const t=this.No();if(0===t.length)return{I_:0,w_:0};let i=0,n=0;for(let s=0;sn&&(n=e))}return{I_:i,w_:n}}M_(t,i,n){let s=this.d_(i);if(null===s&&(s=this.o_(i,this.Ui.W().overlayPriceScales)),this._o.push(t),!lt(i)){const n=this.s_.get(i)||[];n.push(t),this.s_.set(i,n)}s.Fo(t),t.Ji(s),t.Gi(n),this.x_(s),this.uo=null}u_(t,i,n){i.kr!==n.kr&&this.O_(t)}o_(t,i){const n=Object.assign({visible:!0,autoScale:!0},O(i)),s=new vn(t,n,this.Ui.W().layout,this.Ui.W().localization);return s.Oo(this.Et()),s}}class mn{constructor(t,i,n=50){this.Ke=0,this.Ze=1,this.Ge=1,this.Qe=new Map,this.Je=new Map,this.L_=t,this.N_=i,this.tr=n}F_(t){const i=t.time,n=this.N_.cacheKey(i),s=this.Qe.get(n);if(void 0!==s)return s.W_;if(this.Ke===this.tr){const t=this.Je.get(this.Ge);this.Je.delete(this.Ge),this.Qe.delete(d(t)),this.Ge++,this.Ke--}const e=this.L_(t);return this.Qe.set(n,{W_:e,er:this.Ze}),this.Je.set(this.Ze,n),this.Ke++,this.Ze++,e}}class bn{constructor(t,i){c(t<=i,"right should be >= left"),this.j_=t,this.H_=i}Os(){return this.j_}di(){return this.H_}U_(){return this.H_-this.j_+1}Kr(t){return this.j_<=t&&t<=this.H_}Mh(t){return this.j_===t.Os()&&this.H_===t.di()}}function wn(t,i){return null===t||null===i?t===i:t.Mh(i)}class gn{constructor(){this.q_=new Map,this.Qe=null,this.Y_=!1}X_(t){this.Y_=t,this.Qe=null}K_(t,i){this.Z_(i),this.Qe=null;for(let 
n=i;n{t<=n[0].index?i.push(s):n.splice(Dt(n,t,(i=>i.indexi-t))){if(!this.q_.get(n))continue;const s=i;i=[];const e=s.length;let r=0;const h=d(this.q_.get(n)),l=h.length;let a=1/0,o=-1/0;for(let n=0;n=t&&_-o>=t)i.push(l),o=_;else if(this.Y_)return s}for(;ri.weight?t:i}class Sn{constructor(t,i,n,s){this.e_=0,this.eu=null,this.ru=[],this.fo=null,this.do=null,this.hu=new gn,this.lu=new Map,this.au=Mn.su(),this.ou=!0,this._u=new k,this.uu=new k,this.cu=new k,this.du=null,this.fu=null,this.vu=[],this.cn=i,this.bo=n,this.pu=i.rightOffset,this.mu=i.barSpacing,this.Ui=t,this.N_=s,this.bu(),this.hu.X_(i.uniformDistribution)}W(){return this.cn}wu(t){C(this.bo,t),this.gu(),this.bu()}Nh(t,i){var n;C(this.cn,t),this.cn.fixLeftEdge&&this.Mu(),this.cn.fixRightEdge&&this.xu(),void 0!==t.barSpacing&&this.Ui.Gn(t.barSpacing),void 0!==t.rightOffset&&this.Ui.Jn(t.rightOffset),void 0!==t.minBarSpacing&&this.Ui.Gn(null!==(n=t.barSpacing)&&void 0!==n?n:this.mu),this.gu(),this.bu(),this.cu.m()}mn(t){var i,n;return null!==(n=null===(i=this.ru[t])||void 0===i?void 0:i.time)&&void 0!==n?n:null}qi(t){var i;return null!==(i=this.ru[t])&&void 0!==i?i:null}ka(t,i){if(this.ru.length<1)return null;if(this.N_.key(t)>this.N_.key(this.ru[this.ru.length-1].time))return i?this.ru.length-1:null;const n=Dt(this.ru,this.N_.key(t),((t,i)=>this.N_.key(t.time)0}Xs(){return this.Su(),this.au.iu()}yu(){return this.Su(),this.au.nu()}ku(){const t=this.Xs();if(null===t)return null;const i={from:t.Os(),to:t.di()};return this.Cu(i)}Cu(t){const i=Math.round(t.from),n=Math.round(t.to),s=f(this.Tu()),e=f(this.Pu());return{from:f(this.qi(Math.max(s,i))),to:f(this.qi(Math.min(e,n)))}}Ru(t){return{from:f(this.ka(t.from,!0)),to:f(this.ka(t.to,!0))}}$i(){return this.e_}m_(t){if(!isFinite(t)||t<=0)return;if(this.e_===t)return;const i=this.yu(),n=this.e_;if(this.e_=t,this.ou=!0,this.cn.lockVisibleTimeRangeOnResize&&0!==n){const i=this.mu*t/n;this.mu=i}if(this.cn.fixLeftEdge&&null!==i&&i.Os()<=0){const 
i=n-t;this.pu-=Math.round(i/this.mu)+1,this.ou=!0}this.Du(),this.Ou()}It(t){if(this.Fi()||!P(t))return 0;const i=this.Au()+this.pu-t;return this.e_-(i+.5)*this.mu-1}Js(t,i){const n=this.Au(),s=void 0===i?0:i.from,e=void 0===i?t.length:i.to;for(let i=s;ii/2&&!o?n.needAlignCoordinate=!1:n.needAlignCoordinate=_&&t.index<=l||u&&t.index>=a,c++}return this.vu.length=c,this.fu=this.vu,this.vu}Fu(){this.ou=!0,this.Gn(this.cn.barSpacing),this.Jn(this.cn.rightOffset)}Wu(t){this.ou=!0,this.eu=t,this.Ou(),this.Mu()}ju(t,i){const n=this.Vu(t),s=this.he(),e=s+i*(s/10);this.Gn(e),this.cn.rightBarStaysOnScroll||this.Jn(this.Iu()+(n-this.Vu(t)))}Uo(t){this.fo&&this.Zo(),null===this.do&&null===this.du&&(this.Fi()||(this.do=t,this.Hu()))}qo(t){if(null===this.du)return;const i=Mt(this.e_-t,0,this.e_),n=Mt(this.e_-f(this.do),0,this.e_);0!==i&&0!==n&&this.Gn(this.du.he*i/n)}Yo(){null!==this.do&&(this.do=null,this.$u())}Xo(t){null===this.fo&&null===this.du&&(this.Fi()||(this.fo=t,this.Hu()))}Ko(t){if(null===this.fo)return;const i=(this.fo-t)/this.he();this.pu=f(this.du).Iu+i,this.ou=!0,this.Ou()}Zo(){null!==this.fo&&(this.fo=null,this.$u())}Uu(){this.qu(this.cn.rightOffset)}qu(t,i=400){if(!isFinite(t))throw new RangeError("offset is required and must be finite number");if(!isFinite(i)||i<=0)throw new RangeError("animationDuration (optional) must be finite positive number");const n=this.pu,s=performance.now();this.Ui.Xn({Yu:t=>(t-s)/i>=1,Xu:e=>{const r=(e-s)/i;return r>=1?t:n+(t-n)*r}})}gt(t,i){this.ou=!0,this.ru=t,this.hu.K_(t,i),this.Ou()}Ku(){return this._u}Zu(){return this.uu}Gu(){return this.cu}Au(){return this.eu||0}Ju(t){const i=t.U_();this.Eu(this.e_/i),this.pu=t.di()-this.Au(),this.Ou(),this.ou=!0,this.Ui.zu(),this.Ui.Fh()}Qu(){const t=this.Tu(),i=this.Pu();null!==t&&null!==i&&this.Ju(new bn(t,i+this.cn.rightOffset))}tc(t){const i=new bn(t.from,t.to);this.Ju(i)}Yi(t){return void 
0!==this.bo.timeFormatter?this.bo.timeFormatter(t.originalTime):this.N_.formatHorzItem(t.time)}Lu(){const{handleScroll:t,handleScale:i}=this.Ui.W();return!(t.horzTouchDrag||t.mouseWheel||t.pressedMouseMove||t.vertTouchDrag||i.axisDoubleClickReset.time||i.axisPressedMouseMove.time||i.mouseWheel||i.pinch)}Tu(){return 0===this.ru.length?null:0}Pu(){return 0===this.ru.length?null:this.ru.length-1}ic(t){return(this.e_-1-t)/this.mu}Vu(t){const i=this.ic(t),n=this.Au()+this.pu-i;return Math.round(1e6*n)/1e6}Eu(t){const i=this.mu;this.mu=t,this.Du(),i!==this.mu&&(this.ou=!0,this.nc())}Su(){if(!this.ou)return;if(this.ou=!1,this.Fi())return void this.sc(Mn.su());const t=this.Au(),i=this.e_/this.mu,n=this.pu+t,s=new bn(n-i+1,n);this.sc(new Mn(s))}Du(){const t=this.ec();if(this.mut&&(this.mu=t,this.ou=!0)}}ec(){return this.cn.fixLeftEdge&&this.cn.fixRightEdge&&0!==this.ru.length?this.e_/this.ru.length:this.cn.minBarSpacing}Ou(){const t=this.rc();this.pu>t&&(this.pu=t,this.ou=!0);const i=this.hc();null!==i&&this.puthis.lc(t)),this.N_),this.lu.set(t.weight,i)),i.F_(t)}lc(t){return this.N_.formatTickmark(t,this.bo)}sc(t){const i=this.au;this.au=t,wn(i.iu(),this.au.iu())||this._u.m(),wn(i.nu(),this.au.nu())||this.uu.m(),this.nc()}nc(){this.fu=null}gu(){this.nc(),this.lu.clear()}bu(){this.N_.updateFormatter(this.bo)}Mu(){if(!this.cn.fixLeftEdge)return;const t=this.Tu();if(null===t)return;const i=this.Xs();if(null===i)return;const n=i.Os()-t;if(n<0){const t=this.pu-n-1;this.Jn(t)}this.Du()}xu(){this.Ou(),this.Du()}}class yn extends L{constructor(t){super(),this.ac=new Map,this.Lt=t}Z(t){}J(t){if(!this.Lt.Tt)return;const{context:i,mediaSize:n}=t;let s=0;for(const t of this.Lt.oc){if(0===t.Jt.length)continue;i.font=t.R;const e=this._c(i,t.Jt);e>n.width?t.ju=n.width/e:t.ju=1,s+=t.uc*t.ju}let e=0;switch(this.Lt.cc){case"top":e=0;break;case"center":e=Math.max((n.height-s)/2,0);break;case"bottom":e=Math.max(n.height-s,0)}i.fillStyle=this.Lt.O;for(const t of this.Lt.oc){i.save();let 
s=0;switch(this.Lt.dc){case"left":i.textAlign="left",s=t.uc/2;break;case"center":i.textAlign="center",s=n.width/2;break;case"right":i.textAlign="right",s=n.width-1-t.uc/2}i.translate(s,e),i.textBaseline="top",i.font=t.R,i.scale(t.ju,t.ju),i.fillText(t.Jt,0,t.fc),i.restore(),e+=t.uc*t.ju}}_c(t,i){const n=this.vc(t.font);let s=n.get(i);return void 0===s&&(s=t.measureText(i).width,n.set(i,s)),s}vc(t){let i=this.ac.get(t);return void 0===i&&(i=new Map,this.ac.set(t,i)),i}}class kn{constructor(t){this.bt=!0,this.jt={Tt:!1,O:"",oc:[],cc:"center",dc:"center"},this.Ht=new yn(this.jt),this.$t=t}gt(){this.bt=!0}xt(){return this.bt&&(this.St(),this.bt=!1),this.Ht}St(){const t=this.$t.W(),i=this.jt;i.Tt=t.visible,i.Tt&&(i.O=t.color,i.dc=t.horzAlign,i.cc=t.vertAlign,i.oc=[{Jt:t.text,R:z(t.fontSize,t.fontFamily,t.fontStyle),uc:1.2*t.fontSize,fc:0,ju:0}])}}class Cn extends et{constructor(t,i){super(),this.cn=i,this.wn=new kn(this)}Rn(){return[]}Pn(){return[this.wn]}W(){return this.cn}On(){this.wn.gt()}}var Tn,Pn,Rn,Dn,On;!function(t){t[t.OnTouchEnd=0]="OnTouchEnd",t[t.OnNextTap=1]="OnNextTap"}(Tn||(Tn={}));class An{constructor(t,i,n){this.mc=[],this.bc=[],this.e_=0,this.wc=null,this.gc=new k,this.Mc=new k,this.xc=null,this.Sc=t,this.cn=i,this.N_=n,this.yc=new E(this),this.wl=new Sn(this,i.timeScale,this.cn.localization,n),this.wt=new ht(this,i.crosshair),this.kc=new Xi(i.crosshair),this.Cc=new Cn(this,i.watermark),this.Tc(),this.mc[0].p_(2e3),this.Pc=this.Rc(0),this.Dc=this.Rc(1)}$l(){this.Oc(at.es())}Fh(){this.Oc(at.ss())}sa(){this.Oc(new at(1))}Ul(t){const i=this.Ac(t);this.Oc(i)}Bc(){return this.wc}Vc(t){const i=this.wc;this.wc=t,null!==i&&this.Ul(i.zc),null!==t&&this.Ul(t.zc)}W(){return this.cn}Nh(t){C(this.cn,t),this.mc.forEach((i=>i.c_(t))),void 0!==t.timeScale&&this.wl.Nh(t.timeScale),void 0!==t.localization&&this.wl.wu(t.localization),(t.leftPriceScale||t.rightPriceScale)&&this.gc.m(),this.Pc=this.Rc(0),this.Dc=this.Rc(1),this.$l()}Ec(t,i){if("left"===t)return void 
this.Nh({leftPriceScale:i});if("right"===t)return void this.Nh({rightPriceScale:i});const n=this.Ic(t);null!==n&&(n.At.Nh(i),this.gc.m())}Ic(t){for(const i of this.mc){const n=i.d_(t);if(null!==n)return{Ut:i,At:n}}return null}kt(){return this.wl}Lc(){return this.mc}Nc(){return this.Cc}Fc(){return this.wt}Wc(){return this.Mc}jc(t,i){t.Oo(i),this.zu()}m_(t){this.e_=t,this.wl.m_(this.e_),this.mc.forEach((i=>i.m_(t))),this.zu()}Tc(t){const i=new pn(this.wl,this);void 0!==t?this.mc.splice(t,0,i):this.mc.push(i);const n=void 0===t?this.mc.length-1:t,s=at.es();return s.Nn(n,{Fn:0,Wn:!0}),this.Oc(s),i}k_(t,i,n){t.k_(i,n)}C_(t,i,n){t.C_(i,n),this.ql(),this.Oc(this.Hc(t,2))}T_(t,i){t.T_(i),this.Oc(this.Hc(t,2))}P_(t,i,n){i.ko()||t.P_(i,n)}R_(t,i,n){i.ko()||(t.R_(i,n),this.ql(),this.Oc(this.Hc(t,2)))}D_(t,i){i.ko()||(t.D_(i),this.Oc(this.Hc(t,2)))}A_(t,i){t.A_(i),this.Oc(this.Hc(t,2))}$c(t){this.wl.Uo(t)}Uc(t,i){const n=this.kt();if(n.Fi()||0===i)return;const s=n.$i();t=Math.max(1,Math.min(t,s)),n.ju(t,i),this.zu()}qc(t){this.Yc(0),this.Xc(t),this.Kc()}Zc(t){this.wl.qo(t),this.zu()}Gc(){this.wl.Yo(),this.Fh()}Yc(t){this.wl.Xo(t)}Xc(t){this.wl.Ko(t),this.zu()}Kc(){this.wl.Zo(),this.Fh()}Mt(){return this.bc}Jc(t,i,n,s,e){this.wt.gn(t,i);let r=NaN,h=this.wl.Bu(t);const l=this.wl.Xs();null!==l&&(h=Math.min(Math.max(l.Os(),h),l.di()));const a=s.vn(),o=a.Pt();null!==o&&(r=a.pn(i,o)),r=this.kc.Ca(r,h,s),this.wt.yn(h,r,s),this.sa(),e||this.Mc.m(this.wt.yt(),{x:t,y:i},n)}Qc(t,i,n){const s=n.vn(),e=s.Pt(),r=s.Ot(t,f(e)),h=this.wl.ka(i,!0),l=this.wl.It(f(h));this.Jc(l,r,null,n,!0)}td(t){this.Fc().Cn(),this.sa(),t||this.Mc.m(null,null,null)}ql(){const t=this.wt.Ut();if(null!==t){const i=this.wt.xn(),n=this.wt.Sn();this.Jc(i,n,null,t)}this.wt.On()}nd(t,i,n){const s=this.wl.mn(0);void 0!==i&&void 0!==n&&this.wl.gt(i,n);const e=this.wl.mn(0),r=this.wl.Au(),h=this.wl.Xs();if(null!==h&&null!==s&&null!==e){const 
i=h.Kr(r),l=this.N_.key(s)>this.N_.key(e),a=null!==t&&t>r&&!l,o=this.wl.W().allowShiftVisibleRangeOnWhitespaceReplacement,_=i&&(!(void 0===n)||o)&&this.wl.W().shiftVisibleRangeOnNewBar;if(a&&!_){const i=t-r;this.wl.Jn(this.wl.Iu()-i)}}this.wl.Wu(t)}Kl(t){null!==t&&t.V_()}cr(t){const i=this.mc.find((i=>i.No().includes(t)));return void 0===i?null:i}zu(){this.Cc.On(),this.mc.forEach((t=>t.V_())),this.ql()}S(){this.mc.forEach((t=>t.S())),this.mc.length=0,this.cn.localization.priceFormatter=void 0,this.cn.localization.percentageFormatter=void 0,this.cn.localization.timeFormatter=void 0}sd(){return this.yc}mr(){return this.yc.W()}f_(){return this.gc}ed(t,i,n){const s=this.mc[0],e=this.rd(i,t,s,n);return this.bc.push(e),1===this.bc.length?this.$l():this.Fh(),e}hd(t){const i=this.cr(t),n=this.bc.indexOf(t);c(-1!==n,"Series not found"),this.bc.splice(n,1),f(i).jo(t),t.S&&t.S()}Hl(t,i){const n=f(this.cr(t));n.jo(t);const s=this.Ic(i);if(null===s){const s=t.Zi();n.Fo(t,i,s)}else{const e=s.Ut===n?t.Zi():void 0;s.Ut.Fo(t,i,e)}}Qu(){const t=at.ss();t.$n(),this.Oc(t)}ld(t){const i=at.ss();i.Yn(t),this.Oc(i)}Zn(){const t=at.ss();t.Zn(),this.Oc(t)}Gn(t){const i=at.ss();i.Gn(t),this.Oc(i)}Jn(t){const i=at.ss();i.Jn(t),this.Oc(i)}Xn(t){const i=at.ss();i.Xn(t),this.Oc(i)}Un(){const t=at.ss();t.Un(),this.Oc(t)}ad(){return this.cn.rightPriceScale.visible?"right":"left"}od(){return this.Dc}q(){return this.Pc}zt(t){const i=this.Dc,n=this.Pc;if(i===n)return i;if(t=Math.max(0,Math.min(100,Math.round(100*t))),null===this.xc||this.xc.Ts!==n||this.xc.Ps!==i)this.xc={Ts:n,Ps:i,_d:new Map};else{const i=this.xc._d.get(t);if(void 0!==i)return i}const s=function(t,i,n){const[s,e,r,h]=S(t),[l,a,o,_]=S(i),u=[m(s+n*(l-s)),m(e+n*(a-e)),m(r+n*(o-r)),b(h+n*(_-h))];return`rgba(${u[0]}, ${u[1]}, ${u[2]}, ${u[3]})`}(n,i,t/100);return this.xc._d.set(t,s),s}Hc(t,i){const n=new at(i);if(null!==t){const s=this.mc.indexOf(t);n.Nn(s,{Fn:i})}return n}Ac(t,i){return void 
0===i&&(i=2),this.Hc(this.cr(t),i)}Oc(t){this.Sc&&this.Sc(t),this.mc.forEach((t=>t.E_().Wh().gt()))}rd(t,i,n,s){const e=new Yi(this,t,i,n,s),r=void 0!==t.priceScaleId?t.priceScaleId:this.ad();return n.Fo(e,r),lt(r)||e.Nh(t),e}Rc(t){const i=this.cn.layout;return"gradient"===i.background.type?0===t?i.background.topColor:i.background.bottomColor:i.background.color}}function Bn(t){return!T(t)&&!R(t)}function Vn(t){return T(t)}!function(t){t[t.Disabled=0]="Disabled",t[t.Continuous=1]="Continuous",t[t.OnDataUpdate=2]="OnDataUpdate"}(Pn||(Pn={})),function(t){t[t.LastBar=0]="LastBar",t[t.LastVisible=1]="LastVisible"}(Rn||(Rn={})),function(t){t.Solid="solid",t.VerticalGradient="gradient"}(Dn||(Dn={})),function(t){t[t.Year=0]="Year",t[t.Month=1]="Month",t[t.DayOfMonth=2]="DayOfMonth",t[t.Time=3]="Time",t[t.TimeWithSeconds=4]="TimeWithSeconds"}(On||(On={}));const zn=t=>t.getUTCFullYear();function En(t,i,n){return i.replace(/yyyy/g,(t=>_t(zn(t),4))(t)).replace(/yy/g,(t=>_t(zn(t)%100,2))(t)).replace(/MMMM/g,((t,i)=>new Date(t.getUTCFullYear(),t.getUTCMonth(),1).toLocaleString(i,{month:"long"}))(t,n)).replace(/MMM/g,((t,i)=>new Date(t.getUTCFullYear(),t.getUTCMonth(),1).toLocaleString(i,{month:"short"}))(t,n)).replace(/MM/g,(t=>_t((t=>t.getUTCMonth()+1)(t),2))(t)).replace(/dd/g,(t=>_t((t=>t.getUTCDate())(t),2))(t))}class In{constructor(t="yyyy-MM-dd",i="default"){this.ud=t,this.dd=i}F_(t){return En(t,this.ud,this.dd)}}class Ln{constructor(t){this.fd=t||"%h:%m:%s"}F_(t){return this.fd.replace("%h",_t(t.getUTCHours(),2)).replace("%m",_t(t.getUTCMinutes(),2)).replace("%s",_t(t.getUTCSeconds(),2))}}const Nn={vd:"yyyy-MM-dd",pd:"%h:%m:%s",md:" ",bd:"default"};class Fn{constructor(t={}){const i=Object.assign(Object.assign({},Nn),t);this.wd=new In(i.vd,i.bd),this.gd=new Ln(i.pd),this.Md=i.md}F_(t){return`${this.wd.F_(t)}${this.Md}${this.gd.F_(t)}`}}function Wn(t){return 60*t*60*1e3}function jn(t){return 60*t*1e3}const 
Hn=[{xd:($n=1,1e3*$n),Sd:10},{xd:jn(1),Sd:20},{xd:jn(5),Sd:21},{xd:jn(30),Sd:22},{xd:Wn(1),Sd:30},{xd:Wn(3),Sd:31},{xd:Wn(6),Sd:32},{xd:Wn(12),Sd:33}];var $n;function Un(t,i){if(t.getUTCFullYear()!==i.getUTCFullYear())return 70;if(t.getUTCMonth()!==i.getUTCMonth())return 60;if(t.getUTCDate()!==i.getUTCDate())return 50;for(let n=Hn.length-1;n>=0;--n)if(Math.floor(i.getTime()/Hn[n].xd)!==Math.floor(t.getTime()/Hn[n].xd))return Hn[n].Sd;return 0}function qn(t){let i=t;if(R(t)&&(i=Xn(t)),!Bn(i))throw new Error("time must be of type BusinessDay");const n=new Date(Date.UTC(i.year,i.month-1,i.day,0,0,0,0));return{yd:Math.round(n.getTime()/1e3),kd:i}}function Yn(t){if(!Vn(t))throw new Error("time must be of type isUTCTimestamp");return{yd:t}}function Xn(t){const i=new Date(t);if(isNaN(i.getTime()))throw new Error(`Invalid date string=${t}, expected format=yyyy-mm-dd`);return{day:i.getUTCDate(),month:i.getUTCMonth()+1,year:i.getUTCFullYear()}}function Kn(t){R(t.time)&&(t.time=Xn(t.time))}class Zn{options(){return this.cn}setOptions(t){this.cn=t,this.updateFormatter(t.localization)}preprocessData(t){Array.isArray(t)?function(t){t.forEach(Kn)}(t):Kn(t)}createConverterToInternalObj(t){return f(function(t){return 0===t.length?null:Bn(t[0].time)||R(t[0].time)?qn:Yn}(t))}key(t){return"object"==typeof t&&"yd"in t?t.yd:this.key(this.convertHorzItemToInternal(t))}cacheKey(t){const i=t;return void 0===i.kd?new Date(1e3*i.yd).getTime():new Date(Date.UTC(i.kd.year,i.kd.month-1,i.kd.day)).getTime()}convertHorzItemToInternal(t){return Vn(i=t)?Yn(i):Bn(i)?qn(i):qn(Xn(i));var i}updateFormatter(t){if(!this.cn)return;const i=t.dateFormat;this.cn.timeScale.timeVisible?this.Cd=new Fn({vd:i,pd:this.cn.timeScale.secondsVisible?"%h:%m:%s":"%h:%m",md:" ",bd:t.locale}):this.Cd=new In(i,t.locale)}formatHorzItem(t){const i=t;return this.Cd.F_(new Date(1e3*i.yd))}formatTickmark(t,i){const n=function(t,i,n){switch(t){case 0:case 10:return i?n?4:3:2;case 20:case 21:case 22:case 30:case 31:case 32:case 
33:return i?3:2;case 50:return 2;case 60:return 1;case 70:return 0}}(t.weight,this.cn.timeScale.timeVisible,this.cn.timeScale.secondsVisible),s=this.cn.timeScale;if(void 0!==s.tickMarkFormatter){const e=s.tickMarkFormatter(t.originalTime,n,i.locale);if(null!==e)return e}return function(t,i,n){const s={};switch(i){case 0:s.year="numeric";break;case 1:s.month="short";break;case 2:s.day="numeric";break;case 3:s.hour12=!1,s.hour="2-digit",s.minute="2-digit";break;case 4:s.hour12=!1,s.hour="2-digit",s.minute="2-digit",s.second="2-digit"}const e=void 0===t.kd?new Date(1e3*t.yd):new Date(Date.UTC(t.kd.year,t.kd.month-1,t.kd.day));return new Date(e.getUTCFullYear(),e.getUTCMonth(),e.getUTCDate(),e.getUTCHours(),e.getUTCMinutes(),e.getUTCSeconds(),e.getUTCMilliseconds()).toLocaleString(n,s)}(t.time,n,i.locale)}maxTickMarkWeight(t){let i=t.reduce(xn,t[0]).weight;return i>30&&i<50&&(i=30),i}fillWeightsForPoints(t,i){!function(t,i=0){if(0===t.length)return;let n=0===i?null:t[i-1].time.yd,s=null!==n?new Date(1e3*n):null,e=0;for(let r=i;r1){const i=Math.ceil(e/(t.length-1)),n=new Date(1e3*(t[0].time.yd-i));t[0].timeWeight=Un(new Date(1e3*t[0].time.yd),n)}}(t,i)}static Td(t){return C({localization:{dateFormat:"dd MMM 'yy"}},null!=t?t:{})}}function Gn(t){var i=t.width,n=t.height;if(i<0)throw new Error("Negative width is not allowed for Size");if(n<0)throw new Error("Negative height is not allowed for Size");return{width:i,height:n}}function Jn(t,i){return t.width===i.width&&t.height===i.height}var Qn=function(){function t(t){var i=this;this._resolutionListener=function(){return i._onResolutionChanged()},this._resolutionMediaQueryList=null,this._observers=[],this._window=t,this._installResolutionListener()}return t.prototype.dispose=function(){this._uninstallResolutionListener(),this._window=null},Object.defineProperty(t.prototype,"value",{get:function(){return this._window.devicePixelRatio},enumerable:!1,configurable:!0}),t.prototype.subscribe=function(t){var 
i=this,n={next:t};return this._observers.push(n),{unsubscribe:function(){i._observers=i._observers.filter((function(t){return t!==n}))}}},t.prototype._installResolutionListener=function(){if(null!==this._resolutionMediaQueryList)throw new Error("Resolution listener is already installed");var t=this._window.devicePixelRatio;this._resolutionMediaQueryList=this._window.matchMedia("all and (resolution: ".concat(t,"dppx)")),this._resolutionMediaQueryList.addListener(this._resolutionListener)},t.prototype._uninstallResolutionListener=function(){null!==this._resolutionMediaQueryList&&(this._resolutionMediaQueryList.removeListener(this._resolutionListener),this._resolutionMediaQueryList=null)},t.prototype._reinstallResolutionListener=function(){this._uninstallResolutionListener(),this._installResolutionListener()},t.prototype._onResolutionChanged=function(){var t=this;this._observers.forEach((function(i){return i.next(t._window.devicePixelRatio)})),this._reinstallResolutionListener()},t}();var ts=function(){function t(t,i,n){var s;this._canvasElement=null,this._bitmapSizeChangedListeners=[],this._suggestedBitmapSize=null,this._suggestedBitmapSizeChangedListeners=[],this._devicePixelRatioObservable=null,this._canvasElementResizeObserver=null,this._canvasElement=t,this._canvasElementClientSize=Gn({width:this._canvasElement.clientWidth,height:this._canvasElement.clientHeight}),this._transformBitmapSize=null!=i?i:function(t){return t},this._allowResizeObserver=null===(s=null==n?void 0:n.allowResizeObserver)||void 0===s||s,this._chooseAndInitObserver()}return t.prototype.dispose=function(){var t,i;if(null===this._canvasElement)throw new Error("Object is disposed");null===(t=this._canvasElementResizeObserver)||void 0===t||t.disconnect(),this._canvasElementResizeObserver=null,null===(i=this._devicePixelRatioObservable)||void 
0===i||i.dispose(),this._devicePixelRatioObservable=null,this._suggestedBitmapSizeChangedListeners.length=0,this._bitmapSizeChangedListeners.length=0,this._canvasElement=null},Object.defineProperty(t.prototype,"canvasElement",{get:function(){if(null===this._canvasElement)throw new Error("Object is disposed");return this._canvasElement},enumerable:!1,configurable:!0}),Object.defineProperty(t.prototype,"canvasElementClientSize",{get:function(){return this._canvasElementClientSize},enumerable:!1,configurable:!0}),Object.defineProperty(t.prototype,"bitmapSize",{get:function(){return Gn({width:this.canvasElement.width,height:this.canvasElement.height})},enumerable:!1,configurable:!0}),t.prototype.resizeCanvasElement=function(t){this._canvasElementClientSize=Gn(t),this.canvasElement.style.width="".concat(this._canvasElementClientSize.width,"px"),this.canvasElement.style.height="".concat(this._canvasElementClientSize.height,"px"),this._invalidateBitmapSize()},t.prototype.subscribeBitmapSizeChanged=function(t){this._bitmapSizeChangedListeners.push(t)},t.prototype.unsubscribeBitmapSizeChanged=function(t){this._bitmapSizeChangedListeners=this._bitmapSizeChangedListeners.filter((function(i){return i!==t}))},Object.defineProperty(t.prototype,"suggestedBitmapSize",{get:function(){return this._suggestedBitmapSize},enumerable:!1,configurable:!0}),t.prototype.subscribeSuggestedBitmapSizeChanged=function(t){this._suggestedBitmapSizeChangedListeners.push(t)},t.prototype.unsubscribeSuggestedBitmapSizeChanged=function(t){this._suggestedBitmapSizeChangedListeners=this._suggestedBitmapSizeChangedListeners.filter((function(i){return i!==t}))},t.prototype.applySuggestedBitmapSize=function(){if(null!==this._suggestedBitmapSize){var t=this._suggestedBitmapSize;this._suggestedBitmapSize=null,this._resizeBitmap(t),this._emitSuggestedBitmapSizeChanged(t,this._suggestedBitmapSize)}},t.prototype._resizeBitmap=function(t){var 
i=this.bitmapSize;Jn(i,t)||(this.canvasElement.width=t.width,this.canvasElement.height=t.height,this._emitBitmapSizeChanged(i,t))},t.prototype._emitBitmapSizeChanged=function(t,i){var n=this;this._bitmapSizeChangedListeners.forEach((function(s){return s.call(n,t,i)}))},t.prototype._suggestNewBitmapSize=function(t){var i=this._suggestedBitmapSize,n=Gn(this._transformBitmapSize(t,this._canvasElementClientSize)),s=Jn(this.bitmapSize,n)?null:n;null===i&&null===s||null!==i&&null!==s&&Jn(i,s)||(this._suggestedBitmapSize=s,this._emitSuggestedBitmapSizeChanged(i,s))},t.prototype._emitSuggestedBitmapSizeChanged=function(t,i){var n=this;this._suggestedBitmapSizeChangedListeners.forEach((function(s){return s.call(n,t,i)}))},t.prototype._chooseAndInitObserver=function(){var t=this;this._allowResizeObserver?new Promise((function(t){var i=new ResizeObserver((function(n){t(n.every((function(t){return"devicePixelContentBoxSize"in t}))),i.disconnect()}));i.observe(document.body,{box:"device-pixel-content-box"})})).catch((function(){return!1})).then((function(i){return i?t._initResizeObserver():t._initDevicePixelRatioObservable()})):this._initDevicePixelRatioObservable()},t.prototype._initDevicePixelRatioObservable=function(){var t=this;if(null!==this._canvasElement){var i=is(this._canvasElement);if(null===i)throw new Error("No window is associated with the canvas");this._devicePixelRatioObservable=function(t){return new Qn(t)}(i),this._devicePixelRatioObservable.subscribe((function(){return t._invalidateBitmapSize()})),this._invalidateBitmapSize()}},t.prototype._invalidateBitmapSize=function(){var t,i;if(null!==this._canvasElement){var n=is(this._canvasElement);if(null!==n){var s=null!==(i=null===(t=this._devicePixelRatioObservable)||void 0===t?void 0:t.value)&&void 0!==i?i:n.devicePixelRatio,e=this._canvasElement.getClientRects(),r=void 0!==e[0]?function(t,i){return 
Gn({width:Math.round(t.left*i+t.width*i)-Math.round(t.left*i),height:Math.round(t.top*i+t.height*i)-Math.round(t.top*i)})}(e[0],s):Gn({width:this._canvasElementClientSize.width*s,height:this._canvasElementClientSize.height*s});this._suggestNewBitmapSize(r)}}},t.prototype._initResizeObserver=function(){var t=this;null!==this._canvasElement&&(this._canvasElementResizeObserver=new ResizeObserver((function(i){var n=i.find((function(i){return i.target===t._canvasElement}));if(n&&n.devicePixelContentBoxSize&&n.devicePixelContentBoxSize[0]){var s=n.devicePixelContentBoxSize[0],e=Gn({width:s.inlineSize,height:s.blockSize});t._suggestNewBitmapSize(e)}})),this._canvasElementResizeObserver.observe(this._canvasElement,{box:"device-pixel-content-box"}))},t}();function is(t){return t.ownerDocument.defaultView}var ns=function(){function t(t,i,n){if(0===i.width||0===i.height)throw new TypeError("Rendering target could only be created on a media with positive width and height");if(this._mediaSize=i,0===n.width||0===n.height)throw new TypeError("Rendering target could only be created using a bitmap with positive integer width and height");this._bitmapSize=n,this._context=t}return t.prototype.useMediaCoordinateSpace=function(t){try{return this._context.save(),this._context.setTransform(1,0,0,1,0,0),this._context.scale(this._horizontalPixelRatio,this._verticalPixelRatio),t({context:this._context,mediaSize:this._mediaSize})}finally{this._context.restore()}},t.prototype.useBitmapCoordinateSpace=function(t){try{return this._context.save(),this._context.setTransform(1,0,0,1,0,0),t({context:this._context,mediaSize:this._mediaSize,bitmapSize:this._bitmapSize,horizontalPixelRatio:this._horizontalPixelRatio,verticalPixelRatio:this._verticalPixelRatio})}finally{this._context.restore()}},Object.defineProperty(t.prototype,"_horizontalPixelRatio",{get:function(){return 
this._bitmapSize.width/this._mediaSize.width},enumerable:!1,configurable:!0}),Object.defineProperty(t.prototype,"_verticalPixelRatio",{get:function(){return this._bitmapSize.height/this._mediaSize.height},enumerable:!1,configurable:!0}),t}();function ss(t,i){var n=t.canvasElementClientSize;if(0===n.width||0===n.height)return null;var s=t.bitmapSize;if(0===s.width||0===s.height)return null;var e=t.canvasElement.getContext("2d",i);return null===e?null:new ns(e,n,s)}const es="undefined"!=typeof window;function rs(){return!!es&&window.navigator.userAgent.toLowerCase().indexOf("firefox")>-1}function hs(){return!!es&&/iPhone|iPad|iPod/.test(window.navigator.platform)}function ls(t){return t+t%2}function as(t,i){return t.Pd-i.Pd}function os(t,i,n){const s=(t.Pd-i.Pd)/(t.ut-i.ut);return Math.sign(s)*Math.min(Math.abs(s),n)}class _s{constructor(t,i,n,s){this.Rd=null,this.Dd=null,this.Od=null,this.Ad=null,this.Bd=null,this.Vd=0,this.zd=0,this.Ed=t,this.Id=i,this.Ld=n,this.rs=s}Nd(t,i){if(null!==this.Rd){if(this.Rd.ut===i)return void(this.Rd.Pd=t);if(Math.abs(this.Rd.Pd-t)50)return;let n=0;const s=os(this.Rd,this.Dd,this.Id),e=as(this.Rd,this.Dd),r=[s],h=[e];if(n+=e,null!==this.Od){const t=os(this.Dd,this.Od,this.Id);if(Math.sign(t)===Math.sign(s)){const i=as(this.Dd,this.Od);if(r.push(t),h.push(i),n+=i,null!==this.Ad){const t=os(this.Od,this.Ad,this.Id);if(Math.sign(t)===Math.sign(s)){const i=as(this.Od,this.Ad);r.push(t),h.push(i),n+=i}}}}let l=0;for(let t=0;t({width:Math.max(t.width,i.width),height:Math.max(t.height,i.height)})});return s.resizeCanvasElement(i),s}function cs(t,i,n,s){t.G&&t.G(i,n,s)}function ds(t,i,n,s){t.K(i,n,s)}function fs(t,i,n,s){const e=t(n,s);for(const t of e){const n=t.xt();null!==n&&i(n)}}function vs(t){es&&void 0!==window.chrome&&t.addEventListener("mousedown",(t=>{if(1===t.button)return t.preventDefault(),!1}))}class 
ps{constructor(t,i,n){this.Wd=0,this.jd=null,this.Hd={et:Number.NEGATIVE_INFINITY,rt:Number.POSITIVE_INFINITY},this.$d=0,this.Ud=null,this.qd={et:Number.NEGATIVE_INFINITY,rt:Number.POSITIVE_INFINITY},this.Yd=null,this.Xd=!1,this.Kd=null,this.Zd=null,this.Gd=!1,this.Jd=!1,this.Qd=!1,this.tf=null,this.if=null,this.nf=null,this.sf=null,this.ef=null,this.rf=null,this.hf=null,this.lf=0,this.af=!1,this._f=!1,this.uf=!1,this.cf=0,this.df=null,this.ff=!hs(),this.vf=t=>{this.pf(t)},this.mf=t=>{if(this.bf(t)){const i=this.wf(t);if(++this.$d,this.Ud&&this.$d>1){const{gf:n}=this.Mf(ws(t),this.qd);n<30&&!this.Qd&&this.xf(i,this.yf.Sf),this.kf()}}else{const i=this.wf(t);if(++this.Wd,this.jd&&this.Wd>1){const{gf:n}=this.Mf(ws(t),this.Hd);n<5&&!this.Jd&&this.Cf(i,this.yf.Tf),this.Pf()}}},this.Rf=t,this.yf=i,this.cn=n,this.Df()}S(){null!==this.tf&&(this.tf(),this.tf=null),null!==this.if&&(this.if(),this.if=null),null!==this.sf&&(this.sf(),this.sf=null),null!==this.ef&&(this.ef(),this.ef=null),null!==this.rf&&(this.rf(),this.rf=null),null!==this.nf&&(this.nf(),this.nf=null),this.Of(),this.Pf()}Af(t){this.sf&&this.sf();const i=this.Bf.bind(this);if(this.sf=()=>{this.Rf.removeEventListener("mousemove",i)},this.Rf.addEventListener("mousemove",i),this.bf(t))return;const n=this.wf(t);this.Cf(n,this.yf.Vf),this.ff=!0}Pf(){null!==this.jd&&clearTimeout(this.jd),this.Wd=0,this.jd=null,this.Hd={et:Number.NEGATIVE_INFINITY,rt:Number.POSITIVE_INFINITY}}kf(){null!==this.Ud&&clearTimeout(this.Ud),this.$d=0,this.Ud=null,this.qd={et:Number.NEGATIVE_INFINITY,rt:Number.POSITIVE_INFINITY}}Bf(t){if(this.uf||null!==this.Zd)return;if(this.bf(t))return;const i=this.wf(t);this.Cf(i,this.yf.zf),this.ff=!0}Ef(t){const i=Ms(t.changedTouches,f(this.df));if(null===i)return;if(this.cf=gs(t),null!==this.hf)return;if(this._f)return;this.af=!0;const n=this.Mf(ws(i),f(this.Zd)),{If:s,Lf:e,gf:r}=n;if(this.Gd||!(r<5)){if(!this.Gd){const 
t=.5*s,i=e>=t&&!this.cn.Nf(),n=t>e&&!this.cn.Ff();i||n||(this._f=!0),this.Gd=!0,this.Qd=!0,this.Of(),this.kf()}if(!this._f){const n=this.wf(t,i);this.xf(n,this.yf.Wf),bs(t)}}}jf(t){if(0!==t.button)return;const i=this.Mf(ws(t),f(this.Kd)),{gf:n}=i;if(n>=5&&(this.Jd=!0,this.Pf()),this.Jd){const i=this.wf(t);this.Cf(i,this.yf.Hf)}}Mf(t,i){const n=Math.abs(i.et-t.et),s=Math.abs(i.rt-t.rt);return{If:n,Lf:s,gf:n+s}}$f(t){let i=Ms(t.changedTouches,f(this.df));if(null===i&&0===t.touches.length&&(i=t.changedTouches[0]),null===i)return;this.df=null,this.cf=gs(t),this.Of(),this.Zd=null,this.rf&&(this.rf(),this.rf=null);const n=this.wf(t,i);if(this.xf(n,this.yf.Uf),++this.$d,this.Ud&&this.$d>1){const{gf:t}=this.Mf(ws(i),this.qd);t<30&&!this.Qd&&this.xf(n,this.yf.Sf),this.kf()}else this.Qd||(this.xf(n,this.yf.qf),this.yf.qf&&bs(t));0===this.$d&&bs(t),0===t.touches.length&&this.Xd&&(this.Xd=!1,bs(t))}pf(t){if(0!==t.button)return;const i=this.wf(t);if(this.Kd=null,this.uf=!1,this.ef&&(this.ef(),this.ef=null),rs()){this.Rf.ownerDocument.documentElement.removeEventListener("mouseleave",this.vf)}if(!this.bf(t))if(this.Cf(i,this.yf.Yf),++this.Wd,this.jd&&this.Wd>1){const{gf:n}=this.Mf(ws(t),this.Hd);n<5&&!this.Jd&&this.Cf(i,this.yf.Tf),this.Pf()}else this.Jd||this.Cf(i,this.yf.Xf)}Of(){null!==this.Yd&&(clearTimeout(this.Yd),this.Yd=null)}Kf(t){if(null!==this.df)return;const i=t.changedTouches[0];this.df=i.identifier,this.cf=gs(t);const n=this.Rf.ownerDocument.documentElement;this.Qd=!1,this.Gd=!1,this._f=!1,this.Zd=ws(i),this.rf&&(this.rf(),this.rf=null);{const i=this.Ef.bind(this),s=this.$f.bind(this);this.rf=()=>{n.removeEventListener("touchmove",i),n.removeEventListener("touchend",s)},n.addEventListener("touchmove",i,{passive:!1}),n.addEventListener("touchend",s,{passive:!1}),this.Of(),this.Yd=setTimeout(this.Zf.bind(this,t),240)}const 
s=this.wf(t,i);this.xf(s,this.yf.Gf),this.Ud||(this.$d=0,this.Ud=setTimeout(this.kf.bind(this),500),this.qd=ws(i))}Jf(t){if(0!==t.button)return;const i=this.Rf.ownerDocument.documentElement;rs()&&i.addEventListener("mouseleave",this.vf),this.Jd=!1,this.Kd=ws(t),this.ef&&(this.ef(),this.ef=null);{const t=this.jf.bind(this),n=this.pf.bind(this);this.ef=()=>{i.removeEventListener("mousemove",t),i.removeEventListener("mouseup",n)},i.addEventListener("mousemove",t),i.addEventListener("mouseup",n)}if(this.uf=!0,this.bf(t))return;const n=this.wf(t);this.Cf(n,this.yf.Qf),this.jd||(this.Wd=0,this.jd=setTimeout(this.Pf.bind(this),500),this.Hd=ws(t))}Df(){this.Rf.addEventListener("mouseenter",this.Af.bind(this)),this.Rf.addEventListener("touchcancel",this.Of.bind(this));{const t=this.Rf.ownerDocument,i=t=>{this.yf.tv&&(t.composed&&this.Rf.contains(t.composedPath()[0])||t.target&&this.Rf.contains(t.target)||this.yf.tv())};this.if=()=>{t.removeEventListener("touchstart",i)},this.tf=()=>{t.removeEventListener("mousedown",i)},t.addEventListener("mousedown",i),t.addEventListener("touchstart",i,{passive:!0})}hs()&&(this.nf=()=>{this.Rf.removeEventListener("dblclick",this.mf)},this.Rf.addEventListener("dblclick",this.mf)),this.Rf.addEventListener("mouseleave",this.iv.bind(this)),this.Rf.addEventListener("touchstart",this.Kf.bind(this),{passive:!0}),vs(this.Rf),this.Rf.addEventListener("mousedown",this.Jf.bind(this)),this.nv(),this.Rf.addEventListener("touchmove",(()=>{}),{passive:!1})}nv(){void 0===this.yf.sv&&void 0===this.yf.ev&&void 0===this.yf.rv||(this.Rf.addEventListener("touchstart",(t=>this.hv(t.touches)),{passive:!0}),this.Rf.addEventListener("touchmove",(t=>{if(2===t.touches.length&&null!==this.hf&&void 0!==this.yf.ev){const i=ms(t.touches[0],t.touches[1])/this.lf;this.yf.ev(this.hf,i),bs(t)}}),{passive:!1}),this.Rf.addEventListener("touchend",(t=>{this.hv(t.touches)})))}hv(t){1===t.length&&(this.af=!1),2!==t.length||this.af||this.Xd?this.lv():this.av(t)}av(t){const 
i=this.Rf.getBoundingClientRect()||{left:0,top:0};this.hf={et:(t[0].clientX-i.left+(t[1].clientX-i.left))/2,rt:(t[0].clientY-i.top+(t[1].clientY-i.top))/2},this.lf=ms(t[0],t[1]),void 0!==this.yf.sv&&this.yf.sv(),this.Of()}lv(){null!==this.hf&&(this.hf=null,void 0!==this.yf.rv&&this.yf.rv())}iv(t){if(this.sf&&this.sf(),this.bf(t))return;if(!this.ff)return;const i=this.wf(t);this.Cf(i,this.yf.ov),this.ff=!hs()}Zf(t){const i=Ms(t.touches,f(this.df));if(null===i)return;const n=this.wf(t,i);this.xf(n,this.yf._v),this.Qd=!0,this.Xd=!0}bf(t){return t.sourceCapabilities&&void 0!==t.sourceCapabilities.firesTouchEvents?t.sourceCapabilities.firesTouchEvents:gs(t){"touchstart"!==t.type&&bs(t)}}}}function ms(t,i){const n=t.clientX-i.clientX,s=t.clientY-i.clientY;return Math.sqrt(n*n+s*s)}function bs(t){t.cancelable&&t.preventDefault()}function ws(t){return{et:t.pageX,rt:t.pageY}}function gs(t){return t.timeStamp||performance.now()}function Ms(t,i){for(let n=0;n{var s,e,r,h;return(null!==(e=null===(s=n.At())||void 0===s?void 0:s.xa())&&void 0!==e?e:"")!==i?[]:null!==(h=null===(r=n.la)||void 0===r?void 0:r.call(n,t))&&void 0!==h?h:[]}}class ks{constructor(t,i,n,s){this.Li=null,this.wv=null,this.gv=!1,this.Mv=new Qt(200),this.Gr=null,this.xv=0,this.Sv=!1,this.yv=()=>{this.Sv||this.nn.kv().qt().Fh()},this.Cv=()=>{this.Sv||this.nn.kv().qt().Fh()},this.nn=t,this.cn=i,this.mo=i.layout,this.yc=n,this.Tv="left"===s,this.Pv=ys("normal",s),this.Rv=ys("top",s),this.Dv=ys("bottom",s),this.Ov=document.createElement("div"),this.Ov.style.height="100%",this.Ov.style.overflow="hidden",this.Ov.style.width="25px",this.Ov.style.left="0",this.Ov.style.position="relative",this.Av=us(this.Ov,Gn({width:16,height:16})),this.Av.subscribeSuggestedBitmapSizeChanged(this.yv);const e=this.Av.canvasElement;e.style.position="absolute",e.style.zIndex="1",e.style.left="0",e.style.top="0",this.Bv=us(this.Ov,Gn({width:16,height:16})),this.Bv.subscribeSuggestedBitmapSizeChanged(this.Cv);const 
r=this.Bv.canvasElement;r.style.position="absolute",r.style.zIndex="2",r.style.left="0",r.style.top="0";const h={Qf:this.Vv.bind(this),Gf:this.Vv.bind(this),Hf:this.zv.bind(this),Wf:this.zv.bind(this),tv:this.Ev.bind(this),Yf:this.Iv.bind(this),Uf:this.Iv.bind(this),Tf:this.Lv.bind(this),Sf:this.Lv.bind(this),Vf:this.Nv.bind(this),ov:this.Fv.bind(this)};this.Wv=new ps(this.Bv.canvasElement,h,{Nf:()=>!this.cn.handleScroll.vertTouchDrag,Ff:()=>!0})}S(){this.Wv.S(),this.Bv.unsubscribeSuggestedBitmapSizeChanged(this.Cv),this.Bv.dispose(),this.Av.unsubscribeSuggestedBitmapSizeChanged(this.yv),this.Av.dispose(),null!==this.Li&&this.Li.$o().p(this),this.Li=null}jv(){return this.Ov}P(){return this.mo.fontSize}Hv(){const t=this.yc.W();return this.Gr!==t.R&&(this.Mv.ir(),this.Gr=t.R),t}$v(){if(null===this.Li)return 0;let t=0;const i=this.Hv(),n=f(this.Av.canvasElement.getContext("2d"));n.save();const s=this.Li.Ia();n.font=this.Uv(),s.length>0&&(t=Math.max(this.Mv.Si(n,s[0].Za),this.Mv.Si(n,s[s.length-1].Za)));const e=this.qv();for(let i=e.length;i--;){const s=this.Mv.Si(n,e[i].Jt());s>t&&(t=s)}const r=this.Li.Pt();if(null!==r&&null!==this.wv){const i=this.Li.pn(1,r),s=this.Li.pn(this.wv.height-2,r);t=Math.max(t,this.Mv.Si(n,this.Li.Wi(Math.floor(Math.min(i,s))+.11111111111111,r)),this.Mv.Si(n,this.Li.Wi(Math.ceil(Math.max(i,s))-.11111111111111,r)))}n.restore();const h=t||34;return ls(Math.ceil(i.C+i.T+i.V+i.I+5+h))}Yv(t){null!==this.wv&&Jn(this.wv,t)||(this.wv=t,this.Sv=!0,this.Av.resizeCanvasElement(t),this.Bv.resizeCanvasElement(t),this.Sv=!1,this.Ov.style.width=`${t.width}px`,this.Ov.style.height=`${t.height}px`)}Xv(){return f(this.wv).width}Ji(t){this.Li!==t&&(null!==this.Li&&this.Li.$o().p(this),this.Li=t,t.$o().l(this.ao.bind(this),this))}At(){return this.Li}ir(){const t=this.nn.Kv();this.nn.kv().qt().A_(t,f(this.At()))}Zv(t){if(null===this.wv)return;if(1!==t){this.Gv(),this.Av.applySuggestedBitmapSize();const 
t=ss(this.Av);null!==t&&(t.useBitmapCoordinateSpace((t=>{this.Jv(t),this.Ve(t)})),this.nn.Qv(t,this.Dv),this.tp(t),this.nn.Qv(t,this.Pv),this.ip(t))}this.Bv.applySuggestedBitmapSize();const i=ss(this.Bv);null!==i&&(i.useBitmapCoordinateSpace((({context:t,bitmapSize:i})=>{t.clearRect(0,0,i.width,i.height)})),this.np(i),this.nn.Qv(i,this.Rv))}sp(){return this.Av.bitmapSize}ep(t,i,n){const s=this.sp();s.width>0&&s.height>0&&t.drawImage(this.Av.canvasElement,i,n)}gt(){var t;null===(t=this.Li)||void 0===t||t.Ia()}Vv(t){if(null===this.Li||this.Li.Fi()||!this.cn.handleScale.axisPressedMouseMove.price)return;const i=this.nn.kv().qt(),n=this.nn.Kv();this.gv=!0,i.k_(n,this.Li,t.localY)}zv(t){if(null===this.Li||!this.cn.handleScale.axisPressedMouseMove.price)return;const i=this.nn.kv().qt(),n=this.nn.Kv(),s=this.Li;i.C_(n,s,t.localY)}Ev(){if(null===this.Li||!this.cn.handleScale.axisPressedMouseMove.price)return;const t=this.nn.kv().qt(),i=this.nn.Kv(),n=this.Li;this.gv&&(this.gv=!1,t.T_(i,n))}Iv(t){if(null===this.Li||!this.cn.handleScale.axisPressedMouseMove.price)return;const i=this.nn.kv().qt(),n=this.nn.Kv();this.gv=!1,i.T_(n,this.Li)}Lv(t){this.cn.handleScale.axisDoubleClickReset.price&&this.ir()}Nv(t){if(null===this.Li)return;!this.nn.kv().qt().W().handleScale.axisPressedMouseMove.price||this.Li.ph()||this.Li.Co()||this.rp(1)}Fv(t){this.rp(0)}qv(){const t=[],i=null===this.Li?void 0:this.Li;return(n=>{for(let s=0;s{t.fillStyle=n.borderColor;const l=Math.max(1,Math.floor(h)),a=Math.floor(.5*h),o=Math.round(s.T*r);t.beginPath();for(const n of i)t.rect(Math.floor(e*r),Math.round(n.Aa*h)-a,o,l);t.fill()})),t.useMediaCoordinateSpace((({context:t})=>{var r;t.font=this.Uv(),t.fillStyle=null!==(r=n.textColor)&&void 0!==r?r:this.mo.textColor,t.textAlign=this.Tv?"right":"left",t.textBaseline="middle";const h=this.Tv?Math.round(e-s.V):Math.round(e+s.T+s.V),l=i.map((i=>this.Mv.xi(t,i.Za)));for(let n=i.length;n--;){const 
s=i[n];t.fillText(s.Za,h,s.Aa+l[n])}}))}Gv(){if(null===this.wv||null===this.Li)return;let t=this.wv.height/2;const i=[],n=this.Li.No().slice(),s=this.nn.Kv(),e=this.Hv();this.Li===s.vr()&&this.nn.Kv().No().forEach((t=>{s.dr(t)&&n.push(t)}));const r=this.Li.Ta()[0],h=this.Li;n.forEach((n=>{const e=n.Rn(s,h);e.forEach((t=>{t.Bi(null),t.Vi()&&i.push(t)})),r===n&&e.length>0&&(t=e[0].ki())})),i.forEach((t=>t.Bi(t.ki())));this.Li.W().alignLabels&&this.hp(i,e,t)}hp(t,i,n){if(null===this.wv)return;const s=t.filter((t=>t.ki()<=n)),e=t.filter((t=>t.ki()>n));s.sort(((t,i)=>i.ki()-t.ki())),s.length&&e.length&&e.push(s[0]),e.sort(((t,i)=>t.ki()-i.ki()));for(const n of t){const t=Math.floor(n.Et(i)/2),s=n.ki();s>-t&&sthis.wv.height-t&&sl-r&&n.Bi(l-r)}for(let t=1;t{if(i.zi()){i.xt(f(this.Li)).K(t,n,this.Mv,s)}}))}np(t){if(null===this.wv||null===this.Li)return;const i=this.nn.kv().qt(),n=[],s=this.nn.Kv(),e=i.Fc().Rn(s,this.Li);e.length&&n.push(e);const r=this.Hv(),h=this.Tv?"right":"left";n.forEach((i=>{i.forEach((i=>{i.xt(f(this.Li)).K(t,r,this.Mv,h)}))}))}rp(t){this.Ov.style.cursor=1===t?"ns-resize":"default"}ao(){const t=this.$v();this.xv{this.Sv||null===this.wp||this.Ui().Fh()},this.Cv=()=>{this.Sv||null===this.wp||this.Ui().Fh()},this.gp=t,this.wp=i,this.wp.z_().l(this.Mp.bind(this),this,!0),this.xp=document.createElement("td"),this.xp.style.padding="0",this.xp.style.position="relative";const n=document.createElement("div");n.style.width="100%",n.style.height="100%",n.style.position="relative",n.style.overflow="hidden",this.Sp=document.createElement("td"),this.Sp.style.padding="0",this.yp=document.createElement("td"),this.yp.style.padding="0",this.xp.appendChild(n),this.Av=us(n,Gn({width:16,height:16})),this.Av.subscribeSuggestedBitmapSizeChanged(this.yv);const s=this.Av.canvasElement;s.style.position="absolute",s.style.zIndex="1",s.style.left="0",s.style.top="0",this.Bv=us(n,Gn({width:16,height:16})),this.Bv.subscribeSuggestedBitmapSizeChanged(this.Cv);const 
e=this.Bv.canvasElement;e.style.position="absolute",e.style.zIndex="2",e.style.left="0",e.style.top="0",this.kp=document.createElement("tr"),this.kp.appendChild(this.Sp),this.kp.appendChild(this.xp),this.kp.appendChild(this.yp),this.Cp(),this.Wv=new ps(this.Bv.canvasElement,this,{Nf:()=>null===this.vp&&!this.gp.W().handleScroll.vertTouchDrag,Ff:()=>null===this.vp&&!this.gp.W().handleScroll.horzTouchDrag})}S(){null!==this.lp&&this.lp.S(),null!==this.ap&&this.ap.S(),this.Bv.unsubscribeSuggestedBitmapSizeChanged(this.Cv),this.Bv.dispose(),this.Av.unsubscribeSuggestedBitmapSizeChanged(this.yv),this.Av.dispose(),null!==this.wp&&this.wp.z_().p(this),this.Wv.S()}Kv(){return f(this.wp)}Tp(t){null!==this.wp&&this.wp.z_().p(this),this.wp=t,null!==this.wp&&this.wp.z_().l(Ds.prototype.Mp.bind(this),this,!0),this.Cp()}kv(){return this.gp}jv(){return this.kp}Cp(){if(null!==this.wp&&(this.Pp(),0!==this.Ui().Mt().length)){if(null!==this.lp){const t=this.wp.S_();this.lp.Ji(f(t))}if(null!==this.ap){const t=this.wp.y_();this.ap.Ji(f(t))}}}Rp(){null!==this.lp&&this.lp.gt(),null!==this.ap&&this.ap.gt()}v_(){return null!==this.wp?this.wp.v_():0}p_(t){this.wp&&this.wp.p_(t)}Vf(t){if(!this.wp)return;this.Dp();const i=t.localX,n=t.localY;this.Op(i,n,t)}Qf(t){this.Dp(),this.Ap(),this.Op(t.localX,t.localY,t)}zf(t){var i;if(!this.wp)return;this.Dp();const n=t.localX,s=t.localY;this.Op(n,s,t);const e=this.br(n,s);this.gp.Bp(null!==(i=null==e?void 0:e.bv)&&void 0!==i?i:null),this.Ui().Vc(e&&{zc:e.zc,pv:e.pv})}Xf(t){null!==this.wp&&(this.Dp(),this.Vp(t))}Tf(t){null!==this.wp&&this.zp(this.cp,t)}Sf(t){this.Tf(t)}Hf(t){this.Dp(),this.Ep(t),this.Op(t.localX,t.localY,t)}Yf(t){null!==this.wp&&(this.Dp(),this.fp=!1,this.Ip(t))}qf(t){null!==this.wp&&this.Vp(t)}_v(t){if(this.fp=!0,null===this.vp){const i={x:t.localX,y:t.localY};this.Lp(i,i,t)}}ov(t){null!==this.wp&&(this.Dp(),this.wp.qt().Vc(null),this.Np())}Fp(){return this.up}Wp(){return 
this.cp}sv(){this.dp=1,this.Ui().Un()}ev(t,i){if(!this.gp.W().handleScale.pinch)return;const n=5*(i-this.dp);this.dp=i,this.Ui().Uc(t.et,n)}Gf(t){this.fp=!1,this.pp=null!==this.vp,this.Ap();const i=this.Ui().Fc();null!==this.vp&&i.Tt()&&(this.mp={x:i.Kt(),y:i.Zt()},this.vp={x:t.localX,y:t.localY})}Wf(t){if(null===this.wp)return;const i=t.localX,n=t.localY;if(null===this.vp)this.Ep(t);else{this.pp=!1;const s=f(this.mp),e=s.x+(i-this.vp.x),r=s.y+(n-this.vp.y);this.Op(e,r,t)}}Uf(t){0===this.kv().W().trackingMode.exitMode&&(this.pp=!0),this.jp(),this.Ip(t)}br(t,i){const n=this.wp;return null===n?null:function(t,i,n){const s=t.No(),e=function(t,i,n){var s,e;let r,h;for(const o of t){const t=null!==(e=null===(s=o.oa)||void 0===s?void 0:s.call(o,i,n))&&void 0!==e?e:[];for(const i of t)l=i.zOrder,(!(a=null==r?void 0:r.zOrder)||"top"===l&&"top"!==a||"normal"===l&&"bottom"===a)&&(r=i,h=o)}var l,a;return r&&h?{mv:r,zc:h}:null}(s,i,n);if("top"===(null==e?void 0:e.mv.zOrder))return xs(e);for(const r of s){if(e&&e.zc===r&&"bottom"!==e.mv.zOrder&&!e.mv.isBackground)return xs(e);const s=Ss(r.Pn(t),i,n);if(null!==s)return{zc:r,fv:s.fv,pv:s.pv};if(e&&e.zc===r&&"bottom"!==e.mv.zOrder&&e.mv.isBackground)return xs(e)}return(null==e?void 0:e.mv)?xs(e):null}(n,t,i)}Hp(t,i){f("left"===i?this.lp:this.ap).Yv(Gn({width:t,height:this.wv.height}))}$p(){return this.wv}Yv(t){Jn(this.wv,t)||(this.wv=t,this.Sv=!0,this.Av.resizeCanvasElement(t),this.Bv.resizeCanvasElement(t),this.Sv=!1,this.xp.style.width=t.width+"px",this.xp.style.height=t.height+"px")}Up(){const t=f(this.wp);t.x_(t.S_()),t.x_(t.y_());for(const i of t.Ta())if(t.dr(i)){const n=i.At();null!==n&&t.x_(n),i.On()}}sp(){return this.Av.bitmapSize}ep(t,i,n){const s=this.sp();s.width>0&&s.height>0&&t.drawImage(this.Av.canvasElement,i,n)}Zv(t){if(0===t)return;if(null===this.wp)return;if(t>1&&this.Up(),null!==this.lp&&this.lp.Zv(t),null!==this.ap&&this.ap.Zv(t),1!==t){this.Av.applySuggestedBitmapSize();const 
t=ss(this.Av);null!==t&&(t.useBitmapCoordinateSpace((t=>{this.Jv(t)})),this.wp&&(this.qp(t,Cs),this.Yp(t),this.Xp(t),this.qp(t,Ts),this.qp(t,Ps)))}this.Bv.applySuggestedBitmapSize();const i=ss(this.Bv);null!==i&&(i.useBitmapCoordinateSpace((({context:t,bitmapSize:i})=>{t.clearRect(0,0,i.width,i.height)})),this.Kp(i),this.qp(i,Rs))}Zp(){return this.lp}Gp(){return this.ap}Qv(t,i){this.qp(t,i)}Mp(){null!==this.wp&&this.wp.z_().p(this),this.wp=null}Vp(t){this.zp(this.up,t)}zp(t,i){const n=i.localX,s=i.localY;t.M()&&t.m(this.Ui().kt().Bu(n),{x:n,y:s},i)}Jv({context:t,bitmapSize:i}){const{width:n,height:s}=i,e=this.Ui(),r=e.q(),h=e.od();r===h?Y(t,0,0,n,s,h):G(t,0,0,n,s,r,h)}Yp(t){const i=f(this.wp).E_().Wh().xt();null!==i&&i.K(t,!1)}Xp(t){const i=this.Ui().Nc();this.Jp(t,Ts,cs,i),this.Jp(t,Ts,ds,i)}Kp(t){this.Jp(t,Ts,ds,this.Ui().Fc())}qp(t,i){const n=f(this.wp).No();for(const s of n)this.Jp(t,i,cs,s);for(const s of n)this.Jp(t,i,ds,s)}Jp(t,i,n,s){const e=f(this.wp),r=e.qt().Bc(),h=null!==r&&r.zc===s,l=null!==r&&h&&void 0!==r.pv?r.pv.gr:void 0;fs(i,(i=>n(i,t,h,l)),s,e)}Pp(){if(null===this.wp)return;const t=this.gp,i=this.wp.S_().W().visible,n=this.wp.y_().W().visible;i||null===this.lp||(this.Sp.removeChild(this.lp.jv()),this.lp.S(),this.lp=null),n||null===this.ap||(this.yp.removeChild(this.ap.jv()),this.ap.S(),this.ap=null);const s=t.qt().sd();i&&null===this.lp&&(this.lp=new ks(this,t.W(),s,"left"),this.Sp.appendChild(this.lp.jv())),n&&null===this.ap&&(this.ap=new ks(this,t.W(),s,"right"),this.yp.appendChild(this.ap.jv()))}Qp(t){return t.uv&&this.fp||null!==this.vp}tm(t){return Math.max(0,Math.min(t,this.wv.width-1))}im(t){return Math.max(0,Math.min(t,this.wv.height-1))}Op(t,i,n){this.Ui().Jc(this.tm(t),this.im(i),n,f(this.wp))}Np(){this.Ui().td()}jp(){this.pp&&(this.vp=null,this.Np())}Lp(t,i,n){this.vp=t,this.pp=!1,this.Op(i.x,i.y,n);const s=this.Ui().Fc();this.mp={x:s.Kt(),y:s.Zt()}}Ui(){return this.gp.qt()}Ip(t){if(!this._p)return;const 
i=this.Ui(),n=this.Kv();if(i.D_(n,n.vn()),this.op=null,this._p=!1,i.Kc(),null!==this.bp){const t=performance.now(),n=i.kt();this.bp.Dr(n.Iu(),t),this.bp.Yu(t)||i.Xn(this.bp)}}Dp(){this.vp=null}Ap(){if(!this.wp)return;if(this.Ui().Un(),document.activeElement!==document.body&&document.activeElement!==document.documentElement)f(document.activeElement).blur();else{const t=document.getSelection();null!==t&&t.removeAllRanges()}!this.wp.vn().Fi()&&this.Ui().kt().Fi()}Ep(t){if(null===this.wp)return;const i=this.Ui(),n=i.kt();if(n.Fi())return;const s=this.gp.W(),e=s.handleScroll,r=s.kineticScroll;if((!e.pressedMouseMove||t.uv)&&(!e.horzTouchDrag&&!e.vertTouchDrag||!t.uv))return;const h=this.wp.vn(),l=performance.now();if(null!==this.op||this.Qp(t)||(this.op={x:t.clientX,y:t.clientY,yd:l,nm:t.localX,sm:t.localY}),null!==this.op&&!this._p&&(this.op.x!==t.clientX||this.op.y!==t.clientY)){if(t.uv&&r.touch||!t.uv&&r.mouse){const t=n.he();this.bp=new _s(.2/t,7/t,.997,15/t),this.bp.Nd(n.Iu(),this.op.yd)}else this.bp=null;h.Fi()||i.P_(this.wp,h,t.localY),i.Yc(t.localX),this._p=!0}this._p&&(h.Fi()||i.R_(this.wp,h,t.localY),i.Xc(t.localX),null!==this.bp&&this.bp.Nd(n.Iu(),l))}}class Os{constructor(t,i,n,s,e){this.bt=!0,this.wv=Gn({width:0,height:0}),this.yv=()=>this.Zv(3),this.Tv="left"===t,this.yc=n.sd,this.cn=i,this.rm=s,this.hm=e,this.Ov=document.createElement("div"),this.Ov.style.width="25px",this.Ov.style.height="100%",this.Ov.style.overflow="hidden",this.Av=us(this.Ov,Gn({width:16,height:16})),this.Av.subscribeSuggestedBitmapSizeChanged(this.yv)}S(){this.Av.unsubscribeSuggestedBitmapSizeChanged(this.yv),this.Av.dispose()}jv(){return this.Ov}$p(){return this.wv}Yv(t){Jn(this.wv,t)||(this.wv=t,this.Av.resizeCanvasElement(t),this.Ov.style.width=`${t.width}px`,this.Ov.style.height=`${t.height}px`,this.bt=!0)}Zv(t){if(t<3&&!this.bt)return;if(0===this.wv.width||0===this.wv.height)return;this.bt=!1,this.Av.applySuggestedBitmapSize();const 
i=ss(this.Av);null!==i&&i.useBitmapCoordinateSpace((t=>{this.Jv(t),this.Ve(t)}))}sp(){return this.Av.bitmapSize}ep(t,i,n){const s=this.sp();s.width>0&&s.height>0&&t.drawImage(this.Av.canvasElement,i,n)}Ve({context:t,bitmapSize:i,horizontalPixelRatio:n,verticalPixelRatio:s}){if(!this.rm())return;t.fillStyle=this.cn.timeScale.borderColor;const e=Math.floor(this.yc.W().C*n),r=Math.floor(this.yc.W().C*s),h=this.Tv?i.width-e:0;t.fillRect(h,0,e,r)}Jv({context:t,bitmapSize:i}){Y(t,0,0,i.width,i.height,this.hm())}}function As(t){return i=>{var n,s;return null!==(s=null===(n=i.aa)||void 0===n?void 0:n.call(i,t))&&void 0!==s?s:[]}}const Bs=As("normal"),Vs=As("top"),zs=As("bottom");class Es{constructor(t,i){this.lm=null,this.am=null,this.k=null,this.om=!1,this.wv=Gn({width:0,height:0}),this._m=new k,this.Mv=new Qt(5),this.Sv=!1,this.yv=()=>{this.Sv||this.gp.qt().Fh()},this.Cv=()=>{this.Sv||this.gp.qt().Fh()},this.gp=t,this.N_=i,this.cn=t.W().layout,this.um=document.createElement("tr"),this.dm=document.createElement("td"),this.dm.style.padding="0",this.fm=document.createElement("td"),this.fm.style.padding="0",this.Ov=document.createElement("td"),this.Ov.style.height="25px",this.Ov.style.padding="0",this.vm=document.createElement("div"),this.vm.style.width="100%",this.vm.style.height="100%",this.vm.style.position="relative",this.vm.style.overflow="hidden",this.Ov.appendChild(this.vm),this.Av=us(this.vm,Gn({width:16,height:16})),this.Av.subscribeSuggestedBitmapSizeChanged(this.yv);const n=this.Av.canvasElement;n.style.position="absolute",n.style.zIndex="1",n.style.left="0",n.style.top="0",this.Bv=us(this.vm,Gn({width:16,height:16})),this.Bv.subscribeSuggestedBitmapSizeChanged(this.Cv);const s=this.Bv.canvasElement;s.style.position="absolute",s.style.zIndex="2",s.style.left="0",s.style.top="0",this.um.appendChild(this.dm),this.um.appendChild(this.Ov),this.um.appendChild(this.fm),this.pm(),this.gp.qt().f_().l(this.pm.bind(this),this),this.Wv=new 
ps(this.Bv.canvasElement,this,{Nf:()=>!0,Ff:()=>!this.gp.W().handleScroll.horzTouchDrag})}S(){this.Wv.S(),null!==this.lm&&this.lm.S(),null!==this.am&&this.am.S(),this.Bv.unsubscribeSuggestedBitmapSizeChanged(this.Cv),this.Bv.dispose(),this.Av.unsubscribeSuggestedBitmapSizeChanged(this.yv),this.Av.dispose()}jv(){return this.um}bm(){return this.lm}wm(){return this.am}Qf(t){if(this.om)return;this.om=!0;const i=this.gp.qt();!i.kt().Fi()&&this.gp.W().handleScale.axisPressedMouseMove.time&&i.$c(t.localX)}Gf(t){this.Qf(t)}tv(){const t=this.gp.qt();!t.kt().Fi()&&this.om&&(this.om=!1,this.gp.W().handleScale.axisPressedMouseMove.time&&t.Gc())}Hf(t){const i=this.gp.qt();!i.kt().Fi()&&this.gp.W().handleScale.axisPressedMouseMove.time&&i.Zc(t.localX)}Wf(t){this.Hf(t)}Yf(){this.om=!1;const t=this.gp.qt();t.kt().Fi()&&!this.gp.W().handleScale.axisPressedMouseMove.time||t.Gc()}Uf(){this.Yf()}Tf(){this.gp.W().handleScale.axisDoubleClickReset.time&&this.gp.qt().Zn()}Sf(){this.Tf()}Vf(){this.gp.qt().W().handleScale.axisPressedMouseMove.time&&this.rp(1)}ov(){this.rp(0)}$p(){return this.wv}gm(){return this._m}Mm(t,i,n){Jn(this.wv,t)||(this.wv=t,this.Sv=!0,this.Av.resizeCanvasElement(t),this.Bv.resizeCanvasElement(t),this.Sv=!1,this.Ov.style.width=`${t.width}px`,this.Ov.style.height=`${t.height}px`,this._m.m(t)),null!==this.lm&&this.lm.Yv(Gn({width:i,height:t.height})),null!==this.am&&this.am.Yv(Gn({width:n,height:t.height}))}xm(){const t=this.Sm();return Math.ceil(t.C+t.T+t.P+t.L+t.B+t.ym)}gt(){this.gp.qt().kt().Ia()}sp(){return this.Av.bitmapSize}ep(t,i,n){const s=this.sp();s.width>0&&s.height>0&&t.drawImage(this.Av.canvasElement,i,n)}Zv(t){if(0===t)return;if(1!==t){this.Av.applySuggestedBitmapSize();const i=ss(this.Av);null!==i&&(i.useBitmapCoordinateSpace((t=>{this.Jv(t),this.Ve(t),this.km(i,zs)})),this.tp(i),this.km(i,Bs)),null!==this.lm&&this.lm.Zv(t),null!==this.am&&this.am.Zv(t)}this.Bv.applySuggestedBitmapSize();const 
i=ss(this.Bv);null!==i&&(i.useBitmapCoordinateSpace((({context:t,bitmapSize:i})=>{t.clearRect(0,0,i.width,i.height)})),this.Cm([...this.gp.qt().Mt(),this.gp.qt().Fc()],i),this.km(i,Vs))}km(t,i){const n=this.gp.qt().Mt();for(const s of n)fs(i,(i=>cs(i,t,!1,void 0)),s,void 0);for(const s of n)fs(i,(i=>ds(i,t,!1,void 0)),s,void 0)}Jv({context:t,bitmapSize:i}){Y(t,0,0,i.width,i.height,this.gp.qt().od())}Ve({context:t,bitmapSize:i,verticalPixelRatio:n}){if(this.gp.W().timeScale.borderVisible){t.fillStyle=this.Tm();const s=Math.max(1,Math.floor(this.Sm().C*n));t.fillRect(0,0,i.width,s)}}tp(t){const i=this.gp.qt().kt(),n=i.Ia();if(!n||0===n.length)return;const s=this.N_.maxTickMarkWeight(n),e=this.Sm(),r=i.W();r.borderVisible&&r.ticksVisible&&t.useBitmapCoordinateSpace((({context:t,horizontalPixelRatio:i,verticalPixelRatio:s})=>{t.strokeStyle=this.Tm(),t.fillStyle=this.Tm();const r=Math.max(1,Math.floor(i)),h=Math.floor(.5*i);t.beginPath();const l=Math.round(e.T*s);for(let s=n.length;s--;){const e=Math.round(n[s].coord*i);t.rect(e-h,0,r,l)}t.fill()})),t.useMediaCoordinateSpace((({context:t})=>{const i=e.C+e.T+e.L+e.P/2;t.textAlign="center",t.textBaseline="middle",t.fillStyle=this.$(),t.font=this.Uv();for(const e of n)if(e.weight=s){const n=e.needAlignCoordinate?this.Pm(t,e.coord,e.label):e.coord;t.fillText(e.label,n,i)}}))}Pm(t,i,n){const s=this.Mv.Si(t,n),e=s/2,r=Math.floor(i-e)+.5;return r<0?i+=Math.abs(0-r):r+s>this.wv.width&&(i-=Math.abs(this.wv.width-(r+s))),i}Cm(t,i){const n=this.Sm();for(const s of t)for(const t of s.tn())t.xt().K(i,n)}Tm(){return this.gp.W().timeScale.borderColor}$(){return this.cn.textColor}j(){return this.cn.fontSize}Uv(){return z(this.j(),this.cn.fontFamily)}Rm(){return z(this.j(),this.cn.fontFamily,"bold")}Sm(){null===this.k&&(this.k={C:1,N:NaN,L:NaN,B:NaN,Hi:NaN,T:5,P:NaN,R:"",ji:new Qt,ym:0});const t=this.k,i=this.Uv();if(t.R!==i){const n=this.j();t.P=n,t.R=i,t.L=3*n/12,t.B=3*n/12,t.Hi=9*n/12,t.N=0,t.ym=4*n/12,t.ji.ir()}return 
this.k}rp(t){this.Ov.style.cursor=1===t?"ew-resize":"default"}pm(){const t=this.gp.qt(),i=t.W();i.leftPriceScale.visible||null===this.lm||(this.dm.removeChild(this.lm.jv()),this.lm.S(),this.lm=null),i.rightPriceScale.visible||null===this.am||(this.fm.removeChild(this.am.jv()),this.am.S(),this.am=null);const n={sd:this.gp.qt().sd()},s=()=>i.leftPriceScale.borderVisible&&t.kt().W().borderVisible,e=()=>t.od();i.leftPriceScale.visible&&null===this.lm&&(this.lm=new Os("left",i,n,s,e),this.dm.appendChild(this.lm.jv())),i.rightPriceScale.visible&&null===this.am&&(this.am=new Os("right",i,n,s,e),this.fm.appendChild(this.am.jv()))}}const Is=!!es&&!!navigator.userAgentData&&navigator.userAgentData.brands.some((t=>t.brand.includes("Chromium")))&&!!es&&((null===(Ls=null===navigator||void 0===navigator?void 0:navigator.userAgentData)||void 0===Ls?void 0:Ls.platform)?"Windows"===navigator.userAgentData.platform:navigator.userAgent.toLowerCase().indexOf("win")>=0);var Ls;class Ns{constructor(t,i,n){var s;this.Dm=[],this.Om=0,this.Qa=0,this.e_=0,this.Am=0,this.Bm=0,this.Vm=null,this.zm=!1,this.up=new k,this.cp=new k,this.Mc=new k,this.Em=null,this.Im=null,this.Lm=t,this.cn=i,this.N_=n,this.um=document.createElement("div"),this.um.classList.add("tv-lightweight-charts"),this.um.style.overflow="hidden",this.um.style.direction="ltr",this.um.style.width="100%",this.um.style.height="100%",(s=this.um).style.userSelect="none",s.style.webkitUserSelect="none",s.style.msUserSelect="none",s.style.MozUserSelect="none",s.style.webkitTapHighlightColor="transparent",this.Nm=document.createElement("table"),this.Nm.setAttribute("cellspacing","0"),this.um.appendChild(this.Nm),this.Fm=this.Wm.bind(this),Fs(this.cn)&&this.jm(!0),this.Ui=new An(this.Sc.bind(this),this.cn,n),this.qt().Wc().l(this.Hm.bind(this),this),this.$m=new Es(this,this.N_),this.Nm.appendChild(this.$m.jv());const e=i.autoSize&&this.Um();let r=this.cn.width,h=this.cn.height;if(e||0===r||0===h){const 
i=t.getBoundingClientRect();r=r||i.width,h=h||i.height}this.qm(r,h),this.Ym(),t.appendChild(this.um),this.Xm(),this.Ui.kt().Gu().l(this.Ui.$l.bind(this.Ui),this),this.Ui.f_().l(this.Ui.$l.bind(this.Ui),this)}qt(){return this.Ui}W(){return this.cn}Km(){return this.Dm}Zm(){return this.$m}S(){this.jm(!1),0!==this.Om&&window.cancelAnimationFrame(this.Om),this.Ui.Wc().p(this),this.Ui.kt().Gu().p(this),this.Ui.f_().p(this),this.Ui.S();for(const t of this.Dm)this.Nm.removeChild(t.jv()),t.Fp().p(this),t.Wp().p(this),t.S();this.Dm=[],f(this.$m).S(),null!==this.um.parentElement&&this.um.parentElement.removeChild(this.um),this.Mc.S(),this.up.S(),this.cp.S(),this.Gm()}qm(t,i,n=!1){if(this.Qa===i&&this.e_===t)return;const s=function(t){const i=Math.floor(t.width),n=Math.floor(t.height);return Gn({width:i-i%2,height:n-n%2})}(Gn({width:t,height:i}));this.Qa=s.height,this.e_=s.width;const e=this.Qa+"px",r=this.e_+"px";f(this.um).style.height=e,f(this.um).style.width=r,this.Nm.style.height=e,this.Nm.style.width=r,n?this.Jm(at.es(),performance.now()):this.Ui.$l()}Zv(t){void 0===t&&(t=at.es());for(let i=0;i{let s=0;for(let e=0;e{f("left"===i?this.$m.bm():this.$m.wm()).ep(f(t),n,s)};if(this.cn.timeScale.visible){const i=this.$m.sp();if(null!==t){let e=0;this.sb()&&(r("left",e,n),e=f(s.Zp()).sp().width),this.$m.ep(t,e,n),e+=i.width,this.eb()&&r("right",e,n)}n+=i.height}return Gn({width:i,height:n})}ob(){let t=0,i=0,n=0;for(const s of this.Dm)this.sb()&&(i=Math.max(i,f(s.Zp()).$v(),this.cn.leftPriceScale.minimumWidth)),this.eb()&&(n=Math.max(n,f(s.Gp()).$v(),this.cn.rightPriceScale.minimumWidth)),t+=s.v_();i=ls(i),n=ls(n);const s=this.e_,e=this.Qa,r=Math.max(s-i-n,0),h=this.cn.timeScale.visible;let l=h?Math.max(this.$m.xm(),this.cn.timeScale.minimumHeight):0;var a;l=(a=l)+a%2;const o=0+l,_=e{t.Rp()})),3===(null===(n=this.Vm)||void 0===n?void 0:n.jn())&&(this.Vm.ts(t),this.ub(),this.cb(this.Vm),this.fb(this.Vm,i),t=this.Vm,this.Vm=null)),this.Zv(t)}fb(t,i){for(const n of 
t.Qn())this.ns(n,i)}cb(t){const i=this.Ui.Lc();for(let n=0;n{if(this.zm=!1,this.Om=0,null!==this.Vm){const i=this.Vm;this.Vm=null,this.Jm(i,t);for(const n of i.Qn())if(5===n.qn&&!n.Bt.Yu(t)){this.qt().Xn(n.Bt);break}}})))}ub(){this.Ym()}Ym(){const t=this.Ui.Lc(),i=t.length,n=this.Dm.length;for(let t=i;t{const n=i.zn().nl(t);null!==n&&e.set(i,n)}))}let r;if(null!==t){const i=null===(s=this.Ui.kt().qi(t))||void 0===s?void 0:s.originalTime;void 0!==i&&(r=i)}const h=this.qt().Bc(),l=null!==h&&h.zc instanceof Yi?h.zc:void 0,a=null!==h&&void 0!==h.pv?h.pv.wr:void 0;return{bb:r,se:null!=t?t:void 0,wb:null!=i?i:void 0,gb:l,Mb:e,xb:a,Sb:null!=n?n:void 0}}vb(t,i,n){this.up.m((()=>this.mb(t,i,n)))}pb(t,i,n){this.cp.m((()=>this.mb(t,i,n)))}Hm(t,i,n){this.Mc.m((()=>this.mb(t,i,n)))}Xm(){const t=this.cn.timeScale.visible?"":"none";this.$m.jv().style.display=t}sb(){return this.Dm[0].Kv().S_().W().visible}eb(){return this.Dm[0].Kv().y_().W().visible}Um(){return"ResizeObserver"in window&&(this.Em=new ResizeObserver((t=>{const i=t.find((t=>t.target===this.Lm));i&&this.qm(i.contentRect.width,i.contentRect.height)})),this.Em.observe(this.Lm,{box:"border-box"}),!0)}Gm(){null!==this.Em&&this.Em.disconnect(),this.Em=null}}function Fs(t){return Boolean(t.handleScroll.mouseWheel||t.handleScale.mouseWheel)}function Ws(t,i){var n={};for(var s in t)Object.prototype.hasOwnProperty.call(t,s)&&i.indexOf(s)<0&&(n[s]=t[s]);if(null!=t&&"function"==typeof Object.getOwnPropertySymbols){var e=0;for(s=Object.getOwnPropertySymbols(t);efunction(t,i){return i?i(t):void 0===(n=t).open&&void 0===n.value;var n}(s,h)?Ks({ut:i,se:n,bb:e},s):Ks(t(i,n,s,e,r),s)}function Gs(t){return{Candlestick:Zs(qs),Bar:Zs(Us),Area:Zs(Hs),Baseline:Zs($s),Histogram:Zs(js),Line:Zs(js),Custom:Zs(Ys)}[t]}function Js(t){return{se:0,kb:new Map,ia:t}}function Qs(t,i){if(void 0!==t&&0!==t.length)return{Cb:i.key(t[0].ut),Tb:i.key(t[t.length-1].ut)}}function te(t){let i;return t.forEach((t=>{void 0===i&&(i=t.bb)})),d(i)}class 
ie{constructor(t){this.Pb=new Map,this.Rb=new Map,this.Db=new Map,this.Ob=[],this.N_=t}S(){this.Pb.clear(),this.Rb.clear(),this.Db.clear(),this.Ob=[]}Ab(t,i){let n=0!==this.Pb.size,s=!1;const e=this.Rb.get(t);if(void 0!==e)if(1===this.Rb.size)n=!1,s=!0,this.Pb.clear();else for(const i of this.Ob)i.pointData.kb.delete(t)&&(s=!0);let r=[];if(0!==i.length){const n=i.map((t=>t.time)),e=this.N_.createConverterToInternalObj(i),h=Gs(t.Xh()),l=t.ga(),a=t.Ma();r=i.map(((i,r)=>{const o=e(i.time),_=this.N_.key(o);let u=this.Pb.get(_);void 0===u&&(u=Js(o),this.Pb.set(_,u),s=!0);const c=h(o,u.se,i,n[r],l,a);return u.kb.set(t,c),c}))}n&&this.Bb(),this.Vb(t,r);let h=-1;if(s){const t=[];this.Pb.forEach((i=>{t.push({timeWeight:0,time:i.ia,pointData:i,originalTime:te(i.kb)})})),t.sort(((t,i)=>this.N_.key(t.time)-this.N_.key(i.time))),h=this.zb(t)}return this.Eb(t,h,function(t,i,n){const s=Qs(t,n),e=Qs(i,n);if(void 0!==s&&void 0!==e)return{Xl:s.Tb>=e.Tb&&s.Cb>=e.Cb}}(this.Rb.get(t),e,this.N_))}hd(t){return this.Ab(t,[])}Ib(t,i){const n=i;!function(t){void 0===t.bb&&(t.bb=t.time)}(n),this.N_.preprocessData(i);const s=this.N_.createConverterToInternalObj([i])(i.time),e=this.Db.get(t);if(void 0!==e&&this.N_.key(s)this.N_.key(t.time)this.N_.key(s.ut)?Xs(i)&&n.push(i):Xs(i)?n[n.length-1]=i:n.splice(-1,1),this.Db.set(t,i.ut)}Vb(t,i){0!==i.length?(this.Rb.set(t,i.filter(Xs)),this.Db.set(t,i[i.length-1].ut)):(this.Rb.delete(t),this.Db.delete(t))}Bb(){for(const t of this.Ob)0===t.pointData.kb.size&&this.Pb.delete(this.N_.key(t.time))}zb(t){let i=-1;for(let n=0;n{0!==i.length&&(t=Math.max(t,i[i.length-1].se))})),t}Eb(t,i,n){const s={Fb:new Map,kt:{Au:this.Nb()}};if(-1!==i)this.Rb.forEach(((i,e)=>{s.Fb.set(e,{He:i,Wb:e===t?n:void 0})})),this.Rb.has(t)||s.Fb.set(t,{He:[],Wb:n}),s.kt.jb=this.Ob,s.kt.Hb=i;else{const i=this.Rb.get(t);s.Fb.set(t,{He:i||[],Wb:n})}return s}}function ne(t,i){t.se=i,t.kb.forEach((t=>{t.se=i}))}function se(t){const i={value:t.Bt[3],time:t.bb};return void 
0!==t.yb&&(i.customValues=t.yb),i}function ee(t){const i=se(t);return void 0!==t.O&&(i.color=t.O),i}function re(t){const i=se(t);return void 0!==t._t&&(i.lineColor=t._t),void 0!==t.Ts&&(i.topColor=t.Ts),void 0!==t.Ps&&(i.bottomColor=t.Ps),i}function he(t){const i=se(t);return void 0!==t.Pe&&(i.topLineColor=t.Pe),void 0!==t.Re&&(i.bottomLineColor=t.Re),void 0!==t.Se&&(i.topFillColor1=t.Se),void 0!==t.ye&&(i.topFillColor2=t.ye),void 0!==t.ke&&(i.bottomFillColor1=t.ke),void 0!==t.Ce&&(i.bottomFillColor2=t.Ce),i}function le(t){const i={open:t.Bt[0],high:t.Bt[1],low:t.Bt[2],close:t.Bt[3],time:t.bb};return void 0!==t.yb&&(i.customValues=t.yb),i}function ae(t){const i=le(t);return void 0!==t.O&&(i.color=t.O),i}function oe(t){const i=le(t),{O:n,Vt:s,$h:e}=t;return void 0!==n&&(i.color=n),void 0!==s&&(i.borderColor=s),void 0!==e&&(i.wickColor=e),i}function _e(t){return{Area:re,Line:ee,Baseline:he,Histogram:ee,Bar:ae,Candlestick:oe,Custom:ue}[t]}function ue(t){const i=t.bb;return Object.assign(Object.assign({},t.He),{time:i})}const 
ce={vertLine:{color:"#9598A1",width:1,style:3,visible:!0,labelVisible:!0,labelBackgroundColor:"#131722"},horzLine:{color:"#9598A1",width:1,style:3,visible:!0,labelVisible:!0,labelBackgroundColor:"#131722"},mode:1},de={vertLines:{color:"#D6DCDE",style:0,visible:!0},horzLines:{color:"#D6DCDE",style:0,visible:!0}},fe={background:{type:"solid",color:"#FFFFFF"},textColor:"#191919",fontSize:12,fontFamily:V},ve={autoScale:!0,mode:0,invertScale:!1,alignLabels:!0,borderVisible:!0,borderColor:"#2B2B43",entireTextOnly:!1,visible:!1,ticksVisible:!1,scaleMargins:{bottom:.1,top:.2},minimumWidth:0},pe={rightOffset:0,barSpacing:6,minBarSpacing:.5,fixLeftEdge:!1,fixRightEdge:!1,lockVisibleTimeRangeOnResize:!1,rightBarStaysOnScroll:!1,borderVisible:!0,borderColor:"#2B2B43",visible:!0,timeVisible:!1,secondsVisible:!0,shiftVisibleRangeOnNewBar:!0,allowShiftVisibleRangeOnWhitespaceReplacement:!1,ticksVisible:!1,uniformDistribution:!1,minimumHeight:0},me={color:"rgba(0, 0, 0, 0)",visible:!1,fontSize:48,fontFamily:V,fontStyle:"",text:"",horzAlign:"center",vertAlign:"center"};function be(){return{width:0,height:0,autoSize:!1,layout:fe,crosshair:ce,grid:de,overlayPriceScales:Object.assign({},ve),leftPriceScale:Object.assign(Object.assign({},ve),{visible:!1}),rightPriceScale:Object.assign(Object.assign({},ve),{visible:!0}),timeScale:pe,watermark:me,localization:{locale:es?navigator.language:"",dateFormat:"dd MMM 'yy"},handleScroll:{mouseWheel:!0,pressedMouseMove:!0,horzTouchDrag:!0,vertTouchDrag:!0},handleScale:{axisPressedMouseMove:{time:!0,price:!0},axisDoubleClickReset:{time:!0,price:!0},mouseWheel:!0,pinch:!0},kineticScroll:{mouse:!1,touch:!0},trackingMode:{exitMode:1}}}class we{constructor(t,i){this.$b=t,this.Ub=i}applyOptions(t){this.$b.qt().Ec(this.Ub,t)}options(){return this.Li().W()}width(){return lt(this.Ub)?this.$b.nb(this.Ub):0}Li(){return f(this.$b.qt().Ic(this.Ub)).At}}function ge(t,i,n){const s=Ws(t,["time","originalTime"]),e=Object.assign({time:i},s);return void 
0!==n&&(e.originalTime=n),e}const Me={color:"#FF0000",price:0,lineStyle:2,lineWidth:1,lineVisible:!0,axisLabelVisible:!0,title:"",axisLabelColor:"",axisLabelTextColor:""};class xe{constructor(t){this.Vh=t}applyOptions(t){this.Vh.Nh(t)}options(){return this.Vh.W()}qb(){return this.Vh}}class Se{constructor(t,i,n,s,e){this.Yb=new k,this.Is=t,this.Xb=i,this.Kb=n,this.N_=e,this.Zb=s}S(){this.Yb.S()}priceFormatter(){return this.Is.ca()}priceToCoordinate(t){const i=this.Is.Pt();return null===i?null:this.Is.At().Ot(t,i.Bt)}coordinateToPrice(t){const i=this.Is.Pt();return null===i?null:this.Is.At().pn(t,i.Bt)}barsInLogicalRange(t){if(null===t)return null;const i=new Mn(new bn(t.from,t.to)).iu(),n=this.Is.zn();if(n.Fi())return null;const s=n.nl(i.Os(),1),e=n.nl(i.di(),-1),r=f(n.Qh()),h=f(n.Vn());if(null!==s&&null!==e&&s.se>e.se)return{barsBefore:t.from-r,barsAfter:h-t.to};const l={barsBefore:null===s||s.se===r?t.from-r:s.se-r,barsAfter:null===e||e.se===h?h-t.to:h-e.se};return null!==s&&null!==e&&(l.from=s.bb,l.to=e.bb),l}setData(t){this.N_,this.Is.Xh(),this.Xb.Gb(this.Is,t),this.Jb("full")}update(t){this.Is.Xh(),this.Xb.Qb(this.Is,t),this.Jb("update")}dataByIndex(t,i){const n=this.Is.zn().nl(t,i);if(null===n)return null;return _e(this.seriesType())(n)}data(){const t=_e(this.seriesType());return this.Is.zn().ie().map((i=>t(i)))}subscribeDataChanged(t){this.Yb.l(t)}unsubscribeDataChanged(t){this.Yb.v(t)}setMarkers(t){this.N_;const i=t.map((t=>ge(t,this.N_.convertHorzItemToInternal(t.time),t.time)));this.Is.Zl(i)}markers(){return this.Is.Gl().map((t=>ge(t,t.originalTime,void 0)))}applyOptions(t){this.Is.Nh(t)}options(){return O(this.Is.W())}priceScale(){return this.Kb.priceScale(this.Is.At().xa())}createPriceLine(t){const i=C(O(Me),t),n=this.Is.Jl(i);return new xe(n)}removePriceLine(t){this.Is.Ql(t.qb())}seriesType(){return 
this.Is.Xh()}attachPrimitive(t){this.Is.ba(t),t.attached&&t.attached({chart:this.Zb,series:this,requestUpdate:()=>this.Is.qt().$l()})}detachPrimitive(t){this.Is.wa(t),t.detached&&t.detached()}Jb(t){this.Yb.M()&&this.Yb.m(t)}}class ye{constructor(t,i,n){this.tw=new k,this.uu=new k,this._m=new k,this.Ui=t,this.wl=t.kt(),this.$m=i,this.wl.Ku().l(this.iw.bind(this)),this.wl.Zu().l(this.nw.bind(this)),this.$m.gm().l(this.sw.bind(this)),this.N_=n}S(){this.wl.Ku().p(this),this.wl.Zu().p(this),this.$m.gm().p(this),this.tw.S(),this.uu.S(),this._m.S()}scrollPosition(){return this.wl.Iu()}scrollToPosition(t,i){i?this.wl.qu(t,1e3):this.Ui.Jn(t)}scrollToRealTime(){this.wl.Uu()}getVisibleRange(){const t=this.wl.ku();return null===t?null:{from:t.from.originalTime,to:t.to.originalTime}}setVisibleRange(t){const i={from:this.N_.convertHorzItemToInternal(t.from),to:this.N_.convertHorzItemToInternal(t.to)},n=this.wl.Ru(i);this.Ui.ld(n)}getVisibleLogicalRange(){const t=this.wl.yu();return null===t?null:{from:t.Os(),to:t.di()}}setVisibleLogicalRange(t){c(t.from<=t.to,"The from index cannot be after the to index."),this.Ui.ld(t)}resetTimeScale(){this.Ui.Zn()}fitContent(){this.Ui.Qu()}logicalToCoordinate(t){const i=this.Ui.kt();return i.Fi()?null:i.It(t)}coordinateToLogical(t){return this.wl.Fi()?null:this.wl.Bu(t)}timeToCoordinate(t){const i=this.N_.convertHorzItemToInternal(t),n=this.wl.ka(i,!1);return null===n?null:this.wl.It(n)}coordinateToTime(t){const i=this.Ui.kt(),n=i.Bu(t),s=i.qi(n);return null===s?null:s.originalTime}width(){return this.$m.$p().width}height(){return this.$m.$p().height}subscribeVisibleTimeRangeChange(t){this.tw.l(t)}unsubscribeVisibleTimeRangeChange(t){this.tw.v(t)}subscribeVisibleLogicalRangeChange(t){this.uu.l(t)}unsubscribeVisibleLogicalRangeChange(t){this.uu.v(t)}subscribeSizeChange(t){this._m.l(t)}unsubscribeSizeChange(t){this._m.v(t)}applyOptions(t){this.wl.Nh(t)}options(){return 
Object.assign(Object.assign({},O(this.wl.W())),{barSpacing:this.wl.he()})}iw(){this.tw.M()&&this.tw.m(this.getVisibleRange())}nw(){this.uu.M()&&this.uu.m(this.getVisibleLogicalRange())}sw(t){this._m.m(t.width,t.height)}}function ke(t){if(void 0===t||"custom"===t.type)return;const i=t;void 0!==i.minMove&&void 0===i.precision&&(i.precision=function(t){if(t>=1)return 0;let i=0;for(;i<8;i++){const n=Math.round(t);if(Math.abs(n-t)<1e-8)return i;t*=10}return i}(i.minMove))}function Ce(t){return function(t){if(D(t.handleScale)){const i=t.handleScale;t.handleScale={axisDoubleClickReset:{time:i,price:i},axisPressedMouseMove:{time:i,price:i},mouseWheel:i,pinch:i}}else if(void 0!==t.handleScale){const{axisPressedMouseMove:i,axisDoubleClickReset:n}=t.handleScale;D(i)&&(t.handleScale.axisPressedMouseMove={time:i,price:i}),D(n)&&(t.handleScale.axisDoubleClickReset={time:n,price:n})}const i=t.handleScroll;D(i)&&(t.handleScroll={horzTouchDrag:i,vertTouchDrag:i,mouseWheel:i,pressedMouseMove:i})}(t),t}class Te{constructor(t,i,n){this.ew=new Map,this.rw=new Map,this.hw=new k,this.lw=new k,this.aw=new k,this.ow=new ie(i);const s=void 0===n?O(be()):C(O(be()),Ce(n));this.N_=i,this.$b=new Ns(t,s,i),this.$b.Fp().l((t=>{this.hw.M()&&this.hw.m(this._w(t()))}),this),this.$b.Wp().l((t=>{this.lw.M()&&this.lw.m(this._w(t()))}),this),this.$b.Wc().l((t=>{this.aw.M()&&this.aw.m(this._w(t()))}),this);const e=this.$b.qt();this.uw=new ye(e,this.$b.Zm(),this.N_)}remove(){this.$b.Fp().p(this),this.$b.Wp().p(this),this.$b.Wc().p(this),this.uw.S(),this.$b.S(),this.ew.clear(),this.rw.clear(),this.hw.S(),this.lw.S(),this.aw.S(),this.ow.S()}resize(t,i,n){this.autoSizeActive()||this.$b.qm(t,i,n)}addCustomSeries(t,i){const n=v(t),s=Object.assign(Object.assign({},h),n.defaultOptions());return this.cw("Custom",s,i,n)}addAreaSeries(t){return this.cw("Area",s,t)}addBaselineSeries(t){return this.cw("Baseline",e,t)}addBarSeries(t){return this.cw("Bar",i,t)}addCandlestickSeries(i={}){return function(t){void 
0!==t.borderColor&&(t.borderUpColor=t.borderColor,t.borderDownColor=t.borderColor),void 0!==t.wickColor&&(t.wickUpColor=t.wickColor,t.wickDownColor=t.wickColor)}(i),this.cw("Candlestick",t,i)}addHistogramSeries(t){return this.cw("Histogram",r,t)}addLineSeries(t){return this.cw("Line",n,t)}removeSeries(t){const i=d(this.ew.get(t)),n=this.ow.hd(i);this.$b.qt().hd(i),this.dw(n),this.ew.delete(t),this.rw.delete(i)}Gb(t,i){this.dw(this.ow.Ab(t,i))}Qb(t,i){this.dw(this.ow.Ib(t,i))}subscribeClick(t){this.hw.l(t)}unsubscribeClick(t){this.hw.v(t)}subscribeCrosshairMove(t){this.aw.l(t)}unsubscribeCrosshairMove(t){this.aw.v(t)}subscribeDblClick(t){this.lw.l(t)}unsubscribeDblClick(t){this.lw.v(t)}priceScale(t){return new we(this.$b,t)}timeScale(){return this.uw}applyOptions(t){this.$b.Nh(Ce(t))}options(){return this.$b.W()}takeScreenshot(){return this.$b.tb()}autoSizeActive(){return this.$b.rb()}chartElement(){return this.$b.hb()}paneSize(){const t=this.$b.ab();return{height:t.height,width:t.width}}setCrosshairPosition(t,i,n){const s=this.ew.get(n);if(void 0===s)return;const e=this.$b.qt().cr(s);null!==e&&this.$b.qt().Qc(t,i,e)}clearCrosshairPosition(){this.$b.qt().td(!0)}cw(t,i,n={},s){ke(n.priceFormat);const e=C(O(l),O(i),n),r=this.$b.qt().ed(t,e,s),h=new Se(r,this,this,this,this.N_);return this.ew.set(h,r),this.rw.set(r,h),h}dw(t){const i=this.$b.qt();i.nd(t.kt.Au,t.kt.jb,t.kt.Hb),t.Fb.forEach(((t,i)=>i.it(t.He,t.Wb))),i.zu()}fw(t){return d(this.rw.get(t))}_w(t){const i=new Map;t.Mb.forEach(((t,n)=>{const s=n.Xh(),e=_e(s)(t);if("Custom"!==s)c(function(t){return void 0!==t.open||void 0!==t.value}(e));else{const t=n.Ma();c(!t||!1===t(e))}i.set(this.fw(n),e)}));const n=void 0===t.gb?void 0:this.fw(t.gb);return{time:t.bb,logical:t.se,point:t.wb,hoveredSeries:n,hoveredObjectId:t.xb,seriesData:i,sourceEvent:t.Sb}}}function Pe(t,i,n){let s;if(R(t)){const i=document.getElementById(t);c(null!==i,`Cannot find element in DOM with id=${t}`),s=i}else s=t;const e=new Te(s,i,n);return 
i.setOptions(e.options()),e}const Re=Object.assign(Object.assign({},l),h);var De=Object.freeze({__proto__:null,get ColorType(){return Dn},get CrosshairMode(){return rt},get LastPriceAnimationMode(){return Pn},get LineStyle(){return o},get LineType(){return a},get MismatchDirection(){return Bi},get PriceLineSource(){return Rn},get PriceScaleMode(){return cn},get TickMarkType(){return On},get TrackingModeExitMode(){return Tn},createChart:function(t,i){return Pe(t,new Zn,Zn.Td(i))},createChartEx:Pe,customSeriesDefaultOptions:Re,isBusinessDay:Bn,isUTCTimestamp:Vn,version:function(){return"4.1.1"}});window.LightweightCharts=De}(); diff --git a/src/aleph/vm/orchestrator/views/static/main.css b/src/aleph/vm/orchestrator/views/static/main.css new file mode 100644 index 000000000..2b14d4b60 --- /dev/null +++ b/src/aleph/vm/orchestrator/views/static/main.css @@ -0,0 +1,107 @@ +body { + font-family: IBM Plex Regular, monospace; + white-space: normal; + margin: auto; + max-width: 800px; +} + +details { + margin-top: 30px; +} + +main { + width: 90vw; + margin: 2vh auto; + max-width: 800px; +} + +progress { + width: 100%; + height: 0.5em; +} + +.virtualization-wrapper { + height: 30px; + display: flex; + align-items: end; +} + +#loader-container { + text-align: center; + padding: 20px; + height: 80px; + vertical-align: center; +} + +.loader { + display: inline-block; + width: 5px; + height: 20px; + margin: 0.3px; + background: #207AC9; +} + +@keyframes move { + 0% { + height: 10px; + } + + 50% { + height: 5px; + } + + 100% { + height: 10px; + } +} + +@keyframes move2 { + 0% { + height: 5px; + } + + 50% { + height: 10px; + } + + 100% { + height: 5px; + } +} + +#loader-one { + animation-name: move; + animation-duration: 1s; + animation-iteration-count: infinite; +} + +#loader-two { + animation-name: move2; + animation-duration: 1s; + animation-iteration-count: infinite; +} + +#loader-three { + animation-name: move; + animation-duration: 1s; + animation-iteration-count: infinite; 
+} + +#chart { + width: 100%; + height: 300px; +} + +.flex { + display: flex; + justify-content: space-between; +} + +#chart-wrapper{ + display: none; +} + +footer{ + font-size: 70%; + opacity: .75; +} diff --git a/src/aleph/vm/orchestrator/views/templates/index.html b/src/aleph/vm/orchestrator/views/templates/index.html new file mode 100644 index 000000000..7caf38700 --- /dev/null +++ b/src/aleph/vm/orchestrator/views/templates/index.html @@ -0,0 +1,420 @@ + + + + + Aleph.im Compute Node + + + + +
    + +

    + +
    +

    + This is an Aleph Cloud Compute Resource Node. +

    +

    + It executes user programs stored on the aleph.im network in Virtual Machines. +

    +

    + See the source code repository for more info. +

    + +
    + +
    + +
    +

    Multiaddr

    +

    + This node is exposed on the following addresses: +

    + + +
    + +
    + +
    +

    Diagnostic

    + +
    +

    Virtualization

    +

    + Virtualization + + ... + + + + + + +

    +
    +
      +
      + +
      + +
      +

      Virtualization (legacy)

      +

      + Virtualization + + ... + + + + + + +

      +
      +
        +
        + +
        + +
        +

        Host connectivity

        +

        + Host + + ... + + + + + + +

        +
        +

        IPv4

        +
          +

          IPv6

          +
            +

            VM Egress IPv6

            +

            + VM Egress IPv6 is a test to check if virtual machines are able to connect to the IPv6 internet. + Enabling VM IPv6 Egress requires a specific configuration that is not applied automatically. It is not yet + required to run programs inside, but it's required to run instance, so will be mandatory soon. +

            +
            +

            + VM Egress IPv6 + + is ... + + + + + + +

            +
            +
            + +
            + +
            +

            GPUs

            +
            + Loading GPU list + + ... + + + + + + +
            +
            + +
            + ℹ️ More information + +
            +

            Latest metrics

            +

            + The aleph.im network measures the performance of all nodes in the network. New metrics are published + every 10 minutes. +

            +

            + 🔍 Browse the metrics in the explorer +

            +
              +
              + +

              APIs

              +

              + Host status check API: /status/check/host +

              +

              + + Virtualization check API: /status/check/fastapi +

              +

              + + VM Egress IPv6:
              + /vm/$check_fastapi_vm_id/ip/6 +

              +
              + +
              + +
              +

              Version

              +

              + Running version $version. +

              + +
              + + + + +
              + +
              + +

              This downloads about 35 MB of data from the network

              +
              + +
              +
              + 0% + +
              + +
              + +
              + + +
              +
              +
              + + + + + + diff --git a/src/aleph/vm/orchestrator/vm/__init__.py b/src/aleph/vm/orchestrator/vm/__init__.py new file mode 100644 index 000000000..7d85867cc --- /dev/null +++ b/src/aleph/vm/orchestrator/vm/__init__.py @@ -0,0 +1,9 @@ +from aleph.vm.controllers.firecracker import ( + AlephFirecrackerInstance, + AlephFirecrackerProgram, +) + +__all__ = ( + "AlephFirecrackerProgram", + "AlephFirecrackerInstance", +) diff --git a/src/aleph/vm/pool.py b/src/aleph/vm/pool.py new file mode 100644 index 000000000..edcccd43a --- /dev/null +++ b/src/aleph/vm/pool.py @@ -0,0 +1,342 @@ +from __future__ import annotations + +import asyncio +import json +import logging +from collections.abc import Iterable +from datetime import datetime, timezone +from typing import List + +from aleph_message.models import ( + Chain, + ExecutableMessage, + ItemHash, + Payment, + PaymentType, +) +from pydantic import parse_raw_as + +from aleph.vm.conf import settings +from aleph.vm.controllers.firecracker.snapshot_manager import SnapshotManager +from aleph.vm.network.hostnetwork import Network, make_ipv6_allocator +from aleph.vm.orchestrator.metrics import get_execution_records +from aleph.vm.orchestrator.utils import update_aggregate_settings +from aleph.vm.resources import GpuDevice, HostGPU, get_gpu_devices +from aleph.vm.systemd import SystemDManager +from aleph.vm.utils import get_message_executable_content +from aleph.vm.vm_type import VmType + +from .models import ExecutableContent, VmExecution + +logger = logging.getLogger(__name__) + + +class VmPool: + """Pool of existing VMs + + For function VM we keep the VM a while after they have run, so we can reuse them and thus decrease response time. + After running, a VM is saved for future reuse from the same function during a + configurable duration. 
+ """ + + executions: dict[ItemHash, VmExecution] + message_cache: dict[str, ExecutableMessage] + network: Network | None + snapshot_manager: SnapshotManager | None = None + systemd_manager: SystemDManager + creation_lock: asyncio.Lock + gpus: List[GpuDevice] = [] + + def __init__(self, loop: asyncio.AbstractEventLoop): + self.executions = {} + self.message_cache = {} + + asyncio.set_event_loop(loop) + self.creation_lock = asyncio.Lock() + + self.network = ( + Network( + vm_ipv4_address_pool_range=settings.IPV4_ADDRESS_POOL, + vm_network_size=settings.IPV4_NETWORK_PREFIX_LENGTH, + external_interface=settings.NETWORK_INTERFACE, + ipv6_allocator=make_ipv6_allocator( + allocation_policy=settings.IPV6_ALLOCATION_POLICY, + address_pool=settings.IPV6_ADDRESS_POOL, + subnet_prefix=settings.IPV6_SUBNET_PREFIX, + ), + use_ndp_proxy=settings.USE_NDP_PROXY, + ipv6_forwarding_enabled=settings.IPV6_FORWARDING_ENABLED, + ) + if settings.ALLOW_VM_NETWORKING + else None + ) + self.systemd_manager = SystemDManager() + if settings.SNAPSHOT_FREQUENCY > 0: + self.snapshot_manager = SnapshotManager() + + def setup(self) -> None: + """Set up the VM pool and the network.""" + if self.network: + self.network.setup() + + if self.snapshot_manager: + logger.debug("Initializing SnapshotManager ...") + self.snapshot_manager.run_in_thread() + + if settings.ENABLE_GPU_SUPPORT: + # Refresh and get latest settings aggregate + asyncio.run(update_aggregate_settings()) + logger.debug("Detecting GPU devices ...") + self.gpus = get_gpu_devices() + + def teardown(self) -> None: + """Stop the VM pool and the network properly.""" + if self.network: + # self.network.teardown() + # FIXME Temporary disable tearing down the network + # Fix issue of persistent instances running inside systemd controller losing their ipv4 nat access + # upon supervisor restart or upgrade. 
+ pass + + async def create_a_vm( + self, vm_hash: ItemHash, message: ExecutableContent, original: ExecutableContent, persistent: bool + ) -> VmExecution: + """Create a new VM from an Aleph function or instance message.""" + async with self.creation_lock: + # Check if an execution is already present for this VM, then return it. + # Do not `await` in this section. + current_execution = self.get_running_vm(vm_hash) + if current_execution: + return current_execution + else: + execution = VmExecution( + vm_hash=vm_hash, + message=message, + original=original, + snapshot_manager=self.snapshot_manager, + systemd_manager=self.systemd_manager, + persistent=persistent, + ) + self.executions[vm_hash] = execution + + try: + # First assign Host GPUs from the available + execution.prepare_gpus(self.get_available_gpus()) + # Prepare VM general Resources and also the GPUs + await execution.prepare() + + vm_id = self.get_unique_vm_id() + + if self.network: + vm_type = VmType.from_message_content(message) + tap_interface = await self.network.prepare_tap(vm_id, vm_hash, vm_type) + # If the network interface already exists, remove it and then re-create it. 
+ if self.network.interface_exists(vm_id): + await tap_interface.delete() + await self.network.create_tap(vm_id, tap_interface) + else: + tap_interface = None + + execution.create(vm_id=vm_id, tap_interface=tap_interface) + await execution.start() + + # Start VM and snapshots automatically + # If the execution is confidential, don't start it because we need to wait for the session certificate + # files, use the endpoint /control/machine/{ref}/confidential/initialize to get session files and start the VM + if execution.persistent and not execution.is_confidential: + self.systemd_manager.enable_and_start(execution.controller_service) + await execution.wait_for_init() + if execution.is_program and execution.vm: + await execution.vm.load_configuration() + + if execution.vm and execution.vm.support_snapshot and self.snapshot_manager: + await self.snapshot_manager.start_for(vm=execution.vm) + except Exception: + # ensure the VM is removed from the pool on creation error + self.forget_vm(vm_hash) + raise + + self._schedule_forget_on_stop(execution) + + return execution + + def get_unique_vm_id(self) -> int: + """Get a unique identifier for the VM. + + This identifier is used to name the network interface and in the IPv4 range + dedicated to the VM. + """ + # Take the first id that is not already taken + currently_used_vm_ids = {execution.vm_id for execution in self.executions.values()} + for i in range(settings.START_ID_INDEX, 255**2): + if i not in currently_used_vm_ids: + return i + msg = "No available value for vm_id." + raise ValueError(msg) + + def get_running_vm(self, vm_hash: ItemHash) -> VmExecution | None: + """Return a running VM or None. 
Disables the VM expiration task.""" + execution = self.executions.get(vm_hash) + if execution and execution.is_running and not execution.is_stopping: + execution.cancel_expiration() + return execution + else: + return None + + async def stop_vm(self, vm_hash: ItemHash) -> VmExecution | None: + """Stop a VM.""" + execution = self.executions.get(vm_hash) + if execution: + if execution.persistent: + await self.stop_persistent_execution(execution) + else: + await execution.stop() + return execution + else: + return None + + async def stop_persistent_execution(self, execution: VmExecution): + """Stop persistent VMs in the pool.""" + assert execution.persistent, "Execution isn't persistent" + self.systemd_manager.stop_and_disable(execution.controller_service) + await execution.stop() + + def forget_vm(self, vm_hash: ItemHash) -> None: + """Remove a VM from the executions pool. + + Used after self.create_a_vm(...) raised an error in order to + completely forget about the execution and enforce a new execution + when attempted again. 
+ """ + try: + del self.executions[vm_hash] + except KeyError: + pass + + def _schedule_forget_on_stop(self, execution: VmExecution): + """Create a task that will remove the VM from the pool after it stops.""" + + async def forget_on_stop(stop_event: asyncio.Event): + await stop_event.wait() + self.forget_vm(execution.vm_hash) + + _ = asyncio.create_task(forget_on_stop(stop_event=execution.stop_event)) + + async def load_persistent_executions(self): + """Load persistent executions from the database.""" + saved_executions = await get_execution_records() + for saved_execution in saved_executions: + vm_hash = ItemHash(saved_execution.vm_hash) + + if vm_hash in self.executions or not saved_execution.persistent: + # The execution is already loaded or isn't persistent, skip it + continue + + vm_id = saved_execution.vm_id + + message_dict = json.loads(saved_execution.message) + original_dict = json.loads(saved_execution.original_message) + + execution = VmExecution( + vm_hash=vm_hash, + message=get_message_executable_content(message_dict), + original=get_message_executable_content(original_dict), + snapshot_manager=self.snapshot_manager, + systemd_manager=self.systemd_manager, + persistent=saved_execution.persistent, + ) + + if execution.is_running: + # TODO: Improve the way that we re-create running execution + # Load existing GPUs assigned to VMs + execution.gpus = parse_raw_as(List[HostGPU], saved_execution.gpus) if saved_execution.gpus else [] + # Load and instantiate the rest of resources and already assigned GPUs + await execution.prepare() + if self.network: + vm_type = VmType.from_message_content(execution.message) + tap_interface = await self.network.prepare_tap(vm_id, vm_hash, vm_type) + else: + tap_interface = None + + vm = execution.create(vm_id=vm_id, tap_interface=tap_interface, prepare=False) + await vm.start_guest_api() + execution.ready_event.set() + execution.times.started_at = datetime.now(tz=timezone.utc) + + self._schedule_forget_on_stop(execution) + 
+ # Start the snapshot manager for the VM + if vm.support_snapshot and self.snapshot_manager: + await self.snapshot_manager.start_for(vm=execution.vm) + + self.executions[vm_hash] = execution + else: + execution.uuid = saved_execution.uuid + await execution.record_usage() + + logger.debug(f"Loaded {len(self.executions)} executions") + + async def stop(self): + """Stop ephemeral VMs in the pool.""" + # Stop executions in parallel: + await asyncio.gather(*(execution.stop() for execution in self.get_ephemeral_executions())) + + def get_ephemeral_executions(self) -> Iterable[VmExecution]: + executions = ( + execution for _, execution in self.executions.items() if execution.is_running and not execution.persistent + ) + return executions or [] + + def get_persistent_executions(self) -> Iterable[VmExecution]: + executions = ( + execution + for _vm_hash, execution in self.executions.items() + if execution.is_running and execution.persistent + ) + return executions or [] + + def get_instance_executions(self) -> Iterable[VmExecution]: + executions = ( + execution + for _vm_hash, execution in self.executions.items() + if execution.is_running and execution.is_instance + ) + return executions or [] + + def get_available_gpus(self) -> List[GpuDevice]: + available_gpus = [] + for gpu in self.gpus: + used = False + for _, execution in self.executions.items(): + if execution.uses_gpu(gpu.pci_host): + used = True + break + if not used: + available_gpus.append(gpu) + return available_gpus + + def get_executions_by_sender(self, payment_type: PaymentType) -> dict[str, dict[str, list[VmExecution]]]: + """Return all executions of the given type, grouped by sender and by chain.""" + executions_by_sender: dict[str, dict[str, list[VmExecution]]] = {} + for vm_hash, execution in self.executions.items(): + if execution.vm_hash in (settings.CHECK_FASTAPI_VM_ID, settings.LEGACY_CHECK_FASTAPI_VM_ID): + # Ignore Diagnostic VM execution + continue + + if not execution.is_running: + # Ignore the 
execution that is stopping or not running anymore + continue + if execution.vm_hash == settings.CHECK_FASTAPI_VM_ID: + # Ignore Diagnostic VM execution + continue + execution_payment = ( + execution.message.payment + if execution.message.payment + else Payment(chain=Chain.ETH, type=PaymentType.hold) + ) + if execution_payment.type == payment_type: + sender = execution.message.address + chain = execution_payment.chain + executions_by_sender.setdefault(sender, {}) + executions_by_sender[sender].setdefault(chain, []).append(execution) + return executions_by_sender diff --git a/src/aleph/vm/resources.py b/src/aleph/vm/resources.py new file mode 100644 index 000000000..4776c254c --- /dev/null +++ b/src/aleph/vm/resources.py @@ -0,0 +1,144 @@ +import subprocess +from enum import Enum +from typing import List, Optional + +from aleph_message.models import HashableModel +from pydantic import BaseModel, Extra, Field + +from aleph.vm.conf import settings +from aleph.vm.orchestrator.utils import get_compatible_gpus + + +class HostGPU(BaseModel): + """Host GPU properties detail.""" + + pci_host: str = Field(description="GPU PCI host address") + + class Config: + extra = Extra.forbid + + +class GpuDeviceClass(str, Enum): + """GPU device class. Look at https://admin.pci-ids.ucw.cz/read/PD/03""" + + VGA_COMPATIBLE_CONTROLLER = "0300" + _3D_CONTROLLER = "0302" + + +class GpuDevice(HashableModel): + """GPU properties.""" + + vendor: str = Field(description="GPU vendor name") + model: str | None = Field(description="GPU model name on Aleph Network") + device_name: str = Field(description="GPU vendor card name") + device_class: GpuDeviceClass = Field( + description="GPU device class. 
Look at https://admin.pci-ids.ucw.cz/read/PD/03" + ) + pci_host: str = Field(description="Host PCI bus for this device") + device_id: str = Field(description="GPU vendor & device ids") + compatible: bool = Field(description="GPU compatibility with Aleph Network", default=False) + + class Config: + extra = Extra.forbid + + +class CompatibleGPU(BaseModel): + """Compatible GPU properties detail.""" + + vendor: str = Field(description="GPU vendor name") + model: str = Field(description="GPU model name") + name: str = Field(description="GPU full name") + device_id: str = Field(description="GPU device id code including vendor_id and model_id") + + +def is_gpu_device_class(device_class: str) -> bool: + try: + GpuDeviceClass(device_class) + return True + except ValueError: + return False + + +def get_gpu_model(device_id: str) -> bool | None: + """Returns a GPU model name if it's found from the compatible ones.""" + model_gpu_set = {gpu["device_id"]: gpu["model"] for gpu in get_compatible_gpus()} + try: + return model_gpu_set[device_id] + except KeyError: + return None + + +def is_gpu_compatible(device_id: str) -> bool: + """Checks if a GPU is compatible based on vendor and model IDs.""" + compatible_gpu_set = {gpu["device_id"] for gpu in get_compatible_gpus()} + return device_id in compatible_gpu_set + + +def get_vendor_name(vendor_id: str) -> str: + match vendor_id: + case "10de": + return "NVIDIA" + case "1002": + return "AMD" + case "8086": + return "Intel" + case _: + raise ValueError("Device vendor not compatible") + + +def is_kernel_enabled_gpu(pci_host: str) -> bool: + # Get detailed info about Kernel drivers used by this device. 
+ # Needs to use specifically only the kernel driver vfio-pci to be compatible for QEmu virtualization + result = subprocess.run(["lspci", "-s", pci_host, "-nnk"], capture_output=True, text=True, check=True) + details = result.stdout.split("\n") + if "\tKernel driver in use: vfio-pci" in details: + return True + + return False + + +def parse_gpu_device_info(line: str) -> Optional[GpuDevice]: + """Parse GPU device info from a line of lspci output.""" + + pci_host, device = line.split(' "', maxsplit=1) + + if not is_kernel_enabled_gpu(pci_host): + return None + + device_class, device_vendor, device_info = device.split('" "', maxsplit=2) + device_class = device_class.split("[", maxsplit=1)[1][:-1] + + if not is_gpu_device_class(device_class): + return None + + device_class = GpuDeviceClass(device_class) + + vendor, vendor_id = device_vendor.rsplit(" [", maxsplit=1) + vendor_id = vendor_id[:-1] + vendor_name = get_vendor_name(vendor_id) + device_name = device_info.split('"', maxsplit=1)[0] + device_name, model_id = device_name.rsplit(" [", maxsplit=1) + model_id = model_id[:-1] + device_id = f"{vendor_id}:{model_id}" + model = get_gpu_model(device_id=device_id) + compatible = is_gpu_compatible(device_id=device_id) + + return GpuDevice( + pci_host=pci_host, + vendor=vendor_name, + model=model, + device_name=device_name, + device_class=device_class, + device_id=device_id, + compatible=compatible, + ) + + +def get_gpu_devices() -> Optional[List[GpuDevice]]: + """Get GPU info using lspci command.""" + + result = subprocess.run(["lspci", "-mmnnn"], capture_output=True, text=True, check=True) + gpu_devices = list( + {device for line in result.stdout.split("\n") if line and (device := parse_gpu_device_info(line)) is not None} + ) + return gpu_devices if gpu_devices else None diff --git a/src/aleph/vm/sevclient.py b/src/aleph/vm/sevclient.py new file mode 100644 index 000000000..01f25acf9 --- /dev/null +++ b/src/aleph/vm/sevclient.py @@ -0,0 +1,30 @@ +from pathlib import Path 
+
+from aleph.vm.utils import run_in_subprocess
+
+
+class SevClient:
+    """Wrapper around the `sevctl` AMD SEV tool.
+
+    Exports the platform certificate chain once into `sev_dir/platform` and
+    serves it from that cache afterwards.
+    """
+
+    sev_dir: Path
+    sev_ctl_executable: Path
+    certificates_dir: Path
+    certificates_archive: Path
+
+    def __init__(self, sev_dir: Path, sev_ctl_executable: Path):
+        self.sev_dir = sev_dir
+        self.sev_ctl_executable = sev_ctl_executable
+        self.certificates_dir = sev_dir / "platform"
+        self.certificates_dir.mkdir(exist_ok=True, parents=True)
+        self.certificates_archive = self.certificates_dir / "certs_export.cert"
+
+    async def sev_ctl_cmd(self, *args) -> bytes:
+        """Run a command of the 'sevctl' tool."""
+        return await run_in_subprocess(
+            [str(self.sev_ctl_executable), *args],
+            check=True,
+        )
+
+    async def get_certificates(self) -> Path:
+        """Return the path of the certificate archive, exporting it on first use."""
+        if not self.certificates_archive.is_file():
+            _ = await self.sev_ctl_cmd("export", str(self.certificates_archive))
+
+        return self.certificates_archive
diff --git a/src/aleph/vm/storage.py b/src/aleph/vm/storage.py
new file mode 100644
index 000000000..a6eef447c
--- /dev/null
+++ b/src/aleph/vm/storage.py
@@ -0,0 +1,463 @@
+"""
+This module is in charge of providing the source code corresponding to a 'code id'.
+
+In this prototype, it returns a hardcoded example.
+In the future, it should connect to an Aleph node and retrieve the code from there.
+""" + +import asyncio +import json +import logging +import re +import sys +from datetime import datetime, timezone +from pathlib import Path +from shutil import copy2, make_archive +from subprocess import CalledProcessError + +import aiohttp +from aleph_message.models import ( + AlephMessage, + InstanceMessage, + ItemHash, + ItemType, + ProgramMessage, + StoreMessage, + parse_message, +) +from aleph_message.models.execution.instance import RootfsVolume +from aleph_message.models.execution.program import Encoding +from aleph_message.models.execution.volume import ( + ImmutableVolume, + MachineVolume, + PersistentVolume, + VolumePersistence, +) + +from aleph.vm.conf import SnapshotCompressionAlgorithm, settings +from aleph.vm.utils import fix_message_validation, run_in_subprocess + +logger = logging.getLogger(__name__) +DEVICE_MAPPER_DIRECTORY = "/dev/mapper" + + +class CorruptedFilesystemError(Exception): + """Raised when a file containing a filesystem is corrupted.""" + + +async def chown_to_jailman(path: Path) -> None: + """Changes ownership of the target when running firecracker inside jailer isolation.""" + if not path.exists(): + msg = "No such file to change ownership from" + raise FileNotFoundError(msg, path) + if settings.USE_JAILER: + await run_in_subprocess(["chown", "jailman:jailman", str(path)]) + + +async def file_downloaded_by_another_task(final_path: Path) -> None: + """Wait for a file to be downloaded by another task in parallel.""" + + # Wait for the file to be created + while not final_path.is_file(): + await asyncio.sleep(0.1) + + +async def download_file_in_chunks(url: str, tmp_path: Path) -> None: + async with aiohttp.ClientSession() as session: + resp = await session.get(url) + resp.raise_for_status() + + with open(tmp_path, "wb") as cache_file: + counter = 0 + while True: + chunk = await resp.content.read(65536) + if not chunk: + break + cache_file.write(chunk) + counter += 1 + if not (counter % 20): + sys.stdout.write(".") + 
sys.stdout.flush() + + sys.stdout.write("\n") + sys.stdout.flush() + + +async def download_file(url: str, local_path: Path) -> None: + # TODO: Limit max size of download to the message specification + if local_path.is_file(): + logger.debug(f"File already exists: {local_path}") + return + + # Avoid partial downloads and incomplete files by only moving the file when it's complete. + tmp_path = Path(f"{local_path}.part") + + logger.debug(f"Downloading {url} -> {tmp_path}") + download_attempts = 10 + for attempt in range(download_attempts): + logger.debug(f"Download attempt {attempt + 1}/{download_attempts}...") + try: + # Ensure the file is not being downloaded by another task in parallel. + tmp_path.touch(exist_ok=False) + + await download_file_in_chunks(url, tmp_path) + tmp_path.rename(local_path) + logger.debug(f"Download complete, moved {tmp_path} -> {local_path}") + return + except FileExistsError as file_exists_error: + # Another task is already downloading the file. + # Use `asyncio.timeout` manager after dropping support for Python 3.10 + logger.debug(f"File already being downloaded by another task: {local_path}") + try: + await asyncio.wait_for(file_downloaded_by_another_task(local_path), timeout=30) + except TimeoutError as error: + if attempt < (download_attempts - 1): + logger.warning( + f"Download failed (waiting for another taks), retrying attempt {attempt + 1}/{download_attempts}..." 
+ ) + continue + else: + logger.warning(f"Download of {url} failed (waiting for another task), aborting...") + raise error from file_exists_error + except ( + aiohttp.ClientConnectionError, + aiohttp.ClientResponseError, + aiohttp.ClientPayloadError, + ) as error: + if attempt < (download_attempts - 1): + logger.warning(f"Download failed, retrying attempt {attempt + 1}/{download_attempts}...") + # continue # continue inside try/finally block is unimplemented in `mypyc` + else: + logger.warning(f"Download of {url} failed (aborting...") + raise error + finally: + # Ensure no partial file is left behind + tmp_path.unlink(missing_ok=True) + + +async def download_file_from_ipfs_or_connector(ref: str, cache_path: Path, filetype: str) -> None: + """Download a file from the IPFS Gateway if possible, else from the vm-connector.""" + + if cache_path.is_file(): + logger.debug(f"File already exists: {cache_path}") + return + + message: StoreMessage = await get_store_message(ref) + + if message.content.item_type == ItemType.ipfs: + # Download IPFS files from the IPFS gateway directly + cid = message.content.item_hash + url = f"{settings.IPFS_SERVER}/{cid}" + await download_file(url, cache_path) + else: + # Download via the vm-connector + path_mapping = { + "runtime": "/download/runtime", + "code": "/download/code", + "data": "/download/data", + } + path = path_mapping[filetype] + url = f"{settings.CONNECTOR_URL}{path}/{ref}" + await download_file(url, cache_path) + + +async def get_latest_amend(item_hash: str) -> str: + if settings.FAKE_DATA_PROGRAM: + return item_hash + else: + url = f"{settings.CONNECTOR_URL}/compute/latest_amend/{item_hash}" + async with aiohttp.ClientSession() as session: + resp = await session.get(url) + resp.raise_for_status() + result: str = await resp.json() + assert isinstance(result, str) + return result or item_hash + + +async def load_message(path: Path) -> AlephMessage: + """Load a message from the cache on disk.""" + with open(path) as cache_file: 
+    msg = json.load(cache_file)
+
+    if path in (settings.FAKE_DATA_MESSAGE, settings.FAKE_INSTANCE_MESSAGE):
+        # Ensure validation passes while tweaking message content
+        msg = fix_message_validation(msg)
+
+    return parse_message(message_dict=msg)
+
+
+async def get_message(ref: str) -> AlephMessage:
+    """Download (if not cached) and parse the Aleph message `ref` via the vm-connector."""
+    cache_path = (Path(settings.MESSAGE_CACHE) / ref).with_suffix(".json")
+    url = f"{settings.CONNECTOR_URL}/download/message/{ref}"
+    await download_file(url, cache_path)
+    return await load_message(cache_path)
+
+
+async def get_executable_message(ref: str) -> ProgramMessage | InstanceMessage:
+    """Return the Program or Instance message for `ref`.
+
+    In fake/test modes the message is read from the configured fake message
+    files instead of being downloaded from the vm-connector.
+    """
+    if ref == settings.FAKE_INSTANCE_ID:
+        logger.debug("Using the fake instance message since the ref matches")
+        cache_path = settings.FAKE_INSTANCE_MESSAGE
+    elif settings.FAKE_DATA_PROGRAM:
+        cache_path = settings.FAKE_DATA_MESSAGE
+        logger.debug("Using the fake data message")
+    else:
+        cache_path = (Path(settings.MESSAGE_CACHE) / ref).with_suffix(".json")
+        url = f"{settings.CONNECTOR_URL}/download/message/{ref}"
+        await download_file(url, cache_path)
+
+    return await load_message(cache_path)
+
+
+async def get_store_message(ref: str) -> StoreMessage:
+    """Fetch message `ref` and ensure it is a StoreMessage, raising ValueError otherwise."""
+    message = await get_message(ref)
+    if not isinstance(message, StoreMessage):
+        msg = f"Expected a store message, got {message.type}"
+        raise ValueError(msg)
+    return message
+
+
+async def get_code_path(ref: str) -> Path:
+    """Return the local path of the code archive for `ref`.
+
+    In fake-data mode the archive is (re)generated from the fake data
+    directory using the encoding declared by the fake message.
+    """
+    if settings.FAKE_DATA_PROGRAM:
+        archive_path = Path(settings.FAKE_DATA_PROGRAM)
+
+        encoding: Encoding = (await get_executable_message(ref="fake-message")).content.code.encoding
+        if encoding == Encoding.squashfs:
+            # NOTE(review): `archive_path.name` drops the directory part, so the
+            # squashfs image lands in the current working directory - confirm intended.
+            squashfs_path = Path(archive_path.name + ".squashfs")
+            squashfs_path.unlink(missing_ok=True)  # always rebuild from the fake data
+            await run_in_subprocess(["mksquashfs", str(archive_path), str(squashfs_path)])
+            logger.debug(f"Squashfs generated on {squashfs_path}")
+            return squashfs_path
+        elif encoding == Encoding.zip:
+            make_archive(str(archive_path), "zip", root_dir=archive_path)
+            zip_path = Path(f"{archive_path}.zip")
+            logger.debug(f"Zip generated on {zip_path}")
+            return zip_path
+        else:
+            msg = f"Unsupported encoding: {encoding}"
+            raise ValueError(msg)
+
+    cache_path = Path(settings.CODE_CACHE) / ref
+    await download_file_from_ipfs_or_connector(ref, cache_path, "code")
+    return cache_path
+
+
+async def get_data_path(ref: str) -> Path:
+    """Return the local path of the data archive for `ref` (zipped fake data in test mode)."""
+    if settings.FAKE_DATA_PROGRAM and settings.FAKE_DATA_DATA:
+        data_dir = settings.FAKE_DATA_DATA
+        make_archive(str(data_dir), "zip", data_dir)
+        return Path(f"{data_dir}.zip")
+
+    cache_path = Path(settings.DATA_CACHE) / ref
+    await download_file_from_ipfs_or_connector(ref, cache_path, "data")
+    return cache_path
+
+
+async def check_squashfs_integrity(path: Path) -> None:
+    """Check that the squashfs file is not corrupted."""
+    try:
+        await run_in_subprocess(["unsquashfs", "-stat", "-no-progress", str(path)], check=True)
+    except CalledProcessError as error:
+        msg = f"Corrupted squashfs file: {path}"
+        raise CorruptedFilesystemError(msg) from error
+
+
+async def get_runtime_path(ref: str) -> Path:
+    """Obtain the runtime used for the rootfs of a program."""
+    if settings.FAKE_DATA_PROGRAM:
+        await check_squashfs_integrity(Path(settings.FAKE_DATA_RUNTIME))
+        return Path(settings.FAKE_DATA_RUNTIME)
+
+    cache_path = Path(settings.RUNTIME_CACHE) / ref
+    await download_file_from_ipfs_or_connector(ref, cache_path, "runtime")
+
+    await check_squashfs_integrity(cache_path)
+    await chown_to_jailman(cache_path)
+    return cache_path
+
+
+async def get_rootfs_base_path(ref: ItemHash) -> Path:
+    """Obtain the base partition for the rootfs of an instance."""
+    if settings.USE_FAKE_INSTANCE_BASE and settings.FAKE_INSTANCE_BASE:
+        logger.debug("Using fake instance base")
+        return Path(settings.FAKE_INSTANCE_BASE)
+
+    cache_path = Path(settings.RUNTIME_CACHE) / ref
+
+    # if not cache_path.is_file():
+    await download_file_from_ipfs_or_connector(ref, cache_path, "runtime")
+
+    await chown_to_jailman(cache_path)
+    return cache_path
+
+
+async def 
create_ext4(path: Path, size_mib: int) -> bool:
+    """Create an ext4 filesystem image of `size_mib` MiB at `path`.
+
+    Returns True if the image was created, False if it already existed.
+    """
+    if path.is_file():
+        logger.debug(f"File already exists, skipping ext4 creation on {path}")
+        return False
+    tmp_path = f"{path}.tmp"
+    # Build in a temporary file and rename at the end so a partially
+    # initialized image is never left at `path`.
+    await run_in_subprocess(["fallocate", "-l", f"{size_mib}M", str(tmp_path)])
+    await run_in_subprocess(["mkfs.ext4", tmp_path])
+    await chown_to_jailman(Path(tmp_path))
+    Path(tmp_path).rename(path)
+    return True
+
+
+async def create_volume_file(volume: PersistentVolume | RootfsVolume, namespace: str) -> Path:
+    """Allocate (if needed) the backing file of a persistent/rootfs volume and return its path."""
+    volume_name = volume.name if isinstance(volume, PersistentVolume) else "rootfs"
+    # Assume that the main filesystem format is BTRFS
+    path = settings.PERSISTENT_VOLUMES_DIR / namespace / f"{volume_name}.btrfs"
+    if not path.is_file():
+        logger.debug(f"Creating {volume.size_mib}MB volume")
+        # Ensure that the parent directory exists
+        path.parent.mkdir(exist_ok=True)
+        # Create an empty file the right size
+        await run_in_subprocess(["fallocate", "-l", f"{volume.size_mib}M", str(path)])
+        await chown_to_jailman(path)
+    return path
+
+
+async def create_loopback_device(path: Path, read_only: bool = False) -> str:
+    """Attach `path` to a free loop device and return the device name (e.g. "/dev/loop0")."""
+    command_args = ["losetup", "--find", "--show"]
+    if read_only:
+        command_args.append("--read-only")
+    command_args.append(str(path))
+    stdout = await run_in_subprocess(command_args)
+    loop_device = stdout.strip().decode()
+    return loop_device
+
+
+async def get_block_size(device_path: Path) -> int:
+    """Return the size of `device_path` in 512-byte sectors (`blockdev --getsz`)."""
+    command = ["blockdev", "--getsz", str(device_path)]
+    stdout = await run_in_subprocess(command)
+    block_size = int(stdout.decode("UTF-8").strip())
+    return block_size
+
+
+async def create_mapped_device(device_name: str, table_command: str) -> None:
+    """Create a device-mapper device from a dmsetup table passed on stdin."""
+    command = ["dmsetup", "create", device_name]
+    await run_in_subprocess(command, stdin_input=table_command.encode())
+
+
+async def resize_and_tune_file_system(device_path: Path, mount_path: Path) -> None:
+    """Mount `device_path`, grow its BTRFS filesystem to the device size, then unmount."""
+    # This tune is needed to assign a random fsid to BTRFS device to be able to mount it
+    await run_in_subprocess(["btrfstune", "-m", str(device_path)])
+    await run_in_subprocess(["mount", str(device_path), str(mount_path)])
+    await run_in_subprocess(["btrfs", "filesystem", "resize", "max", str(mount_path)])
+    await run_in_subprocess(["umount", str(mount_path)])
+
+
+async def create_devmapper(volume: PersistentVolume | RootfsVolume, namespace: str) -> Path:
+    """It creates a /dev/mapper/DEVICE inside the VM, that is an extended mapped device of the volume specified.
+    We follow the steps described here: https://community.aleph.im/t/deploying-mutable-vm-instances-on-aleph/56/2
+    """
+    volume_name = volume.name if isinstance(volume, PersistentVolume) else "rootfs"
+    mapped_volume_name = f"{namespace}_{volume_name}"
+    path_mapped_volume_name = Path(DEVICE_MAPPER_DIRECTORY) / mapped_volume_name
+
+    # Check if rootfs volume is created
+    if path_mapped_volume_name.is_block_device():
+        return path_mapped_volume_name
+
+    parent_path = await get_rootfs_base_path(volume.parent.ref)
+
+    image_volume_name = volume.parent.ref
+    image_block_size: int = await get_block_size(parent_path)
+    path_image_device_name = Path(DEVICE_MAPPER_DIRECTORY) / image_volume_name
+    # Checks if parent rootfs image block device is created
+    if not path_image_device_name.is_block_device():
+        image_loop_device = await create_loopback_device(parent_path, read_only=True)
+
+        # Creates the parent rootfs image block device with the entire image size
+        base_table_command = f"0 {image_block_size} linear {image_loop_device} 0"
+        await create_mapped_device(image_volume_name, base_table_command)
+
+    volume_path = await create_volume_file(volume, namespace)
+    extended_block_size: int = await get_block_size(volume_path)
+
+    mapped_volume_name_base = f"{namespace}_base"
+    path_mapped_volume_name_base = Path(DEVICE_MAPPER_DIRECTORY) / mapped_volume_name_base
+    if not path_mapped_volume_name_base.is_block_device():
+        # Creates the base rootfs block device with the entire rootfs size using the image block device as source
+        base_table_command = (
+            f"0 {image_block_size} linear {path_image_device_name} 0\n"
+            f"{image_block_size} {extended_block_size} zero "
+        )
+        await create_mapped_device(mapped_volume_name_base, base_table_command)
+
+    extended_loop_device = await create_loopback_device(volume_path)
+
+    # Creates the final rootfs block device that is a snapshot of the base block device
+    snapshot_table_command = (
+        f"0 {extended_block_size} snapshot {path_mapped_volume_name_base} {extended_loop_device} P 8"
+    )
+    await create_mapped_device(mapped_volume_name, snapshot_table_command)
+
+    mount_path = Path(f"/mnt/{mapped_volume_name}")
+    mount_path.mkdir(parents=True, exist_ok=True)
+    await resize_and_tune_file_system(path_mapped_volume_name, mount_path)
+    await chown_to_jailman(path_image_device_name)
+    await chown_to_jailman(path_mapped_volume_name_base)
+    await chown_to_jailman(path_mapped_volume_name)
+    return path_mapped_volume_name
+
+
+async def get_existing_file(ref: str) -> Path:
+    """Return the local path of an immutable volume, downloading it if needed."""
+    if settings.FAKE_DATA_PROGRAM and settings.FAKE_DATA_VOLUME:
+        return Path(settings.FAKE_DATA_VOLUME)
+
+    cache_path = Path(settings.DATA_CACHE) / ref
+    await download_file_from_ipfs_or_connector(ref, cache_path, "data")
+
+    await chown_to_jailman(cache_path)
+    return cache_path
+
+
+async def get_volume_path(volume: MachineVolume, namespace: str) -> Path:
+    """Return the host path backing `volume` for the VM `namespace`, creating it if needed."""
+    if isinstance(volume, ImmutableVolume):
+        ref = volume.ref
+        return await get_existing_file(ref)
+    elif isinstance(volume, PersistentVolume | RootfsVolume):
+        volume_name = volume.name if isinstance(volume, PersistentVolume) else "rootfs"
+
+        if volume.persistence != VolumePersistence.host:
+            msg = "Only 'host' persistence is supported"
+            raise NotImplementedError(msg)
+        if not re.match(r"^[\w\-_/]+$", volume_name):
+            # Sanitize volume names
+            logger.debug(f"Invalid values for volume name: {repr(volume_name)} detected, sanitizing")
+            volume_name = re.sub(r"[^\w\-_]", "_", volume_name)
(Path(settings.PERSISTENT_VOLUMES_DIR) / namespace).mkdir(exist_ok=True) + if volume.parent: + return await create_devmapper(volume, namespace) + else: + volume_path = Path(settings.PERSISTENT_VOLUMES_DIR) / namespace / f"{volume_name}.ext4" + await create_ext4(volume_path, volume.size_mib) + return volume_path + else: + msg = "Only immutable volumes are supported" + raise NotImplementedError(msg) + + +async def create_volume_snapshot(path: Path) -> Path: + new_path = Path(f"{path}.{datetime.now(tz=timezone.utc).date().strftime('%d%m%Y-%H%M%S')}.bak") + copy2(path, new_path) + return new_path + + +async def compress_volume_snapshot( + path: Path, + algorithm: SnapshotCompressionAlgorithm = SnapshotCompressionAlgorithm.gz, +) -> Path: + if algorithm != SnapshotCompressionAlgorithm.gz: + raise NotImplementedError + + new_path = Path(f"{path}.gz") + + await run_in_subprocess( + [ + "gzip", + str(path), + ] + ) + + return new_path diff --git a/src/aleph/vm/systemd.py b/src/aleph/vm/systemd.py new file mode 100644 index 000000000..fec117164 --- /dev/null +++ b/src/aleph/vm/systemd.py @@ -0,0 +1,79 @@ +""" +async SystemD Manager implementation. +""" + +import logging + +import dbus +from dbus import DBusException, SystemBus +from dbus.proxies import Interface + +logger = logging.getLogger(__name__) + + +class SystemDManager: + """SystemD Manager class. + + Used to manage the systemd services on the host on Linux. 
+ """ + + bus: SystemBus + manager: Interface + + def __init__(self): + self.bus = dbus.SystemBus() + systemd = self.bus.get_object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") + self.manager = dbus.Interface(systemd, "org.freedesktop.systemd1.Manager") + + def stop_and_disable(self, service: str) -> None: + if self.is_service_active(service): + self.stop(service) + if self.is_service_enabled(service): + self.disable(service) + + def enable(self, service: str) -> None: + self.manager.EnableUnitFiles([service], False, True) + logger.debug(f"Enabled {service} service") + + def start(self, service: str) -> None: + self.manager.StartUnit(service, "replace") + logger.debug(f"Started {service} service") + + def stop(self, service: str) -> None: + self.manager.StopUnit(service, "replace") + logger.debug(f"Stopped {service} service") + + def restart(self, service: str) -> None: + self.manager.RestartUnit(service, "replace") + logger.debug(f"Restarted {service} service") + + def disable(self, service: str) -> None: + self.manager.DisableUnitFiles([service], False) + logger.debug(f"Disabled {service} service") + + def is_service_enabled(self, service: str) -> bool: + try: + return self.manager.GetUnitFileState(service) == "enabled" + except DBusException as error: + logger.error(error) + return False + + def is_service_active(self, service: str) -> bool: + try: + if not self.is_service_enabled(service): + return False + unit_path = self.manager.GetUnit(service) + systemd_service = self.bus.get_object("org.freedesktop.systemd1", object_path=unit_path) + unit = dbus.Interface(systemd_service, "org.freedesktop.systemd1.Unit") + unit_properties = dbus.Interface(unit, "org.freedesktop.DBus.Properties") + active_state = unit_properties.Get("org.freedesktop.systemd1.Unit", "ActiveState") + return active_state == "active" + except DBusException as error: + logger.error(error) + return False + + def enable_and_start(self, service: str) -> None: + if not 
self.is_service_enabled(service): + self.enable(service) + if not self.is_service_active(service): + self.start(service) diff --git a/src/aleph/vm/utils/__init__.py b/src/aleph/vm/utils/__init__.py new file mode 100644 index 000000000..d8eecad95 --- /dev/null +++ b/src/aleph/vm/utils/__init__.py @@ -0,0 +1,253 @@ +import asyncio +import dataclasses +import hashlib +import json +import logging +import subprocess +from base64 import b16encode, b32decode +from collections.abc import Callable, Coroutine +from dataclasses import asdict as dataclass_as_dict +from dataclasses import is_dataclass +from pathlib import Path +from shutil import disk_usage +from typing import Any, Optional + +import aiodns +import msgpack +from aiohttp_cors import ResourceOptions, custom_cors +from aleph_message.models import ExecutableContent, InstanceContent, ProgramContent +from eth_typing import HexAddress, HexStr +from eth_utils import hexstr_if_str, is_address, to_hex + +logger = logging.getLogger(__name__) + + +def get_message_executable_content(message_dict: dict) -> ExecutableContent: + try: + return ProgramContent.parse_obj(message_dict) + except ValueError: + return InstanceContent.parse_obj(message_dict) + + +def cors_allow_all(function): + default_config = { + "*": ResourceOptions( + allow_credentials=True, + allow_headers="*", + expose_headers="*", + ) + } + return custom_cors(config=default_config)(function) + + +class MsgpackSerializable: + def __post_init__(self, *args, **kwargs): + if not is_dataclass(self): + msg = f"Decorated class must be a dataclass: {self}" + raise TypeError(msg) + super().__init_subclass__(*args, **kwargs) + + def as_msgpack(self) -> bytes: + if is_dataclass(self): + return msgpack.dumps(dataclasses.asdict(self), use_bin_type=True) # type: ignore + else: + msg = f"Decorated class must be a dataclass: {self}" + raise TypeError(msg) + + +def b32_to_b16(string: str) -> bytes: + """Convert base32 encoded bytes to base16 encoded bytes.""" + # Add padding + 
hash_b32: str = string.upper() + "=" * (56 - len(string)) + hash_bytes: bytes = b32decode(hash_b32.encode()) + return b16encode(hash_bytes).lower() + + +async def get_ref_from_dns(domain): + resolver = aiodns.DNSResolver() + record = await resolver.query(domain, "TXT") + return record[0].text + + +def to_json(o: Any) -> dict | str: + if hasattr(o, "to_dict"): # default method + return o.to_dict() + elif hasattr(o, "dict"): # Pydantic + return o.dict() + elif is_dataclass(o): + return dataclass_as_dict(o) # type: ignore + else: + return str(o) + + +def dumps_for_json(o: Any, indent: int | None = None): + return json.dumps(o, default=to_json, indent=indent) + + +async def run_and_log_exception(coro: Coroutine): + """Exceptions in coroutines may go unnoticed if they are not handled.""" + try: + return await coro + except Exception as error: + logger.exception(error) + raise + + +def create_task_log_exceptions(coro: Coroutine, *, name=None): + """Ensure that exceptions running in coroutines are logged.""" + return asyncio.create_task(run_and_log_exception(coro), name=name) + + +async def run_in_subprocess(command: list[str], check: bool = True, stdin_input: bytes | None = None) -> bytes: + """Run the specified command in a subprocess, returns the stdout of the process.""" + command = [str(arg) for arg in command] + logger.debug(f"command: {' '.join(command)}") + + process = await asyncio.create_subprocess_exec( + *command, + stdin=asyncio.subprocess.PIPE, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await process.communicate(input=stdin_input) + + if check and process.returncode: + logger.error( + f"Command failed with error code {process.returncode}:\n" + f" stdin = {stdin_input!r}\n" + f" command = {command}\n" + f" stdout = {stderr!r}" + ) + raise subprocess.CalledProcessError(process.returncode, str(command), stderr.decode()) + + return stdout + + +def is_command_available(command): + try: + 
subprocess.check_output(["which", command], stderr=subprocess.STDOUT) + return True + except subprocess.CalledProcessError: + return False + + +def check_system_module(module_path: str) -> str | None: + p = Path("/sys/module") / module_path + if not p.exists(): + return None + return p.read_text().strip() + + +def check_amd_sev_supported() -> bool: + """Check if AMD SEV is supported on the system. + + AMD Secure Encrypted Virtualization (SEV) + Uses one key per virtual machine to isolate guests and the hypervisor from one another. + """ + return (check_system_module("kvm_amd/parameters/sev") == "Y") and Path("/dev/sev").exists() + + +def check_amd_sev_es_supported() -> bool: + """Check if AMD SEV-ES is supported on the system. + + AMD Secure Encrypted Virtualization-Encrypted State (SEV-ES) + Encrypts all CPU register contents when a VM stops running. + """ + return (check_system_module("kvm_amd/parameters/sev_es") == "Y") and Path("/dev/sev").exists() + + +def check_amd_sev_snp_supported() -> bool: + """Check if AMD SEV-SNP is supported on the system. + + AMD Secure Encrypted Virtualization-Secure Nested Paging (SEV-SNP) + Adds strong memory integrity protection to help prevent malicious hypervisor-based attacks like data replay, + memory re-mapping, and more in order to create an isolated execution environment. + """ + return check_system_module("kvm_amd/parameters/sev_snp") == "Y" + + +def fix_message_validation(message: dict) -> dict: + """Patch a fake message program to pass validation.""" + message["item_content"] = json.dumps(message["content"]) + message["item_hash"] = hashlib.sha256(message["item_content"].encode("utf-8")).hexdigest() + return message + + +class HostNotFoundError(Exception): + pass + + +async def ping(host: str, packets: int, timeout: int): + """ + Waits for a host to respond to a ping request. 
+ """ + + try: + await run_in_subprocess(["ping", "-c", str(packets), "-W", str(timeout), host], check=True) + except subprocess.CalledProcessError as err: + raise HostNotFoundError() from err + + +def check_disk_space(bytes_to_use: int) -> bool: + host_disk_usage = disk_usage("/") + return host_disk_usage.free >= bytes_to_use + + +class NotEnoughDiskSpaceError(OSError): + pass + + +async def get_path_size(path: Path) -> int: + """Get the size in bytes of a given path.""" + if path.is_dir(): + return sum([f.stat().st_size for f in path.glob("**/*")]) + elif path.is_block_device(): + return await get_block_device_size(str(path)) + elif path.is_file(): + return path.stat().st_size + else: + msg = f"Unknown path type for {path}" + raise ValueError(msg) + + +async def get_block_device_size(device: str) -> int: + """Get the size in bytes of a given device block.""" + output = await run_in_subprocess( + ["lsblk", device, "--output", "SIZE", "--bytes", "--noheadings", "--nodeps"], + check=True, + ) + size = int(output.strip().decode()) + return size + + +def to_normalized_address(value: str) -> HexAddress: + """ + Converts an address to its normalized hexadecimal representation. + """ + try: + hex_address = hexstr_if_str(to_hex, value).lower() + except AttributeError: + msg = f"Value must be any string, instead got type {type(value)}" + raise TypeError(msg) + if is_address(hex_address): + return HexAddress(HexStr(hex_address)) + else: + msg = f"Unknown format {value}, attempted to normalize to {hex_address}" + raise ValueError(msg) + + +def md5sum(file_path: Path) -> str: + """Calculate the MD5 hash of a file. 
Externalize to the `md5sum` command for better performance.""" + return subprocess.check_output(["md5sum", file_path], text=True).split()[0] + + +def file_hashes_differ(source: Path, destination: Path, checksum: Callable[[Path], str] = md5sum) -> bool: + """Check if the MD5 hash of two files differ.""" + if not source.exists(): + msg = f"Source file does not exist: {source}" + raise FileNotFoundError(msg) + + if not destination.exists(): + return True + + return checksum(source) != checksum(destination) diff --git a/src/aleph/vm/utils/logs.py b/src/aleph/vm/utils/logs.py new file mode 100644 index 000000000..868aad7a3 --- /dev/null +++ b/src/aleph/vm/utils/logs.py @@ -0,0 +1,98 @@ +import asyncio +import logging +from collections.abc import Callable, Generator +from datetime import datetime, timedelta +from typing import TypedDict + +from systemd import journal + +logger = logging.getLogger(__name__) + + +class EntryDict(TypedDict): + SYSLOG_IDENTIFIER: str + MESSAGE: str + __REALTIME_TIMESTAMP: datetime + + +def make_logs_queue(stdout_identifier, stderr_identifier, skip_past=False) -> tuple[asyncio.Queue, Callable[[], None]]: + """Create a queue which streams the logs for the process. + + @param stdout_identifier: journald identifier for process stdout + @param stderr_identifier: journald identifier for process stderr + @param skip_past: Skip past history. + @return: queue and function to cancel the queue. + + The consumer is required to call the queue cancel function when it's done consuming the queue. + + Works by creating a journald reader, and using `add_reader` to call a callback when + data is available for reading. + In the callback we check the message type and fill the queue accordingly + + For more information refer to the sd-journal(3) manpage + and systemd.journal module documentation. 
+ """ + journal_reader = journal.Reader() + journal_reader.add_match(SYSLOG_IDENTIFIER=stdout_identifier) + journal_reader.add_match(SYSLOG_IDENTIFIER=stderr_identifier) + queue: asyncio.Queue = asyncio.Queue(maxsize=5) + tasks: list[asyncio.Task] = [] + + loop = asyncio.get_event_loop() + + async def process_messages() -> None: + """Enqueue all the available log entries, wait if queue is full, then wait for new message via add_reader""" + # Remove reader so we don't get called again while processing + loop.remove_reader(journal_reader.fileno()) + entry: EntryDict + for entry in journal_reader: + log_type = "stdout" if entry["SYSLOG_IDENTIFIER"] == stdout_identifier else "stderr" + msg = entry["MESSAGE"] + # will wait if queue is full + await queue.put((log_type, msg)) + journal_reader.process() # reset fd status + journal_reader.process() # reset fd status + # Call _ready_for_read read when entries are readable again, this is non-blocking + loop.add_reader(journal_reader.fileno(), _ready_for_read) + + def _ready_for_read() -> None: + # wrapper around process_messages as add_reader don't take an async func + task = loop.create_task(process_messages(), name=f"process_messages-queue-{id(queue)}") + tasks.append(task) + task.add_done_callback(tasks.remove) + + if skip_past: + # seek_tail doesn't work see https://github.com/systemd/systemd/issues/17662 + journal_reader.seek_realtime(datetime.now() - timedelta(seconds=10)) + + _ready_for_read() + + def do_cancel(): + logger.info(f"cancelling queue and reader {journal_reader}") + loop.remove_reader(journal_reader.fileno()) + for task in tasks: + task.cancel() + journal_reader.close() + + return queue, do_cancel + + +def get_past_vm_logs(stdout_identifier, stderr_identifier) -> Generator[EntryDict, None, None]: + """Get existing log for the VM identifiers. 
+ + @param stdout_identifier: journald identifier for process stdout + @param stderr_identifier: journald identifier for process stderr + @return: an iterator of log entry + + Works by creating a journald reader, and using `add_reader` to call a callback when + data is available for reading. + + For more information refer to the sd-journal(3) manpage + and systemd.journal module documentation. + """ + r = journal.Reader() + r.add_match(SYSLOG_IDENTIFIER=stdout_identifier) + r.add_match(SYSLOG_IDENTIFIER=stderr_identifier) + + r.seek_head() + yield from r diff --git a/src/aleph/vm/utils/test_helpers.py b/src/aleph/vm/utils/test_helpers.py new file mode 100644 index 000000000..ecdf4f40b --- /dev/null +++ b/src/aleph/vm/utils/test_helpers.py @@ -0,0 +1,86 @@ +import datetime +import json + +import eth_account.messages +import pytest +from eth_account.datastructures import SignedMessage +from eth_account.signers.local import LocalAccount +from jwcrypto import jwk +from jwcrypto.jwa import JWA + + +@pytest.fixture +def patch_datetime_now(monkeypatch): + """Fixture for patching the datetime.now() and datetime.utcnow() methods + to return a fixed datetime object. + This fixture creates a subclass of `datetime.datetime` called `mydatetime`, + which overrides the `now()` and `utcnow()` class methods to return a fixed + datetime object specified by `FAKE_TIME`. 
+ """ + + class MockDateTime(datetime.datetime): + FAKE_TIME = datetime.datetime(2010, 12, 25, 17, 5, 55) + + @classmethod + def now(cls, tz=None, *args, **kwargs): + return cls.FAKE_TIME.replace(tzinfo=tz) + + @classmethod + def utcnow(cls, *args, **kwargs): + return cls.FAKE_TIME + + monkeypatch.setattr(datetime, "datetime", MockDateTime) + return MockDateTime + + +async def generate_signer_and_signed_headers_for_operation( + patch_datetime_now, operation_payload: dict +) -> tuple[LocalAccount, dict]: + """Generate a temporary eth_account for testing and sign the operation with it""" + account = eth_account.Account() + signer_account = account.create() + key = jwk.JWK.generate( + kty="EC", + crv="P-256", + # key_ops=["verify"], + ) + pubkey = { + "pubkey": json.loads(key.export_public()), + "alg": "ECDSA", + "domain": "localhost", + "address": signer_account.address, + "expires": (patch_datetime_now.FAKE_TIME + datetime.timedelta(days=1)).isoformat() + "Z", + } + pubkey_payload = json.dumps(pubkey).encode("utf-8").hex() + signable_message = eth_account.messages.encode_defunct(hexstr=pubkey_payload) + signed_message: SignedMessage = signer_account.sign_message(signable_message) + pubkey_signature = to_0x_hex(signed_message.signature) + pubkey_signature_header = json.dumps( + { + "payload": pubkey_payload, + "signature": pubkey_signature, + } + ) + payload_as_bytes = json.dumps(operation_payload).encode("utf-8") + + payload_signature = JWA.signing_alg("ES256").sign(key, payload_as_bytes) + headers = { + "X-SignedPubKey": pubkey_signature_header, + "X-SignedOperation": json.dumps( + { + "payload": payload_as_bytes.hex(), + "signature": payload_signature.hex(), + } + ), + } + return signer_account, headers + + +def to_0x_hex(b: bytes) -> str: + """ + Convert the bytes to a 0x-prefixed hex string + """ + + # force this for compat between different hexbytes versions which behave differenty + # and conflict with other package don't allow us to have the version we want + 
return "0x" + bytes.hex(b) diff --git a/src/aleph/vm/version.py b/src/aleph/vm/version.py new file mode 100644 index 000000000..73118aa74 --- /dev/null +++ b/src/aleph/vm/version.py @@ -0,0 +1,33 @@ +import logging +from subprocess import STDOUT, CalledProcessError, check_output + +logger = logging.getLogger(__name__) + + +def get_version_from_git() -> str | None: + try: + return check_output(("git", "describe", "--tags"), stderr=STDOUT).strip().decode() + except FileNotFoundError: + logger.warning("version: git not found") + return None + except CalledProcessError as err: + logger.info("version: git description not available: %s", err.output.decode().strip()) + return None + + +def get_version_from_apt() -> str | None: + try: + import apt + + return apt.Cache().get("aleph-vm").installed.version + except (ImportError, AttributeError): + logger.warning("apt version not available") + return None + + +def get_version() -> str | None: + return get_version_from_git() or get_version_from_apt() + + +# The version number is hardcoded in the following line when packaging the software +__version__ = get_version() or "version-unavailable" diff --git a/src/aleph/vm/vm_type.py b/src/aleph/vm/vm_type.py new file mode 100644 index 000000000..eb0b5e42a --- /dev/null +++ b/src/aleph/vm/vm_type.py @@ -0,0 +1,22 @@ +from enum import Enum + +from aleph_message.models import ExecutableContent, InstanceContent, ProgramContent + + +class VmType(Enum): + microvm = 1 + persistent_program = 2 + instance = 3 + + @staticmethod + def from_message_content(content: ExecutableContent) -> "VmType": + if isinstance(content, InstanceContent): + return VmType.instance + + elif isinstance(content, ProgramContent): + if content.on.persistent: + return VmType.persistent_program + return VmType.microvm + + msg = f"Unexpected message content type: {type(content)}" + raise TypeError(msg) diff --git a/tests/supervisor/test_authentication.py b/tests/supervisor/test_authentication.py new file mode 100644 
index 000000000..b46dd315f --- /dev/null +++ b/tests/supervisor/test_authentication.py @@ -0,0 +1,336 @@ +import datetime +import json +from typing import Any + +import eth_account.messages +import pytest +import solathon +from aiohttp import web +from eth_account.datastructures import SignedMessage +from jwcrypto import jwk, jws +from jwcrypto.common import base64url_decode + +from aleph.vm.orchestrator.views.authentication import ( + authenticate_jwk, + require_jwk_authentication, +) +from aleph.vm.utils.test_helpers import ( + generate_signer_and_signed_headers_for_operation, + patch_datetime_now, + to_0x_hex, +) + +# Ensure this is not removed by ruff +assert patch_datetime_now + + +@pytest.mark.asyncio +async def test_require_jwk_authentication_missing_header(aiohttp_client): + """An HTTP request to a view decorated by `@require_jwk_authentication` must return an error + with a status code 400 and an error message in JSON when no authentication is provided. + """ + app = web.Application() + + @require_jwk_authentication + async def view(request, authenticated_sender): + return web.Response(text="ok") + + app.router.add_get("", view) + client = await aiohttp_client(app) + resp = await client.get("/") + assert resp.status == 400 + + r = await resp.json() + assert {"error": "Missing X-SignedPubKey header"} == r + + +@pytest.mark.asyncio +async def test_require_jwk_authentication_invalid_json_bugkey(aiohttp_client): + """An HTTP request to a view decorated by `@require_jwk_authentication` must return an error + with a status code 400 and an error message in JSON when the authentication key format is invalid. 
+ """ + + app = web.Application() + + @require_jwk_authentication + async def view(request, authenticated_sender): + return web.Response(text="ok") + + app.router.add_get("", view) + client = await aiohttp_client(app) + resp = await client.get("/", headers={"X-SignedPubKey": "invalid_json"}) + assert resp.status == 400 + + r = await resp.json() + assert {"error": "Invalid X-SignedPubKey format"} == r + + +@pytest.mark.asyncio +async def test_require_jwk_authentication_expired(aiohttp_client): + app = web.Application() + account = eth_account.Account() + signer_account = account.create() + key = jwk.JWK.generate( + kty="EC", + crv="P-256", + # key_ops=["verify"], + ) + + pubkey = { + "pubkey": json.loads(key.export_public()), + "alg": "ECDSA", + "address": signer_account.address, + "expires": "2023-05-02T10:44:42.754994Z", + } + pubkey_payload = json.dumps(pubkey).encode("utf-8").hex() + signable_message = eth_account.messages.encode_defunct(hexstr=pubkey_payload) + signed_message: SignedMessage = signer_account.sign_message(signable_message) + pubkey_signature = to_0x_hex(signed_message.signature) + + pubkey_signature_header = json.dumps( + { + "payload": pubkey_payload, + "signature": pubkey_signature, + } + ) + + @require_jwk_authentication + async def view(request, authenticated_sender): + return web.Response(text="ok") + + app.router.add_get("", view) + client = await aiohttp_client(app) + + resp = await client.get("/", headers={"X-SignedPubKey": pubkey_signature_header}) + assert resp.status == 401 + + r = await resp.json() + assert {"error": "Token expired"} == r + + +@pytest.mark.asyncio +async def test_require_jwk_authentication_wrong_key(aiohttp_client, patch_datetime_now): + app = web.Application() + + @require_jwk_authentication + async def view(request, authenticated_sender): + return web.Response(text="ok") + + app.router.add_get("", view) + client = await aiohttp_client(app) + headers = { + "X-SignedPubKey": ( + json.dumps( + { + "payload": 
"7b227075626b6579223a207b22637276223a2022502d323536222c20226b7479223a20224543222c202278223a202273765759314e5652614a683231527834576a765f67657057772d714d436f774d76304a52353057327a7545222c202279223a2022794950424d6135474e7a49555878656c513762415a5f437776303875763448774d4c49456c656c43534473227d2c2022616c67223a20224543445341222c2022646f6d61696e223a20226c6f63616c686f7374222c202261646472657373223a2022307842323564623537643234304438353132366262364234384661633635343837323161343537343538222c202265787069726573223a2022323032332d30352d30325431303a34343a34322e3735343939345a227d", + "signature": "0x58e1498a6c4f88ac1982e7147ff49405ffe1b9633e048bb74cf741abb05ce0b63bb406f3079f641ae89f597654ecd2a704d37ffbf86a28e462140033cc0eedcb1c", + } + ) + ) + } + payload = {"time": "2010-12-25T17:05:55Z", "method": "GET", "path": "/", "domain": "localhost"} + headers["X-SignedOperation"] = json.dumps( + { + "payload": bytes.hex(json.dumps(payload).encode("utf-8")), + "signature": "96ffdbbd1704d5f6bfe4698235a0de0d2f58668deaa4371422bee26664f313f51fd483c78c34c6b317fc209779f9ddd9c45accf558e3bf881b49ad970ebf0ade", + } + ) + + resp = await client.get("/", headers=headers) + assert resp.status == 401, await resp.text() + + r = await resp.json() + assert {"error": "Invalid signature"} == r + + +@pytest.mark.asyncio +async def test_require_jwk_eth_signature_dont_match(aiohttp_client, patch_datetime_now): + app = web.Application() + + @require_jwk_authentication + async def view(request, authenticated_sender): + return web.Response(text="ok") + + account = eth_account.Account() + signer_account = account.create() + key = jwk.JWK.generate( + kty="EC", + crv="P-256", + # key_ops=["verify"], + ) + + pubkey = { + "pubkey": json.loads(key.export_public()), + "alg": "ECDSA", + "address": signer_account.address, + "expires": "2023-05-02T10:44:42.754994Z", + } + pubkey_payload = json.dumps(pubkey).encode("utf-8").hex() + signable_message = eth_account.messages.encode_defunct(hexstr=pubkey_payload) + signed_message: 
SignedMessage = signer_account.sign_message(signable_message) + pubkey_signature = to_0x_hex(signed_message.signature) + + app.router.add_get("", view) + client = await aiohttp_client(app) + headers = { + "X-SignedPubKey": ( + json.dumps( + { + "payload": pubkey_payload, + "signature": pubkey_signature, + } + ) + ) + } + invalid_operation_payload = {"time": "2010-12-25T17:05:55Z", "method": "GET", "path": "/", "domain": "baddomain"} + headers["X-SignedOperation"] = json.dumps( + { + "payload": bytes.hex(json.dumps(invalid_operation_payload).encode("utf-8")), + "signature": "96ffdbbd1704d5f6bfe4698235a0de0d2f58668deaa4371422bee26664f313f51fd483c78c34c6b317fc209779f9ddd9c45accf558e3bf881b49ad970ebf0ade", + } + ) + + resp = await client.get("/", headers=headers) + assert resp.status == 401, await resp.text() + + r = await resp.json() + assert {"error": "Invalid domain"} == r + + +@pytest.mark.asyncio +async def test_jwk(): + payload = "abc123" + key = jwk.JWK.generate( + kty="EC", + crv="P-256", + ) + pubkey = json.loads(key.export_public()) + jws_signer = jws.JWSCore(alg="ES256", key=key, payload=payload, header=None) + signature_and_payload_json_dict = jws_signer.sign() + signature = base64url_decode(signature_and_payload_json_dict["signature"]) + + # Verify signature + pub_jwk = jws.JWK(**pubkey) + jws_verifier = jws.JWSCore( + alg="ES256", + key=pub_jwk, + payload=payload, + header=None, + ) + assert jws_verifier.verify(signature=signature) + + +@pytest.mark.asyncio +async def test_require_jwk_authentication_good_key(aiohttp_client, patch_datetime_now): + """An HTTP request to a view decorated by `@require_jwk_authentication` + auth correctly a temporary key signed by a wallet and an operation signed by that key""" + app = web.Application() + payload = {"time": "2010-12-25T17:05:55Z", "method": "GET", "path": "/", "domain": "localhost"} + signer_account, headers = await generate_signer_and_signed_headers_for_operation(patch_datetime_now, payload) + + 
@require_jwk_authentication + async def view(request, authenticated_sender): + assert authenticated_sender == signer_account.address + return web.Response(text="ok") + + app.router.add_get("", view) + client = await aiohttp_client(app) + + resp = await client.get("/", headers=headers) + assert resp.status == 200, await resp.text() + + r = await resp.text() + assert "ok" == r + + +async def generate_sol_signer_and_signed_headers_for_operation( + patch_datetime_now, operation_payload: dict +) -> tuple[solathon.Keypair, dict]: + """Generate a temporary sol account for testing and sign the operation with it""" + + kp = solathon.Keypair() + key = jwk.JWK.generate( + kty="EC", + crv="P-256", + # key_ops=["verify"], + ) + + pubkey = { + "pubkey": json.loads(key.export_public()), + "alg": "ECDSA", + "domain": "localhost", + "address": str(kp.public_key), + "expires": (patch_datetime_now.FAKE_TIME + datetime.timedelta(days=1)).isoformat() + "Z", + "chain": "SOL", + } + pubkey_payload = json.dumps(pubkey).encode("utf-8").hex() + import nacl.signing + + signed_message: nacl.signing.SignedMessage = kp.sign(pubkey_payload) + pubkey_signature = to_0x_hex(signed_message.signature) + pubkey_signature_header = json.dumps( + { + "payload": pubkey_payload, + "signature": pubkey_signature, + } + ) + payload_as_bytes = json.dumps(operation_payload).encode("utf-8") + from jwcrypto.jwa import JWA + + payload_signature = JWA.signing_alg("ES256").sign(key, payload_as_bytes) + headers = { + "X-SignedPubKey": pubkey_signature_header, + "X-SignedOperation": json.dumps( + { + "payload": payload_as_bytes.hex(), + "signature": payload_signature.hex(), + } + ), + } + return kp, headers + + +@pytest.mark.asyncio +async def test_require_jwk_authentication_good_key_solana(aiohttp_client, patch_datetime_now): + """An HTTP request to a view decorated by `@require_jwk_authentication` + auth correctly a temporary key signed by a wallet and an operation signed by that key""" + + app = web.Application() + 
payload = {"time": "2010-12-25T17:05:55Z", "method": "GET", "path": "/", "domain": "localhost"} + + signer_account, headers = await generate_sol_signer_and_signed_headers_for_operation(patch_datetime_now, payload) + + @require_jwk_authentication + async def view(request, authenticated_sender): + assert authenticated_sender == str(signer_account.public_key) + return web.Response(text="ok") + + app.router.add_get("", view) + client = await aiohttp_client(app) + + resp = await client.get("/", headers=headers) + assert resp.status == 200, await resp.text() + + r = await resp.text() + assert "ok" == r + + +@pytest.fixture +def valid_jwk_headers(mocker): + mocker.patch("aleph.vm.orchestrator.views.authentication.is_token_still_valid", lambda timestamp: True) + return { + "X-SignedPubKey": '{"payload":"7b227075626b6579223a7b22616c67223a224553323536222c22637276223a22502d323536222c22657874223a747275652c226b65795f6f7073223a5b22766572696679225d2c226b7479223a224543222c2278223a224b65763844614d7356454673365a6b4679525a4272796344564138566a334f656e49756f34743561374634222c2279223a2279597343556d715978654767673643743736794f47525873545867446444795234644f5639514c6f6b6477227d2c22616c67223a224543445341222c22646f6d61696e223a226c6f63616c686f7374222c2261646472657373223a22307833343932346566393435623933316431653932393337353535366636396365326537666535646363222c2265787069726573223a313638393337353132342e3532317d","signature":"0x58e1498a6c4f88ac1982e7147ff49405ffe1b9633e048bb74cf741abb05ce0b63bb406f3079f641ae89f597654ecd2a704d37ffbf86a28e462140033cc0eedcb1c"}', + "X-SignedOperation": '{"time":"2023-07-14T22:14:14.132Z","signature":"96ffdbbd1704d5f6bfe4698235a0de0d2f58668deaa4371422bee26664f313f51fd483c78c34c6b317fc209779f9ddd9c45accf558e3bf881b49ad970ebf0add"}', + } + + +@pytest.mark.parametrize("missing_header", ["X-SignedPubKey", "X-SignedOperation"]) +@pytest.mark.asyncio +async def test_missing_headers(valid_jwk_headers: dict[str, Any], mocker, missing_header: str): + del 
valid_jwk_headers[missing_header] + + request = mocker.AsyncMock() + request.headers = valid_jwk_headers + + with pytest.raises(web.HTTPBadRequest): + await authenticate_jwk(request) diff --git a/tests/supervisor/test_checkpayment.py b/tests/supervisor/test_checkpayment.py new file mode 100644 index 000000000..d554b7bc7 --- /dev/null +++ b/tests/supervisor/test_checkpayment.py @@ -0,0 +1,227 @@ +import asyncio + +import pytest +from aleph_message.models import Chain, InstanceContent, PaymentType +from aleph_message.status import MessageStatus + +from aleph.vm.conf import settings +from aleph.vm.models import VmExecution +from aleph.vm.orchestrator.tasks import check_payment +from aleph.vm.pool import VmPool + + +@pytest.fixture() +def fake_instance_content(): + fake = { + "address": "0x101d8D16372dBf5f1614adaE95Ee5CCE61998Fc9", + "time": 1713874241.800818, + "allow_amend": False, + "metadata": None, + "authorized_keys": None, + "variables": None, + "environment": {"reproducible": False, "internet": True, "aleph_api": True, "shared_cache": False}, + "resources": {"vcpus": 1, "memory": 256, "seconds": 30, "published_ports": None}, + "payment": {"type": "superfluid", "chain": "BASE"}, + "requirements": None, + "replaces": None, + "rootfs": { + "parent": {"ref": "63f07193e6ee9d207b7d1fcf8286f9aee34e6f12f101d2ec77c1229f92964696"}, + "ref": "63f07193e6ee9d207b7d1fcf8286f9aee34e6f12f101d2ec77c1229f92964696", + "use_latest": True, + "comment": "", + "persistence": "host", + "size_mib": 1000, + }, + } + + return fake + + +@pytest.mark.asyncio +async def test_enough_flow(mocker, fake_instance_content): + """Execution with community flow + + Cost 500 + Community 100 + CRN 400 + Both Flow are 500. 
+ Should not stop + + """ + mocker.patch.object(settings, "ALLOW_VM_NETWORKING", False) + mocker.patch.object(settings, "PAYMENT_RECEIVER_ADDRESS", "0xD39C335404a78E0BDCf6D50F29B86EFd57924288") + mock_community_wallet_address = "0x23C7A99d7AbebeD245d044685F1893aeA4b5Da90" + mocker.patch("aleph.vm.orchestrator.tasks.get_community_wallet_address", return_value=mock_community_wallet_address) + mocker.patch("aleph.vm.orchestrator.tasks.is_after_community_wallet_start", return_value=True) + + loop = asyncio.get_event_loop() + pool = VmPool(loop=loop) + mocker.patch("aleph.vm.orchestrator.tasks.get_stream", return_value=400, autospec=True) + mocker.patch("aleph.vm.orchestrator.tasks.get_message_status", return_value=MessageStatus.PROCESSED) + + async def compute_required_flow(executions): + return 500 * len(executions) + + mocker.patch("aleph.vm.orchestrator.tasks.compute_required_flow", compute_required_flow) + message = InstanceContent.parse_obj(fake_instance_content) + + hash = "decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca" + + mocker.patch.object(VmExecution, "is_running", new=True) + mocker.patch.object(VmExecution, "stop", new=mocker.AsyncMock(return_value=False)) + + execution = VmExecution( + vm_hash=hash, + message=message, + original=message, + persistent=False, + snapshot_manager=None, + systemd_manager=None, + ) + assert execution.times.started_at is None + + pool.executions = {hash: execution} + + executions_by_sender = pool.get_executions_by_sender(payment_type=PaymentType.superfluid) + assert len(executions_by_sender) == 1 + assert executions_by_sender == {"0x101d8D16372dBf5f1614adaE95Ee5CCE61998Fc9": {Chain.BASE: [execution]}} + + await check_payment(pool=pool) + assert pool.executions == {hash: execution} + execution.stop.assert_not_called() + + +@pytest.mark.asyncio +async def test_enough_flow_not_community(mocker, fake_instance_content): + """Execution without community flow + + Cost 500 + Community 0 + CRN 500 + Both Flow are 500. 
+ Should not stop + + """ + mocker.patch.object(settings, "ALLOW_VM_NETWORKING", False) + mocker.patch.object(settings, "PAYMENT_RECEIVER_ADDRESS", "0xD39C335404a78E0BDCf6D50F29B86EFd57924288") + mock_community_wallet_address = "0x23C7A99d7AbebeD245d044685F1893aeA4b5Da90" + mocker.patch("aleph.vm.orchestrator.tasks.get_community_wallet_address", return_value=mock_community_wallet_address) + mocker.patch("aleph.vm.orchestrator.tasks.is_after_community_wallet_start", return_value=False) + + loop = asyncio.get_event_loop() + pool = VmPool(loop=loop) + mocker.patch("aleph.vm.orchestrator.tasks.get_stream", return_value=500, autospec=True) + mocker.patch("aleph.vm.orchestrator.tasks.get_message_status", return_value=MessageStatus.PROCESSED) + + async def compute_required_flow(executions): + return 500 * len(executions) + + mocker.patch("aleph.vm.orchestrator.tasks.compute_required_flow", compute_required_flow) + message = InstanceContent.parse_obj(fake_instance_content) + + hash = "decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca" + + mocker.patch.object(VmExecution, "is_running", new=True) + mocker.patch.object(VmExecution, "stop", new=mocker.AsyncMock(return_value=False)) + + execution = VmExecution( + vm_hash=hash, + message=message, + original=message, + persistent=False, + snapshot_manager=None, + systemd_manager=None, + ) + assert execution.times.started_at is None + + pool.executions = {hash: execution} + + executions_by_sender = pool.get_executions_by_sender(payment_type=PaymentType.superfluid) + assert len(executions_by_sender) == 1 + assert executions_by_sender == {"0x101d8D16372dBf5f1614adaE95Ee5CCE61998Fc9": {Chain.BASE: [execution]}} + + await check_payment(pool=pool) + assert pool.executions == {hash: execution} + execution.stop.assert_not_called() + + +@pytest.mark.asyncio +async def test_not_enough_flow(mocker, fake_instance_content): + mocker.patch.object(settings, "ALLOW_VM_NETWORKING", False) + mocker.patch.object(settings, 
"PAYMENT_RECEIVER_ADDRESS", "0xD39C335404a78E0BDCf6D50F29B86EFd57924288") + mocker.patch.object(settings, "IPFS_SERVER", "https://ipfs.io/ipfs") + mock_community_wallet_address = "0x23C7A99d7AbebeD245d044685F1893aeA4b5Da90" + mocker.patch("aleph.vm.orchestrator.tasks.get_community_wallet_address", return_value=mock_community_wallet_address) + + loop = asyncio.get_event_loop() + pool = VmPool(loop=loop) + mocker.patch("aleph.vm.orchestrator.tasks.get_stream", return_value=2, autospec=True) + mocker.patch("aleph.vm.orchestrator.tasks.get_message_status", return_value=MessageStatus.PROCESSED) + mocker.patch("aleph.vm.orchestrator.tasks.compute_required_flow", return_value=5) + message = InstanceContent.parse_obj(fake_instance_content) + + mocker.patch.object(VmExecution, "is_running", new=True) + mocker.patch.object(VmExecution, "stop", new=mocker.AsyncMock(return_value=False)) + hash = "decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca" + execution = VmExecution( + vm_hash=hash, + message=message, + original=message, + persistent=False, + snapshot_manager=None, + systemd_manager=None, + ) + + pool.executions = {hash: execution} + + executions_by_sender = pool.get_executions_by_sender(payment_type=PaymentType.superfluid) + assert len(executions_by_sender) == 1 + assert executions_by_sender == {"0x101d8D16372dBf5f1614adaE95Ee5CCE61998Fc9": {Chain.BASE: [execution]}} + + await check_payment(pool=pool) + + execution.stop.assert_called_with() + + +@pytest.mark.asyncio +async def test_not_enough_community_flow(mocker, fake_instance_content): + mocker.patch.object(settings, "ALLOW_VM_NETWORKING", False) + mocker.patch.object(settings, "PAYMENT_RECEIVER_ADDRESS", "0xD39C335404a78E0BDCf6D50F29B86EFd57924288") + + loop = asyncio.get_event_loop() + pool = VmPool(loop=loop) + mock_community_wallet_address = "0x23C7A99d7AbebeD245d044685F1893aeA4b5Da90" + + async def get_stream(sender, receiver, chain): + if receiver == mock_community_wallet_address: + return 0 + 
elif receiver == settings.PAYMENT_RECEIVER_ADDRESS: + return 10 + + mocker.patch("aleph.vm.orchestrator.tasks.get_stream", new=get_stream) + mocker.patch("aleph.vm.orchestrator.tasks.get_community_wallet_address", return_value=mock_community_wallet_address) + mocker.patch("aleph.vm.orchestrator.tasks.get_message_status", return_value=MessageStatus.PROCESSED) + mocker.patch("aleph.vm.orchestrator.tasks.compute_required_flow", return_value=5) + message = InstanceContent.parse_obj(fake_instance_content) + + mocker.patch.object(VmExecution, "is_running", new=True) + mocker.patch.object(VmExecution, "stop", new=mocker.AsyncMock(return_value=False)) + hash = "decadecadecadecadecadecadecadecadecadecadecadecadecadecadecadeca" + execution = VmExecution( + vm_hash=hash, + message=message, + original=message, + persistent=False, + snapshot_manager=None, + systemd_manager=None, + ) + + pool.executions = {hash: execution} + + executions_by_sender = pool.get_executions_by_sender(payment_type=PaymentType.superfluid) + assert len(executions_by_sender) == 1 + assert executions_by_sender == {"0x101d8D16372dBf5f1614adaE95Ee5CCE61998Fc9": {Chain.BASE: [execution]}} + + await check_payment(pool=pool) + + execution.stop.assert_called_with() diff --git a/tests/supervisor/test_execution.py b/tests/supervisor/test_execution.py new file mode 100644 index 000000000..812404441 --- /dev/null +++ b/tests/supervisor/test_execution.py @@ -0,0 +1,323 @@ +import asyncio +import json +import logging +from typing import Any + +import pytest +from aleph_message import parse_message +from aleph_message.models import ItemHash + +from aleph.vm.conf import Settings, settings +from aleph.vm.controllers.firecracker import AlephFirecrackerProgram +from aleph.vm.models import VmExecution +from aleph.vm.orchestrator import metrics +from aleph.vm.orchestrator.messages import load_updated_message +from aleph.vm.storage import get_executable_message +from aleph.vm.utils import fix_message_validation + + 
+@pytest.mark.asyncio +async def test_create_execution(mocker): + """ + Create a new VM execution and check that it starts properly. + """ + mock_settings = Settings() + mocker.patch("aleph.vm.conf.settings", new=mock_settings) + mocker.patch("aleph.vm.storage.settings", new=mock_settings) + mocker.patch("aleph.vm.controllers.firecracker.executable.settings", new=mock_settings) + mocker.patch("aleph.vm.controllers.firecracker.program.settings", new=mock_settings) + + if not mock_settings.FAKE_DATA_RUNTIME.exists(): + pytest.xfail("Test Runtime not setup. run `cd runtimes/aleph-debian-12-python && sudo ./create_disk_image.sh`") + + mock_settings.FAKE_DATA_PROGRAM = mock_settings.BENCHMARK_FAKE_DATA_PROGRAM + mock_settings.ALLOW_VM_NETWORKING = False + mock_settings.USE_JAILER = False + mock_settings.IPFS_SERVER = "https://ipfs.io/ipfs" + + logging.basicConfig(level=logging.DEBUG) + mock_settings.PRINT_SYSTEM_LOGS = True + + # Ensure that the settings are correct and required files present. + mock_settings.setup() + mock_settings.check() + + # The database is required for the metrics and is currently not optional. + engine = metrics.setup_engine() + await metrics.create_tables(engine) + + vm_hash = ItemHash("cafecafecafecafecafecafecafecafecafecafecafecafecafecafecafecafe") + message = await get_executable_message(ref=vm_hash) + + execution = VmExecution( + vm_hash=vm_hash, + message=message.content, + original=message.content, + snapshot_manager=None, + systemd_manager=None, + persistent=False, + ) + + # Downloading the resources required may take some time, limit it to 10 seconds + await asyncio.wait_for(execution.prepare(), timeout=30) + + vm = execution.create(vm_id=3, tap_interface=None) + + # Test that the VM is created correctly. It is not started yet. 
+ assert isinstance(vm, AlephFirecrackerProgram) + assert vm.vm_id == 3 + + await execution.start() + await execution.stop() + + +# This test depends on having a vm-connector running on port 4021 +@pytest.mark.asyncio +async def test_create_execution_online(vm_hash: ItemHash = None): + """ + Create a new VM execution without building it locally and check that it starts properly. + """ + + vm_hash = vm_hash or settings.CHECK_FASTAPI_VM_ID + settings.IPFS_SERVER = "https://ipfs.io/ipfs" + + # Ensure that the settings are correct and required files present. + settings.setup() + settings.check() + + # The database is required for the metrics and is currently not optional. + engine = metrics.setup_engine() + await metrics.create_tables(engine) + + message, original_message = await load_updated_message(vm_hash) + + execution = VmExecution( + vm_hash=vm_hash, + message=message.content, + original=original_message.content, + snapshot_manager=None, + systemd_manager=None, + persistent=False, + ) + + # Downloading the resources required may take some time, limit it to 120 seconds + # since it is a bit slow in GitHub Actions + await asyncio.wait_for(execution.prepare(), timeout=120) + + vm = execution.create(vm_id=3, tap_interface=None) + + # Test that the VM is created correctly. It is not started yet. 
+ assert isinstance(vm, AlephFirecrackerProgram) + vm.enable_console = True + vm.fvm.enable_log = True + assert vm.vm_id == 3 + + await execution.start() + await execution.stop() + + +@pytest.fixture() +def fake_message(): + fake = { + "sender": "0x101d8D16372dBf5f1614adaE95Ee5CCE61998Fc9", + "chain": "ETH", + "signature": "0x12592841210ef84888315d12b9c39225b8ba6b958b067790540a7971a95e8d4e6ce81deeb8e1f05f6141d8d62218641be1aa9b335463cdc5a43354205d4c9e351c", + "type": "PROGRAM", + "item_type": "inline", + "item_hash": "63faf8b5db1cf8d965e6a464a0cb8062af8e7df131729e48738342d956f29ace", + "time": "2024-04-23T12:10:41.801703+00:00", + "channel": None, + "content": { + "address": "0x101d8D16372dBf5f1614adaE95Ee5CCE61998Fc9", + "time": 1713874241.800818, + "allow_amend": False, + "metadata": None, + "authorized_keys": None, + "variables": None, + "environment": {"reproducible": False, "internet": True, "aleph_api": True, "shared_cache": False}, + "resources": {"vcpus": 1, "memory": 256, "seconds": 30, "published_ports": None}, + "payment": None, + "requirements": None, + "volumes": [ + { + "comment": "Persistence", + "mount": "/var/lib/example", + "parent": None, + "persistence": "host", + "name": "increment-storage", + "size_mib": 1, + }, + ], + "replaces": None, + "type": "vm-function", + "code": { + "encoding": "zip", + "entrypoint": "main:app", + "ref": "79f19811f8e843f37ff7535f634b89504da3d8f03e1f0af109d1791cf6add7af", + "interface": None, + "args": None, + "use_latest": True, + }, + "runtime": { + "ref": "63f07193e6ee9d207b7d1fcf8286f9aee34e6f12f101d2ec77c1229f92964696", + "use_latest": True, + "comment": "", + }, + "data": None, + "export": None, + "on": {"http": True, "message": None, "persistent": False}, + }, + "confirmed": True, + "confirmations": [ + { + "chain": "ETH", + "height": 19718321, + "hash": "0x4b8f9f232602ef8ca9bf0ba4fd907f1feef2bfc865a32b2c51fa40b72fa5ba49", + } + ], + } + + return fake + + +def drop_none_recursively(data: dict) -> dict: + """ + 
Recursively removes keys with None values from a dictionary. + + """ + if not isinstance(data, dict): + return data # Base case: if not a dictionary, return as-is. + + cleaned_dict: dict[Any, Any] = {} + + for key, value in data.items(): + if value is None: + continue # Skip keys with None values. + elif isinstance(value, dict): + # Recur for nested dictionaries. + nested_cleaned = drop_none_recursively(value) + if nested_cleaned: # Include only if not empty. + cleaned_dict[key] = nested_cleaned + elif isinstance(value, list): + # Recur for dictionaries within lists. + cleaned_list = [drop_none_recursively(item) if isinstance(item, dict) else item for item in value] + cleaned_dict[key] = [item for item in cleaned_list if item] + else: + cleaned_dict[key] = value # Keep other values. + + return cleaned_dict + + +@pytest.mark.asyncio +async def test_create_execution_from_fake_message(fake_message): + # Ensure that the settings are correct and required files present. + settings.setup() + settings.check() + + # The database is required for the metrics and is currently not optional. + engine = metrics.setup_engine() + await metrics.create_tables(engine) + + vm_hash = ItemHash("cafecafecafecafecafecafecafecafecafecafecafecafecafecafecafecafe") + + fake_message = drop_none_recursively(fake_message) + fix_message_validation(fake_message) + + parsed_fake_message = parse_message(message_dict=fake_message) + + message, original_message = parsed_fake_message, parsed_fake_message + + execution = VmExecution( + vm_hash=vm_hash, + message=message.content, + original=original_message.content, + snapshot_manager=None, + systemd_manager=None, + persistent=False, + ) + + # Downloading the resources required may take some time, limit it to 120 seconds + # since it is a bit slow in GitHub Actions + await asyncio.wait_for(execution.prepare(), timeout=120) + + vm = execution.create(vm_id=3, tap_interface=None) + + # Test that the VM is created correctly. It is not started yet. 
+ assert isinstance(vm, AlephFirecrackerProgram) + vm.enable_console = True + vm.fvm.enable_log = True + assert vm.vm_id == 3 + + await execution.start() + await execution.stop() + + +@pytest.mark.asyncio +async def test_create_execution_volume_with_no_name(fake_message): + """Regression test for ALEPH-307: VM init fail if volume name is empty string""" + + vm_hash = ItemHash("cafecafecafecafecafecafecafecafecafecafecafecafecafecafecafecafe") + + # Ensure that the settings are correct and required files present. + settings.setup() + settings.check() + + # The database is required for the metrics and is currently not optional. + engine = metrics.setup_engine() + await metrics.create_tables(engine) + volume_with_no_name = { + "comment": "Persistence with no name", + "mount": "/var/lib/example", + "parent": None, + "persistence": "host", + "name": "", + "size_mib": 1, + } + volume_with_no_mount = { + "comment": "Persistence with no mount name", + "mount": "", + "parent": None, + "persistence": "host", + "name": "", + "size_mib": 1, + } + fake_message["content"]["volumes"] = [volume_with_no_name, volume_with_no_mount] + fake_message = drop_none_recursively(fake_message) + fix_message_validation(fake_message) + + parsed_fake_message = parse_message(message_dict=fake_message) + + message, original_message = parsed_fake_message, parsed_fake_message + + execution = VmExecution( + vm_hash=vm_hash, + message=message.content, + original=original_message.content, + snapshot_manager=None, + systemd_manager=None, + persistent=False, + ) + + # Downloading the resources required may take some time, limit it to 120 seconds + # since it is a bit slow in GitHub Actions + await asyncio.wait_for(execution.prepare(), timeout=120) + + vm = execution.create(vm_id=3, tap_interface=None) + + # Test that the VM is created correctly. It is not started yet. 
+ assert isinstance(vm, AlephFirecrackerProgram) + vm.enable_console = True + vm.fvm.enable_log = True + assert vm.vm_id == 3 + + await execution.start() + await execution.stop() + + +# This test depends on having a vm-connector running on port 4021 +@pytest.mark.asyncio +async def test_create_execution_legacy(): + """ + Create a new VM execution based on the legacy FastAPI check and ensure that it starts properly. + """ + await test_create_execution_online(vm_hash=settings.LEGACY_CHECK_FASTAPI_VM_ID) diff --git a/tests/supervisor/test_instance.py b/tests/supervisor/test_instance.py new file mode 100644 index 000000000..69e0d0fa2 --- /dev/null +++ b/tests/supervisor/test_instance.py @@ -0,0 +1,127 @@ +import asyncio +import logging +from asyncio.subprocess import Process +from pathlib import Path + +import pytest +from aleph_message.models import ItemHash + +from aleph.vm.conf import settings +from aleph.vm.controllers.__main__ import configuration_from_file, execute_persistent_vm +from aleph.vm.controllers.firecracker import AlephFirecrackerInstance +from aleph.vm.hypervisors.firecracker.microvm import MicroVM +from aleph.vm.models import VmExecution +from aleph.vm.network.hostnetwork import Network, make_ipv6_allocator +from aleph.vm.orchestrator import metrics +from aleph.vm.storage import get_executable_message +from aleph.vm.systemd import SystemDManager +from aleph.vm.vm_type import VmType + + +@pytest.mark.asyncio +class MockSystemDManager(SystemDManager): + execution: MicroVM | None = None + process: Process | None = None + + async def enable_and_start(self, vm_hash: str): + config_path = Path(f"{settings.EXECUTION_ROOT}/{vm_hash}-controller.json") + config = configuration_from_file(config_path) + self.execution, self.process = await execute_persistent_vm(config) + return self.execution, self.process + + def is_service_enabled(self, service: str): + return self.process is not None + + def is_service_active(self, service: str): + return self.process is not 
None + + async def stop_and_disable(self, vm_hash: str): + if self.execution: + await self.execution.shutdown() + await self.execution.stop() + self.process = None + self.execution = None + return self.execution, self.process + + +@pytest.mark.asyncio +async def test_create_instance(): + """ + Create a fake instance locally and check that it start / init / stop properly. + """ + + settings.USE_FAKE_INSTANCE_BASE = True + settings.FAKE_DATA_PROGRAM = settings.BENCHMARK_FAKE_DATA_PROGRAM + # settings.FAKE_INSTANCE_MESSAGE + settings.ALLOW_VM_NETWORKING = True + settings.USE_JAILER = True + settings.IPFS_SERVER = "https://ipfs.io/ipfs" + + logging.basicConfig(level=logging.DEBUG) + settings.PRINT_SYSTEM_LOGS = True + + # Ensure that the settings are correct and required files present. + settings.setup() + settings.check() + if not settings.FAKE_INSTANCE_BASE.exists(): + pytest.xfail("Test Runtime not setup. run `cd runtimes/instance-rootfs && sudo ./create-debian-12-disk.sh`") + + # The database is required for the metrics and is currently not optional. 
+ engine = metrics.setup_engine() + await metrics.create_tables(engine) + + vm_hash = ItemHash(settings.FAKE_INSTANCE_ID) + message = await get_executable_message(ref=vm_hash) + + mock_systemd_manager = MockSystemDManager() + + # Creating a Network to initialize the tap_interface that is needed for the creation of an instance + network = Network( + vm_ipv4_address_pool_range=settings.IPV4_ADDRESS_POOL, + vm_network_size=settings.IPV4_NETWORK_PREFIX_LENGTH, + external_interface=settings.NETWORK_INTERFACE, + ipv6_allocator=make_ipv6_allocator( + allocation_policy=settings.IPV6_ALLOCATION_POLICY, + address_pool=settings.IPV6_ADDRESS_POOL, + subnet_prefix=settings.IPV6_SUBNET_PREFIX, + ), + use_ndp_proxy=False, + ipv6_forwarding_enabled=False, + ) + + execution = VmExecution( + vm_hash=vm_hash, + message=message.content, + original=message.content, + snapshot_manager=None, + systemd_manager=None, + persistent=True, + ) + + # Downloading the resources required may take some time, limit it to 30 seconds + await asyncio.wait_for(execution.prepare(), timeout=30) + + vm_id = 3 + vm_type = VmType.from_message_content(message.content) + tap_interface = await network.prepare_tap(vm_id, vm_hash, vm_type) + await network.create_tap(vm_id, tap_interface) + + vm = execution.create(vm_id=vm_id, tap_interface=tap_interface) + + # Test that the VM is created correctly. It is not started yet.
+ assert isinstance(vm, AlephFirecrackerInstance) + assert vm.vm_id == vm_id + assert vm.persistent + assert vm.enable_networking + + await execution.start() + firecracker_execution, process = await mock_systemd_manager.enable_and_start(execution.vm_hash) + assert isinstance(firecracker_execution, MicroVM) + assert firecracker_execution.proc is not None + await execution.wait_for_init() + + # This sleep is to leave the instance to boot up and prevent disk corruption + await asyncio.sleep(60) + firecracker_execution, process = await mock_systemd_manager.stop_and_disable(execution.vm_hash) + await execution.stop() + assert firecracker_execution is None diff --git a/tests/supervisor/test_interfaces.py b/tests/supervisor/test_interfaces.py new file mode 100644 index 000000000..79868b85e --- /dev/null +++ b/tests/supervisor/test_interfaces.py @@ -0,0 +1,70 @@ +from ipaddress import IPv4Interface +from subprocess import run + +import pytest +from pyroute2 import IPRoute + +from aleph.vm.network.interfaces import ( + MissingInterfaceError, + add_ip_address, + create_tap_interface, + delete_tap_interface, + set_link_up, +) + + +def test_create_tap_interface(): + """Test the creation of a TAP interface and related error handling.""" + test_device_name = "test_tap" + try: + with IPRoute() as ipr: + create_tap_interface(ipr, test_device_name) + # Check that the interface was created + assert run(["ip", "link", "show", test_device_name], check=False).returncode == 0 + # Create the interface a second time, which should be ignored + create_tap_interface(ipr, test_device_name) + finally: + run(["ip", "tuntap", "del", test_device_name, "mode", "tap"], check=False) + + +def test_add_ip_address(): + """Test the addition of an IP address to an interface.""" + test_device_name = "test_tap" + test_ipv4 = IPv4Interface(("10.10.10.10", 24)) + try: + with IPRoute() as ipr: + # We need an interface to add an address to + create_tap_interface(ipr, test_device_name) + # Add an IP address to 
the interface + add_ip_address(ipr, test_device_name, test_ipv4) + # Check that the address was added + assert run(["ip", "address", "show", test_device_name], check=False).returncode == 0 + # Add the same address again, which should be ignored + add_ip_address(ipr, test_device_name, test_ipv4) + finally: + # Delete the interface, ignoring any errors + run(["ip", "tuntap", "del", test_device_name, "mode", "tap"], check=False) + + # Without an interface, the function should raise an error + with pytest.raises(MissingInterfaceError): + add_ip_address(IPRoute(), test_device_name, test_ipv4) + + +def test_link_up_down(): + """Test setting the link up on a TAP interface and then deleting the interface.""" + test_device_name = "test_tap" + try: + with IPRoute() as ipr: + # We need an interface to set the link up + create_tap_interface(ipr, test_device_name) + + set_link_up(ipr, test_device_name) + # Check that the interface is up + assert run(["ip", "link", "show", test_device_name], check=False).returncode == 0 + # Delete the interface + delete_tap_interface(ipr, test_device_name) + # Check that the interface is down + assert run(["ip", "link", "show", test_device_name], check=False).returncode != 0 + finally: + # Delete the interface, ignoring any errors + run(["ip", "tuntap", "del", test_device_name, "mode", "tap"], check=False) diff --git a/tests/supervisor/test_ipv6_allocator.py b/tests/supervisor/test_ipv6_allocator.py new file mode 100644 index 000000000..a3fdf11aa --- /dev/null +++ b/tests/supervisor/test_ipv6_allocator.py @@ -0,0 +1,21 @@ +import os + +from aleph.vm.network.hostnetwork import StaticIPv6Allocator +from aleph.vm.vm_type import VmType + +# Avoid failures linked to settings when initializing the global VmPool object
os.environ["ALEPH_VM_ALLOW_VM_NETWORKING"] = "False" + +from ipaddress import IPv6Network + +from aleph_message.models import ItemHash + + +def test_static_ipv6_allocator(): + allocator = StaticIPv6Allocator(ipv6_range=IPv6Network("1111:2222:3333:4444::/64"),
subnet_prefix=124) + ip_subnet = allocator.allocate_vm_ipv6_subnet( + vm_id=3, + vm_hash=ItemHash("8920215b2e961a4d4c59a8ceb2803af53f91530ff53d6704273ab4d380bc6446"), + vm_type=VmType.microvm, + ) + assert ip_subnet == IPv6Network("1111:2222:3333:4444:0001:8920:215b:2e90/124") diff --git a/tests/supervisor/test_log.py b/tests/supervisor/test_log.py new file mode 100644 index 000000000..23f27aaaa --- /dev/null +++ b/tests/supervisor/test_log.py @@ -0,0 +1,15 @@ +from asyncio import QueueEmpty + +from aleph.vm.utils.logs import make_logs_queue + + +def test_make_logs_queue(): + stdout_identifier = "test_stdout" + stderr_identifier = "test_stderr" + queue, do_cancel = make_logs_queue(stdout_identifier, stderr_identifier) + import pytest + + with pytest.raises(QueueEmpty): + while queue.get_nowait(): + queue.task_done() + do_cancel() diff --git a/tests/supervisor/test_qemu_instance.py b/tests/supervisor/test_qemu_instance.py new file mode 100644 index 000000000..1e76d19a4 --- /dev/null +++ b/tests/supervisor/test_qemu_instance.py @@ -0,0 +1,180 @@ +import asyncio +import logging +from asyncio.subprocess import Process +from pathlib import Path + +import pytest +from aleph_message.models import ItemHash + +from aleph.vm.conf import settings +from aleph.vm.controllers.__main__ import configuration_from_file, execute_persistent_vm +from aleph.vm.controllers.qemu import AlephQemuInstance +from aleph.vm.hypervisors.qemu.qemuvm import QemuVM +from aleph.vm.models import VmExecution +from aleph.vm.network.hostnetwork import Network, make_ipv6_allocator +from aleph.vm.orchestrator import metrics +from aleph.vm.storage import get_executable_message +from aleph.vm.systemd import SystemDManager +from aleph.vm.vm_type import VmType + + +@pytest.mark.asyncio +class MockSystemDManager(SystemDManager): + execution: QemuVM | None = None + process: Process | None = None + + async def enable_and_start(self, vm_hash: str): + config_path = 
Path(f"{settings.EXECUTION_ROOT}/{vm_hash}-controller.json") + config = configuration_from_file(config_path) + self.execution, self.process = await execute_persistent_vm(config) + return self.execution, self.process + + def is_service_enabled(self, service: str): + return self.process is not None + + def is_service_active(self, service: str): + return self.process is not None + + async def stop_and_disable(self, vm_hash: str): + if self.process: + self.process.kill() + self.process = None + self.execution = None + return self.execution, self.process + + +@pytest.mark.asyncio +async def test_create_qemu_instance(): + """ + Create an instance and check that it start / init / stop properly. + """ + + settings.USE_FAKE_INSTANCE_BASE = True + settings.FAKE_INSTANCE_MESSAGE = settings.FAKE_INSTANCE_QEMU_MESSAGE + settings.FAKE_INSTANCE_BASE = settings.FAKE_QEMU_INSTANCE_BASE + settings.ENABLE_CONFIDENTIAL_COMPUTING = False + settings.ALLOW_VM_NETWORKING = False + settings.USE_JAILER = False + if not settings.FAKE_INSTANCE_BASE.exists(): + pytest.xfail("Test Runtime not setup. run `cd runtimes/instance-rootfs && sudo ./create-debian-12-disk.sh`") + + logging.basicConfig(level=logging.DEBUG) + + # Ensure that the settings are correct and required files present. + settings.setup() + settings.check() + + # The database is required for the metrics and is currently not optional. + engine = metrics.setup_engine() + await metrics.create_tables(engine) + + vm_hash = ItemHash(settings.FAKE_INSTANCE_ID) + message = await get_executable_message(ref=vm_hash) + + mock_systemd_manager = MockSystemDManager() + + execution = VmExecution( + vm_hash=vm_hash, + message=message.content, + original=message.content, + snapshot_manager=None, + systemd_manager=None, + persistent=True, + ) + + await asyncio.wait_for(execution.prepare(), timeout=60) + vm_id = 3 + + vm = execution.create(vm_id=vm_id, tap_interface=None) + + # Test that the VM is created correctly. It is not started yet. 
+ assert isinstance(vm, AlephQemuInstance) + assert vm.vm_id == vm_id + + await execution.start() + qemu_execution, process = await mock_systemd_manager.enable_and_start(execution.vm_hash) + assert isinstance(qemu_execution, QemuVM) + assert qemu_execution.qemu_process is not None + qemu_execution, process = await mock_systemd_manager.stop_and_disable(execution.vm_hash) + await execution.stop() + assert qemu_execution is None + + +@pytest.mark.asyncio +async def test_create_qemu_instance_online(): + """ + Create an instance and check that it start / init / stop properly. + """ + + settings.USE_FAKE_INSTANCE_BASE = True + settings.FAKE_INSTANCE_MESSAGE = settings.FAKE_INSTANCE_QEMU_MESSAGE + settings.FAKE_INSTANCE_BASE = settings.FAKE_QEMU_INSTANCE_BASE + settings.ENABLE_CONFIDENTIAL_COMPUTING = False + settings.ALLOW_VM_NETWORKING = True + settings.USE_JAILER = False + settings.IPFS_SERVER = "https://ipfs.io/ipfs" + + logging.basicConfig(level=logging.DEBUG) + + # Ensure that the settings are correct and required files present. + settings.setup() + settings.check() + if not settings.FAKE_INSTANCE_BASE.exists(): + pytest.xfail("Test Runtime not setup. run `cd runtimes/instance-rootfs && sudo ./create-debian-12-disk.sh`") + + # The database is required for the metrics and is currently not optional. 
+ engine = metrics.setup_engine() + await metrics.create_tables(engine) + + vm_hash = ItemHash(settings.FAKE_INSTANCE_ID) + message = await get_executable_message(ref=vm_hash) + + mock_systemd_manager = MockSystemDManager() + + network = ( + Network( + vm_ipv4_address_pool_range=settings.IPV4_ADDRESS_POOL, + vm_network_size=settings.IPV4_NETWORK_PREFIX_LENGTH, + external_interface=settings.NETWORK_INTERFACE, + ipv6_allocator=make_ipv6_allocator( + allocation_policy=settings.IPV6_ALLOCATION_POLICY, + address_pool=settings.IPV6_ADDRESS_POOL, + subnet_prefix=settings.IPV6_SUBNET_PREFIX, + ), + use_ndp_proxy=False, + ipv6_forwarding_enabled=False, + ) + if settings.ALLOW_VM_NETWORKING + else None + ) + + execution = VmExecution( + vm_hash=vm_hash, + message=message.content, + original=message.content, + snapshot_manager=None, + systemd_manager=None, + persistent=True, + ) + + await asyncio.wait_for(execution.prepare(), timeout=60) + vm_id = 3 + + vm_type = VmType.from_message_content(message.content) + tap_interface = await network.prepare_tap(vm_id, vm_hash, vm_type) + await network.create_tap(vm_id, tap_interface) + + vm = execution.create(vm_id=vm_id, tap_interface=tap_interface) + + # Test that the VM is created correctly. It is not started yet. 
+ assert isinstance(vm, AlephQemuInstance) + assert vm.vm_id == vm_id + + await execution.start() + qemu_execution, process = await mock_systemd_manager.enable_and_start(execution.vm_hash) + assert isinstance(qemu_execution, QemuVM) + assert qemu_execution.qemu_process is not None + await execution.wait_for_init() + qemu_execution, process = await mock_systemd_manager.stop_and_disable(execution.vm_hash) + await execution.stop() + assert qemu_execution is None diff --git a/tests/supervisor/test_resolvectl_dns_servers.py b/tests/supervisor/test_resolvectl_dns_servers.py new file mode 100644 index 000000000..0af9b6fb8 --- /dev/null +++ b/tests/supervisor/test_resolvectl_dns_servers.py @@ -0,0 +1,30 @@ +# Avoid failures linked to nftables when initializing the global VmPool object +import os +from unittest import mock + +from aleph.vm.conf import resolvectl_dns_servers + +os.environ["ALEPH_VM_ALLOW_VM_NETWORKING"] = "False" + + +def test_resolvectl(): + with mock.patch( + "aleph.vm.conf.check_output", + return_value="Link 2 (eth0): 109.88.203.3 62.197.111.140\n", + ): + servers = {"109.88.203.3", "62.197.111.140"} + + dns_servers = set(resolvectl_dns_servers("eth0")) + assert dns_servers == servers + + +def test_resolvectl_ipv6(): + with mock.patch( + "aleph.vm.conf.check_output", + return_value="Link 2 (eth0): 109.88.203.3 62.197.111.140 2a02:2788:fff0:7::3\n 2a02:2788:fff0:5::140\n", + ): + ipv4_servers = {"109.88.203.3", "62.197.111.140"} + ipv6_servers = {"2a02:2788:fff0:7::3", "2a02:2788:fff0:5::140"} + + dns_servers = set(resolvectl_dns_servers("eth0")) + assert dns_servers == ipv4_servers | ipv6_servers diff --git a/tests/supervisor/test_resources.py b/tests/supervisor/test_resources.py new file mode 100644 index 000000000..fea79fe71 --- /dev/null +++ b/tests/supervisor/test_resources.py @@ -0,0 +1,38 @@ +from unittest import mock + +from aleph.vm.resources import get_gpu_devices + + +def mock_is_kernel_enabled_gpu(pci_host: str) -> bool: + value = True if 
pci_host == "01:00.0" else False + return value + + +def test_get_gpu_devices(): + class DevicesReturn: + stdout: str = ( + '00:1f.0 "ISA bridge [0601]" "Intel Corporation [8086]" "Device [7a06]" -r11 -p00 "ASUSTeK Computer Inc. [1043]" "Device [8882]"' + '\n00:1f.4 "SMBus [0c05]" "Intel Corporation [8086]" "Raptor Lake-S PCH SMBus Controller [7a23]" -r11 -p00 "ASUSTeK Computer Inc. [1043]" "Device [8882]"' + '\n00:1f.5 "Serial bus controller [0c80]" "Intel Corporation [8086]" "Raptor Lake SPI (flash) Controller [7a24]" -r11 -p00 "ASUSTeK Computer Inc. [1043]" "Device [8882]"' + '\n01:00.0 "VGA compatible controller [0300]" "NVIDIA Corporation [10de]" "AD104GL [RTX 4000 SFF Ada Generation] [27b0]" -ra1 -p00 "NVIDIA Corporation [10de]" "AD104GL [RTX 4000 SFF Ada Generation] [16fa]"' + '\n01:00.1 "Audio device [0403]" "NVIDIA Corporation [10de]" "Device [22bc]" -ra1 -p00 "NVIDIA Corporation [10de]" "Device [16fa]"' + '\n02:00.0 "Non-Volatile memory controller [0108]" "Samsung Electronics Co Ltd [144d]" "NVMe SSD Controller PM9A1/PM9A3/980PRO [a80a]" -p02 "Samsung Electronics Co Ltd [144d]" "NVMe SSD Controller PM9A1/PM9A3/980PRO [aa0a]"' + ) + + with mock.patch( + "subprocess.run", + return_value=DevicesReturn(), + ): + with mock.patch( + "aleph.vm.resources.is_kernel_enabled_gpu", + wraps=mock_is_kernel_enabled_gpu, + ): + expected_gpu_devices = get_gpu_devices() + + print(expected_gpu_devices) + + assert expected_gpu_devices[0].vendor == "NVIDIA" + assert expected_gpu_devices[0].device_name == "AD104GL [RTX 4000 SFF Ada Generation]" + assert expected_gpu_devices[0].device_class == "0300" + assert expected_gpu_devices[0].pci_host == "01:00.0" + assert expected_gpu_devices[0].device_id == "10de:27b0" diff --git a/tests/supervisor/test_status.py b/tests/supervisor/test_status.py new file mode 100644 index 000000000..0e0449dbf --- /dev/null +++ b/tests/supervisor/test_status.py @@ -0,0 +1,24 @@ +from unittest.mock import AsyncMock, MagicMock, Mock + +import pytest 
+from aleph_message.models import ItemHash + +from aleph.vm.orchestrator.status import check_internet + + +@pytest.mark.asyncio +async def test_check_internet_wrong_result_code(): + vm_id = ItemHash("cafecafecafecafecafecafecafecafecafecafecafecafecafecafecafecafe") + + mock_session = Mock() + mock_session.get = MagicMock() + + mock_session.get.return_value.__aenter__.return_value.json = AsyncMock( + return_value={"result": 200, "headers": {"Server": "nginx"}} + ) + assert await check_internet(mock_session, vm_id) is True + + mock_session.get.return_value.__aenter__.return_value.json = AsyncMock( + return_value={"result": 400, "headers": {"Server": "nginx"}} + ) + assert await check_internet(mock_session, vm_id) is False diff --git a/tests/supervisor/test_utils.py b/tests/supervisor/test_utils.py new file mode 100644 index 000000000..51eef026a --- /dev/null +++ b/tests/supervisor/test_utils.py @@ -0,0 +1,37 @@ +from unittest import mock + +from aleph.vm.utils import ( + check_amd_sev_es_supported, + check_amd_sev_snp_supported, + check_amd_sev_supported, + check_system_module, +) + + +def test_check_system_module_enabled(): + with mock.patch( + "pathlib.Path.exists", + return_value=True, + ): + expected_value = "Y" + with mock.patch( + "aleph.vm.utils.Path.open", + mock.mock_open(read_data=expected_value), + ): + output = check_system_module("kvm_amd/parameters/sev_enp") + assert output == expected_value + + assert check_amd_sev_supported() is True + assert check_amd_sev_es_supported() is True + assert check_amd_sev_snp_supported() is True + + with mock.patch( + "aleph.vm.utils.Path.open", + mock.mock_open(read_data="N"), + ): + output = check_system_module("kvm_amd/parameters/sev_enp") + assert output == "N" + + assert check_amd_sev_supported() is False + assert check_amd_sev_es_supported() is False + assert check_amd_sev_snp_supported() is False diff --git a/tests/supervisor/test_views.py b/tests/supervisor/test_views.py new file mode 100644 index 
000000000..d94ce60f1 --- /dev/null +++ b/tests/supervisor/test_views.py @@ -0,0 +1,192 @@ +import tempfile +from pathlib import Path +from unittest import mock +from unittest.mock import call + +import pytest +from aiohttp import web + +from aleph.vm.conf import settings +from aleph.vm.orchestrator.supervisor import setup_webapp +from aleph.vm.sevclient import SevClient + + +@pytest.mark.asyncio +async def test_allocation_fails_on_invalid_item_hash(aiohttp_client): + """Test that the allocation endpoint fails when an invalid item_hash is provided.""" + app = setup_webapp() + client = await aiohttp_client(app) + settings.ALLOCATION_TOKEN_HASH = "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08" # = "test" + response: web.Response = await client.post( + "/control/allocations", json={"persistent_vms": ["not-an-ItemHash"]}, headers={"X-Auth-Signature": "test"} + ) + assert response.status == 400 + assert await response.json() == [ + { + "loc": [ + "persistent_vms", + 0, + ], + "msg": "Could not determine hash type: 'not-an-ItemHash'", + "type": "value_error.unknownhash", + }, + ] + + +@pytest.mark.asyncio +async def test_system_usage(aiohttp_client): + """Test that the usage system endpoints responds. No auth needed""" + + class FakeVmPool: + gpus = [] + + def get_available_gpus(self): + return [] + + app = setup_webapp() + app["vm_pool"] = FakeVmPool() + client = await aiohttp_client(app) + response: web.Response = await client.get("/about/usage/system") + assert response.status == 200 + # check if it is valid json + resp = await response.json() + assert "cpu" in resp + assert resp["cpu"]["count"] > 0 + + +@pytest.mark.asyncio +async def test_system_usage_mock(aiohttp_client, mocker): + """Test that the usage system endpoints response value. 
No auth needed""" + + class FakeVmPool: + gpus = [] + + def get_available_gpus(self): + return [] + + mocker.patch( + "cpuinfo.cpuinfo.get_cpu_info", + { + "arch_string_raw": "x86_64", + "vendor_id_raw": "AuthenticAMD", + }, + ) + mocker.patch( + "psutil.getloadavg", + lambda: [1, 2, 3], + ) + mocker.patch( + "psutil.cpu_count", + lambda: 200, + ) + + app = setup_webapp() + app["vm_pool"] = FakeVmPool() + client = await aiohttp_client(app) + response: web.Response = await client.get("/about/usage/system") + assert response.status == 200 + # check if it is valid json + resp = await response.json() + assert resp["properties"]["cpu"]["architecture"] == "x86_64" + assert resp["properties"]["cpu"]["vendor"] == "AuthenticAMD" + assert resp["cpu"]["load_average"] == {"load1": 1.0, "load15": 3.0, "load5": 2.0} + assert resp["cpu"]["count"] == 200 + + +@pytest.mark.asyncio +async def test_allocation_invalid_auth_token(aiohttp_client): + """Test that the allocation endpoint fails when an invalid auth token is provided.""" + settings.ALLOCATION_TOKEN_HASH = "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08" # = "test" + app = setup_webapp() + client = await aiohttp_client(app) + response = await client.post( + "/control/allocations", + json={"persistent_vms": []}, + headers={"X-Auth-Signature": "notTest"}, + ) + assert response.status == 401 + assert await response.text() == "Authentication token received is invalid" + + +@pytest.mark.asyncio +async def test_allocation_missing_auth_token(aiohttp_client): + """Test that the allocation endpoint fails when auth token is not provided.""" + app = setup_webapp() + client = await aiohttp_client(app) + response: web.Response = await client.post( + "/control/allocations", + json={"persistent_vms": []}, + ) + assert response.status == 401 + assert await response.text() == "Authentication token is missing" + + +@pytest.mark.asyncio +async def test_allocation_valid_token(aiohttp_client): + """Test that the allocation 
endpoint fails when an invalid auth is provided. + + This is a very simple test that don't start or stop any VM so the mock is minimal""" + + class FakeVmPool: + def get_persistent_executions(self): + return [] + + settings.ALLOCATION_TOKEN_HASH = "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08" # = "test" + app = setup_webapp() + app["vm_pool"] = FakeVmPool() + app["pubsub"] = FakeVmPool() + client = await aiohttp_client(app) + + response: web.Response = await client.post( + "/control/allocations", + json={"persistent_vms": []}, + headers={"X-Auth-Signature": "test"}, + ) + assert response.status == 200 + assert await response.json() == {"success": True, "successful": [], "failing": [], "errors": {}} + + +@pytest.mark.asyncio +async def test_about_certificates_missing_setting(aiohttp_client): + """Test that the certificates system endpoint returns an error if the setting isn't enabled""" + settings.ENABLE_CONFIDENTIAL_COMPUTING = False + + app = setup_webapp() + app["sev_client"] = SevClient(Path().resolve(), Path("/opt/sevctl").resolve()) + client = await aiohttp_client(app) + response: web.Response = await client.get("/about/certificates") + assert response.status == 400 + assert await response.text() == "400: Confidential computing setting not enabled on that server" + + +@pytest.mark.asyncio +async def test_about_certificates(aiohttp_client): + """Test that the certificates system endpoint responds. 
No auth needed""" + + settings.ENABLE_QEMU_SUPPORT = True + settings.ENABLE_CONFIDENTIAL_COMPUTING = True + settings.setup() + + with mock.patch( + "pathlib.Path.is_file", + return_value=False, + ) as is_file_mock: + with mock.patch( + "aleph.vm.sevclient.run_in_subprocess", + return_value=True, + ) as export_mock: + with tempfile.TemporaryDirectory() as tmp_dir: + app = setup_webapp() + sev_client = SevClient(Path(tmp_dir), Path("/opt/sevctl")) + app["sev_client"] = sev_client + # Create mock file to return it + Path(sev_client.certificates_archive).touch(exist_ok=True) + + client = await aiohttp_client(app) + response: web.Response = await client.get("/about/certificates") + assert response.status == 200 + is_file_mock.assert_has_calls([call()]) + certificates_expected_dir = sev_client.certificates_archive + export_mock.assert_called_once_with( + ["/opt/sevctl", "export", str(certificates_expected_dir)], check=True + ) diff --git a/tests/supervisor/views/test_operator.py b/tests/supervisor/views/test_operator.py new file mode 100644 index 000000000..51ad5323d --- /dev/null +++ b/tests/supervisor/views/test_operator.py @@ -0,0 +1,544 @@ +import asyncio +import datetime +import json +import tempfile +from asyncio import Queue +from unittest import mock +from unittest.mock import MagicMock + +import aiohttp +import pytest +from aiohttp.test_utils import TestClient +from aleph_message.models import ItemHash + +from aleph.vm.conf import settings +from aleph.vm.orchestrator.metrics import ExecutionRecord +from aleph.vm.orchestrator.supervisor import setup_webapp +from aleph.vm.storage import get_executable_message +from aleph.vm.utils.logs import EntryDict +from aleph.vm.utils.test_helpers import ( + generate_signer_and_signed_headers_for_operation, + patch_datetime_now, +) + +# Ensure this is not removed by ruff +assert patch_datetime_now + + +@pytest.mark.asyncio +async def test_operator_confidential_initialize_not_authorized(aiohttp_client): + """Test that the 
confidential initialize endpoint rejects if the sender is not the good one. Auth needed""" + + settings.ENABLE_QEMU_SUPPORT = True + settings.ENABLE_CONFIDENTIAL_COMPUTING = True + settings.setup() + + class FakeExecution: + message = None + is_running: bool = True + is_confidential: bool = False + + class FakeVmPool: + executions: dict[ItemHash, FakeExecution] = {} + + def __init__(self): + self.executions[settings.FAKE_INSTANCE_ID] = FakeExecution() + + with mock.patch( + "aleph.vm.orchestrator.views.authentication.authenticate_jwk", + return_value="", + ): + with mock.patch( + "aleph.vm.orchestrator.views.operator.is_sender_authorized", + return_value=False, + ) as is_sender_authorized_mock: + app = setup_webapp() + app["vm_pool"] = FakeVmPool() + client = await aiohttp_client(app) + response = await client.post( + f"/control/machine/{settings.FAKE_INSTANCE_ID}/confidential/initialize", + ) + assert response.status == 403 + assert await response.text() == "Unauthorized sender" + is_sender_authorized_mock.assert_called_once() + + +@pytest.mark.asyncio +async def test_operator_confidential_initialize_already_running(aiohttp_client, mocker): + """Test that the confidential initialize endpoint rejects if the VM is already running. 
Auth needed""" + + settings.ENABLE_QEMU_SUPPORT = True + settings.ENABLE_CONFIDENTIAL_COMPUTING = True + settings.setup() + + vm_hash = ItemHash(settings.FAKE_INSTANCE_ID) + instance_message = await get_executable_message(ref=vm_hash) + + fake_vm_pool = mocker.Mock( + executions={ + vm_hash: mocker.Mock( + vm_hash=vm_hash, + message=instance_message.content, + is_confidential=False, + is_running=True, + ), + }, + ) + + # Disable auth + mocker.patch( + "aleph.vm.orchestrator.views.authentication.authenticate_jwk", + return_value=instance_message.sender, + ) + app = setup_webapp() + app["vm_pool"] = fake_vm_pool + client: TestClient = await aiohttp_client(app) + response = await client.post( + f"/control/machine/{vm_hash}/confidential/initialize", + json={"persistent_vms": []}, + ) + assert response.status == 400 + assert response.content_type == "application/json" + assert await response.json() == { + "code": "vm_running", + "description": "Operation not allowed, instance already running", + } + + +@pytest.mark.asyncio +@pytest.mark.skip() +async def test_operator_expire(aiohttp_client, mocker): + """Test that the expires endpoint work. 
SPOILER it doesn't""" + + settings.ENABLE_QEMU_SUPPORT = True + settings.ENABLE_CONFIDENTIAL_COMPUTING = True + settings.setup() + + vm_hash = ItemHash(settings.FAKE_INSTANCE_ID) + instance_message = await get_executable_message(ref=vm_hash) + + fake_vm_pool = mocker.Mock( + executions={ + vm_hash: mocker.Mock( + vm_hash=vm_hash, + message=instance_message.content, + is_confidential=False, + is_running=False, + ), + }, + ) + + # Disable auth + mocker.patch( + "aleph.vm.orchestrator.views.authentication.authenticate_jwk", + return_value=instance_message.sender, + ) + app = setup_webapp() + app["vm_pool"] = fake_vm_pool + client: TestClient = await aiohttp_client(app) + response = await client.post( + f"/control/machine/{vm_hash}/expire", + data={"timeout": 1}, + # json={"timeout": 1}, + ) + assert response.status == 200, await response.text() + assert fake_vm_pool["executions"][vm_hash].expire.call_count == 1 + + +@pytest.mark.asyncio +async def test_operator_stop(aiohttp_client, mocker): + """Test that the stop endpoint call the method on pool""" + + settings.ENABLE_QEMU_SUPPORT = True + settings.ENABLE_CONFIDENTIAL_COMPUTING = True + settings.setup() + + vm_hash = ItemHash(settings.FAKE_INSTANCE_ID) + instance_message = await get_executable_message(ref=vm_hash) + + fake_vm_pool = mocker.AsyncMock( + executions={ + vm_hash: mocker.AsyncMock( + vm_hash=vm_hash, + message=instance_message.content, + is_running=True, + ), + }, + ) + + # Disable auth + mocker.patch( + "aleph.vm.orchestrator.views.authentication.authenticate_jwk", + return_value=instance_message.sender, + ) + app = setup_webapp() + app["vm_pool"] = fake_vm_pool + client: TestClient = await aiohttp_client(app) + response = await client.post( + f"/control/machine/{vm_hash}/stop", + ) + assert response.status == 200, await response.text() + assert fake_vm_pool.stop_vm.call_count == 1 + + +@pytest.mark.asyncio +async def test_operator_confidential_initialize_not_confidential(aiohttp_client, mocker): + 
"""Test that the confidential initialize endpoint rejects if the VM is not confidential""" + + settings.ENABLE_QEMU_SUPPORT = True + settings.ENABLE_CONFIDENTIAL_COMPUTING = True + settings.setup() + + vm_hash = ItemHash(settings.FAKE_INSTANCE_ID) + instance_message = await get_executable_message(ref=vm_hash) + + fake_vm_pool = mocker.Mock( + executions={ + vm_hash: mocker.Mock( + vm_hash=vm_hash, + message=instance_message.content, + is_confidential=False, + is_running=False, + ), + }, + ) + + # Disable auth + mocker.patch( + "aleph.vm.orchestrator.views.authentication.authenticate_jwk", + return_value=instance_message.sender, + ) + app = setup_webapp() + app["vm_pool"] = fake_vm_pool + client: TestClient = await aiohttp_client(app) + response = await client.post( + f"/control/machine/{vm_hash}/confidential/initialize", + json={"persistent_vms": []}, + ) + assert response.status == 400 + assert response.content_type == "application/json" + assert await response.json() == { + "code": "not_confidential", + "description": "Instance is not a confidential instance", + } + + +@pytest.mark.asyncio +async def test_operator_confidential_initialize(aiohttp_client): + """Test that the certificates system endpoint responds. 
No auth needed""" + + settings.ENABLE_QEMU_SUPPORT = True + settings.ENABLE_CONFIDENTIAL_COMPUTING = True + settings.setup() + + vm_hash = ItemHash(settings.FAKE_INSTANCE_ID) + instance_message = await get_executable_message(ref=vm_hash) + + class FakeExecution: + message = instance_message.content + is_running: bool = False + is_confidential: bool = True + controller_service: str = "" + + class MockSystemDManager: + enable_and_start = MagicMock(return_value=True) + + class FakeVmPool: + executions: dict[ItemHash, FakeExecution] = {} + + def __init__(self): + self.executions[vm_hash] = FakeExecution() + self.systemd_manager = MockSystemDManager() + + with tempfile.NamedTemporaryFile() as temp_file: + form_data = aiohttp.FormData() + form_data.add_field("session", open(temp_file.name, "rb"), filename="session.b64") + form_data.add_field("godh", open(temp_file.name, "rb"), filename="godh.b64") + + with mock.patch( + "aleph.vm.orchestrator.views.authentication.authenticate_jwk", + return_value=instance_message.sender, + ): + app = setup_webapp() + app["vm_pool"] = FakeVmPool() + client = await aiohttp_client(app) + response = await client.post( + f"/control/machine/{vm_hash}/confidential/initialize", + data=form_data, + ) + assert response.status == 200 + assert await response.text() == f"Started VM with ref {vm_hash}" + app["vm_pool"].systemd_manager.enable_and_start.assert_called_once() + + +@pytest.mark.asyncio +async def test_reboot_ok(aiohttp_client, mocker): + mock_address = "mock_address" + mock_hash = "fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_" + mocker.patch( + "aleph.vm.orchestrator.views.authentication.authenticate_jwk", + return_value=mock_address, + ) + + class FakeVmPool: + executions = { + mock_hash: mocker.Mock( + vm_hash=mock_hash, + message=mocker.Mock(address=mock_address), + is_confidential=False, + is_running=True, + ), + } + systemd_manager = mocker.Mock(restart=mocker.Mock()) + + app = setup_webapp() + pool = FakeVmPool() 
+ app["vm_pool"] = pool + app["pubsub"] = FakeVmPool() + client = await aiohttp_client(app) + response = await client.post( + f"/control/machine/{mock_hash}/reboot", + ) + assert response.status == 200 + assert ( + await response.text() == "Rebooted VM with ref fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_" + ) + assert pool.systemd_manager.restart.call_count == 1 + + +@pytest.mark.asyncio +async def test_websocket_logs_missing_auth(aiohttp_client, mocker): + mock_address = "mock_address" + mock_hash = "fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_" + + fake_queue: Queue[tuple[str, str]] = asyncio.Queue() + await fake_queue.put(("stdout", "this is a first log entry")) + + fakeVmPool = mocker.Mock( + executions={ + mock_hash: mocker.Mock( + vm_hash=mock_hash, + message=mocker.Mock(address=mock_address), + is_confidential=False, + is_running=True, + vm=mocker.Mock( + get_log_queue=mocker.Mock(return_value=fake_queue), + ), + ), + }, + ) + app = setup_webapp() + app["vm_pool"] = fakeVmPool + app["pubsub"] = None + client = await aiohttp_client(app) + websocket = await client.ws_connect( + f"/control/machine/{mock_hash}/stream_logs", + ) + # Wait for message without sending an auth package. 
+    # Test with a timeout because we receive nothing
+    with pytest.raises((TimeoutError, asyncio.exceptions.TimeoutError)):
+        response = await websocket.receive_json(timeout=1)
+        assert False
+
+    # It's totally reachable with the pytest.raises
+    # noinspection PyUnreachableCode
+    await websocket.close()
+    assert websocket.closed
+
+
+@pytest.mark.asyncio
+async def test_websocket_logs_invalid_auth(aiohttp_client, mocker):
+    mock_address = "mock_address"
+    mock_hash = "fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_"
+
+    fake_queue: Queue[tuple[str, str]] = asyncio.Queue()
+    await fake_queue.put(("stdout", "this is a first log entry"))
+
+    fakeVmPool = mocker.Mock(
+        executions={
+            mock_hash: mocker.Mock(
+                vm_hash=mock_hash,
+                message=mocker.Mock(address=mock_address),
+                is_confidential=False,
+                is_running=True,
+                vm=mocker.Mock(
+                    get_log_queue=mocker.Mock(return_value=fake_queue),
+                ),
+            ),
+        },
+    )
+    app = setup_webapp()
+    app["vm_pool"] = fakeVmPool
+    app["pubsub"] = None
+    client: TestClient = await aiohttp_client(app)
+    websocket = await client.ws_connect(
+        f"/control/machine/{mock_hash}/stream_logs",
+    )
+
+    await websocket.send_json({"auth": "invalid auth package"})
+    response = await websocket.receive()
+    # Subject to change in the future, for now the connection is broken and closed
+    assert response.type == aiohttp.WSMsgType.TEXT
+    assert (
+        response.data == '{"status": "failed", "reason": "Invalid format for auth packet, see /doc/operator_auth.md"}'
+    )
+    response = await websocket.receive()
+    assert response.type == aiohttp.WSMsgType.CLOSE
+    assert websocket.closed
+
+
+@pytest.mark.asyncio
+async def test_websocket_logs_good_auth(aiohttp_client, mocker, patch_datetime_now):
+    "Test valid authentication for websocket logs endpoint"
+    payload = {"time": "2010-12-25T17:05:55Z", "method": "GET", "path": "/", "domain": "localhost"}
+    signer_account, headers = await generate_signer_and_signed_headers_for_operation(patch_datetime_now,
payload) + + mock_address = signer_account.address + mock_hash = "fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_" + + fake_queue: Queue[tuple[str, str]] = asyncio.Queue() + await fake_queue.put(("stdout", "this is a first log entry")) + + fakeVmPool = mocker.Mock( + executions={ + mock_hash: mocker.Mock( + vm_hash=mock_hash, + message=mocker.Mock(address=mock_address), + is_confidential=False, + is_running=True, + vm=mocker.Mock( + get_log_queue=mocker.Mock(return_value=fake_queue), + ), + ), + }, + ) + app = setup_webapp() + app["vm_pool"] = fakeVmPool + app["pubsub"] = None + client = await aiohttp_client(app) + websocket = await client.ws_connect( + f"/control/machine/{mock_hash}/stream_logs", + ) + # Need to deserialize since we pass a json otherwhise it get double json encoded + # which is not what the endpoint expect + auth_package = { + "X-SignedPubKey": json.loads(headers["X-SignedPubKey"]), + "X-SignedOperation": json.loads(headers["X-SignedOperation"]), + } + + await websocket.send_json({"auth": auth_package}) + response = await websocket.receive_json() + assert response == {"status": "connected"} + + response = await websocket.receive_json() + assert response == {"message": "this is a first log entry", "type": "stdout"} + + await websocket.close() + assert websocket.closed + + +@pytest.mark.asyncio +async def test_get_past_logs(aiohttp_client, mocker, patch_datetime_now): + mock_address = "0x40684b43B88356F62DCc56017547B6A7AC68780B" + mock_hash = "fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_" + mocker.patch( + "aleph.vm.orchestrator.views.authentication.authenticate_jwk", + return_value=mock_address, + ) + mocker.patch( + "aleph.vm.orchestrator.metrics.get_last_record_for_vm", + return_value=ExecutionRecord( + message="""{ + "address": "0x40684b43B88356F62DCc56017547B6A7AC68780B", + "time": 1720816744.639107, + "allow_amend": false, + "metadata": null, + "authorized_keys": null, + "variables": null, + "environment": 
{ + "reproducible": false, + "internet": true, + "aleph_api": true, + "shared_cache": false + }, + "resources": { + "vcpus": 1, + "memory": 1024, + "seconds": 300, + "published_ports": null + }, + "payment": null, + "requirements": null, + "volumes": [ + { + "comment": null, + "mount": "/opt/packages", + "ref": "7338478721e2e966da6395dbfa37dab7b017b48da55b1be22d4eccf3487b836c", + "use_latest": true + } + ], + "replaces": null, + "type": "vm-function", + "code": { + "encoding": "squashfs", + "entrypoint": "main:app", + "ref": "c4253bf514d2e0a271456c9023c4b3f13f324e53c176e9ec29b98b5972b02bc7", + "interface": null, + "args": null, + "use_latest": true + }, + "runtime": { + "ref": "63f07193e6ee9d207b7d1fcf8286f9aee34e6f12f101d2ec77c1229f92964696", + "use_latest": true, + "comment": "" + }, + "data": null, + "export": null, + "on": { + "http": true, + "message": null, + "persistent": false + } +}""" + ), + ) + mocker.patch( + "aleph.vm.orchestrator.views.operator.get_past_vm_logs", + return_value=[ + EntryDict( + SYSLOG_IDENTIFIER=f"vm-{mock_hash}-stdout", + MESSAGE="logline1", + __REALTIME_TIMESTAMP=datetime.datetime(2020, 10, 12, 1, 2), + ), + EntryDict( + SYSLOG_IDENTIFIER=f"vm-{mock_hash}-stderr", + MESSAGE="logline2", + __REALTIME_TIMESTAMP=datetime.datetime(2020, 10, 12, 1, 3), + ), + ], + ) + + app = setup_webapp() + pool = mocker.MagicMock(executions={}) + app["vm_pool"] = pool + app["pubsub"] = mocker.MagicMock() + client = await aiohttp_client(app) + response = await client.get( + f"/control/machine/{mock_hash}/logs", + ) + + assert response.status == 200 + assert await response.json() == [ + { + "MESSAGE": "logline1", + "SYSLOG_IDENTIFIER": "vm-fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_-stdout", + "__REALTIME_TIMESTAMP": "2020-10-12 01:02:00", + "file": "stdout", + }, + { + "MESSAGE": "logline2", + "SYSLOG_IDENTIFIER": "vm-fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_fake_vm_-stderr", + "__REALTIME_TIMESTAMP": "2020-10-12 
01:03:00", + "file": "stderr", + }, + ] diff --git a/tests/supervisor/views/test_run_code.py b/tests/supervisor/views/test_run_code.py new file mode 100644 index 000000000..0c42b5243 --- /dev/null +++ b/tests/supervisor/views/test_run_code.py @@ -0,0 +1,46 @@ +import pytest +from aiohttp import ClientResponseError, web +from aiohttp.test_utils import make_mocked_request +from aiohttp.web_exceptions import HTTPBadRequest +from aleph_message.exceptions import UnknownHashError +from aleph_message.models import ItemHash + +from aleph.vm.conf import settings +from aleph.vm.orchestrator.views import run_code_from_path + + +@pytest.mark.asyncio +async def test_run_code_from_invalid_path(aiohttp_client): + """ + Test that the run_code_from_path endpoint raises the right + error on invalid paths. + """ + item_hash = "invalid-item-hash" + with pytest.raises(UnknownHashError): + assert ItemHash(item_hash).is_storage(item_hash) + + app = web.Application() + + app.router.add_route("*", "/vm/{ref}{suffix:.*}", run_code_from_path) + client = await aiohttp_client(app) + + invalid_hash_request: web.Request = make_mocked_request( + "GET", + "/vm/" + item_hash, + match_info={ + "ref": item_hash, + "suffix": "/some/suffix", + }, + headers={"Host": settings.DOMAIN_NAME}, + app=app, + ) + with pytest.raises(HTTPBadRequest): + await run_code_from_path(invalid_hash_request) + + # Calling the view from an HTTP client should result in a Bad Request error. 
+ resp = await client.get("/vm/" + item_hash + "/some/suffix") + assert resp.status == HTTPBadRequest.status_code + text = await resp.text() + assert text == f"Invalid message reference: {item_hash}" + with pytest.raises(ClientResponseError): + resp.raise_for_status() diff --git a/tutorials/ADVANCED.md b/tutorials/ADVANCED.md new file mode 100644 index 000000000..4eb5cf06e --- /dev/null +++ b/tutorials/ADVANCED.md @@ -0,0 +1,156 @@ +# Tutorial: Advanced usage of Aleph-VM + +## Aleph messages + +The [aleph-client](https://github.com/aleph-im/aleph-client) library is pre-installed and +pre-configured in the official Aleph-VM Python runtime. It is tweaked to work even +for programs with the access to internet disabled. + +### Get messages + +Use `aleph_client.asynchronous.get_messages` to get messages from the Aleph network. + +```python +from aleph_client.asynchronous import get_messages + +(...) +messages = await get_messages( + hashes=["f246f873c3e0f637a15c566e7a465d2ecbb83eaa024d54ccb8fb566b549a929e"] +) +``` + +## Post Aleph messages + +ℹ️ Messages posted by VMs may not be authorized by the Aleph network yet. + +Posting messages on the Aleph network requires signing them using a valid account. +Since programs on Aleph-VM are public, they should not contain secrets. Instead of signing messages +themselves, programs should therefore ask their execution host to sign messages on their behalf +using a `RemoteAccount`. The hash of the VM will be referenced in the message content `'address'` +field. + +```python +from aleph_client.chains.remote import RemoteAccount + +(...) 
+ +account = await RemoteAccount.from_crypto_host( + host="http://localhost", unix_socket="/tmp/socat-socket") + +content = { + "date": datetime.utcnow().isoformat(), + "test": True, + "answer": 42, + "something": "interesting", +} +response = await create_post( + account=account, + post_content=content, + post_type="test", + ref=None, + channel="TEST", + inline=True, + storage_engine="storage", +) +``` + +## Shared cache + +The shared cache is a simple key-value store available to programs to store information that would +be useful to persist between executions but can be recovered from other sources. +The cache is specific to one program on one execution node. + +The persistence of the cache should not be relied on - its content can be deleted anytime when +the program is not running. Important data must be persisted on the Aleph network. + +To use the cache, you can use the following methods: +```python +from aleph_client.vm.cache import VmCache +cache = VmCache() + +async def f(): + await cache.set('key', 'value') + value = await cache.get('key') + await cache.delete('key') +``` + +## Volumes + +Volumes consist in extra storage that can be used by programs on Aleph-VM. If a `mount` point +is specified, they will be mounted on the virtual machine filesystem before your program is +started. + +### Immutable volumes + +Immutable volumes contain extra files that can be used by a program and are stored on the Aleph +network. They can be shared by multiple programs and updated independently of the code of the program. + +You can use them to store Python libraries that your program depends on, use them in multiple +programs and update them independently of other programs. + +#### 1. 
Create an immutable volume + +Create with a volume containing a Python library: + +```shell +mkdir extralib +cd extralib +mksquashfs extralib extra-lib.squashfs +``` + +Start an IPFS daemon: +```shell +ipfs daemon +``` + +Upload the volume to IPFS: +```shell +ipfs add extra-lib.squashfs +``` +and retrieve the printed IPFS hash. + +Pin the volume on Aleph using `aleph pin`: +```shell +aleph pin $IPFS_HASH --channel TEST +``` + +Mention the volume in the prompt of `aleph program (...)` + +#### 2. Update an immutable volume + +Follow the same procedure you used to create an immutable volume, but pin it with a +reference to the original using: + +```shell +aleph pin $IPFS_HASH --channel TEST --ref $ORIGINAL_HASH +``` + +### Host persistent volumes + +Host persistent volumes are empty volumes that your program can use to store information that +would be useful to persist between executions but can be recovered from other sources. +Like the cache, host persistent volumes are specific to one program on one execution node. + +Unlike the cache, you can use these volumes to store any kind of files, including databases. + +There is no guarantee that these volumes will not be deleted anytime when the +program is not running and important data must be persisted on the Aleph network. + +Host persistent volumes have a fixed size and must be named. The name will be used in the future +to allow changing the mount point of a volume. + + +## Message structure + +Full example +https://github.com/aleph-im/aleph-message/blob/main/aleph_message/tests/messages/machine.json + +## Custom domains + +You can make your own domain point to a VM. To achieve this, you need to create the following DNS +records: + +1. A `CNAME` record to the server, for example: +`hosted-on-aleph.net IN CNAME aleph.sh` +2. 
A `TXT` record to the VM hash with the prefix _aleph-id, for example: +`_aleph-id.hosted-on-aleph.org 60 IN TXT "b34f193470c349b1d9b60903a6d172e8c335710736d4999ff05971692febe8bc"` diff --git a/tutorials/README.md b/tutorials/README.md new file mode 100644 index 000000000..d96bc3413 --- /dev/null +++ b/tutorials/README.md @@ -0,0 +1,257 @@ +# Tutorial: Creating and hosting a program on Aleph-VM + +This is the tutorial for Creating and hosting a program on Aleph-VM, which has been developed and maintained by [Aleph.im](https://www.aleph.im). + +## 0. Welcome + +Hi, welcome to _Creating and hosting a program on Aleph-VM_. In this tutorial we will take you +through the fundamentals of running programs on the [Aleph.im](https://aleph.im/) Virtual Machines. +After the tutorial you should have a rough mental picture of how the virtual machines work and +some good pointers for getting further with running programs of your own. + +We expect you to know a little Python and have some experience with Python web frameworks such as +[FastAPI](https://fastapi.tiangolo.com/) or Flask. +The first chapters of the [FastAPI Tutorial](https://fastapi.tiangolo.com/tutorial/) should cover +enough to get started. + +## What we will cover + +First we will see how to run the first example from FastAPI's tutorial on Aleph.im, how to +access it and how to update it. + +Then we will extend the program to add some Aleph specific functionalities. + +## Requirements + +To complete this tutorial, you will use the `aleph` command from +[aleph-client](https://github.com/aleph-im/aleph-client), the `fastapi` framework to create a +simple API and the `uvicorn` server to test your program on your desktop before uploading it on +Aleph. 
+
+First, you need a recent version of Python and [pip](https://pip.pypa.io/en/stable/),
+preferably running on Debian 11 or Ubuntu Linux 22.04 since we have not tested other platforms yet,
+but feel free to use the platform of your choice if you have the skills to adapt our instructions to it.
+
+Some cryptographic functionalities of Aleph use curve secp256k1 and require installing [libsecp256k1](https://github.com/bitcoin-core/secp256k1).
+Archiving programs and volumes requires
+[Squashfs user space tools](https://github.com/plougher/squashfs-tools).
+
+- Linux :
+```
+sudo apt-get install -y python3-pip libsecp256k1-dev squashfs-tools
+```
+
+- macOS :
+```
+brew tap cuber/homebrew-libsecp256k1
+brew install libsecp256k1 squashfs
+```
+
+You will also need [Uvicorn](https://www.uvicorn.org/) for local testing
+and the [Python Aleph client](https://github.com/aleph-im/aleph-client) for its command-line tools:
+
+- Linux / macOS :
+
+```
+pip3 install "uvicorn[standard]" aleph-client fastapi eth_account
+```
+
+## 1. Understanding the VMs
+
+Aleph is a cross-blockchain layer-2 network specifically focused on decentralized applications and
+their related infrastructure (storage, computing servers, security).
+
+Aleph-VM is the computing part of the network: It allows the execution of programs stored on the
+Aleph network. These programs can interact with the network itself and with the rest of the internet.
+
+In the current stage, these programs can be triggered from outside via HTTPS calls. Future ways to
+trigger the launch of the programs are planned, such as reacting to specific messages on the
+network.
+
+### Virtual Machines
+
+Programs on Aleph run within virtual machines: emulated computer systems with dedicated
+resources that run isolated from each other.
+
+Aleph Virtual Machines (VMs) are based on Linux and
+use [Firecracker](https://firecracker-microvm.github.io/) under the hood.
They boot very fast, +so they can be launched on demand and there is no need to keep them running while waiting for new +requests. + +Each program runs on its own dedicated Linux system, with the host providing additional +functionalities related to Aleph. + +### Runtime + +The base of each VM is a Linux +[root filesystem](https://en.wikipedia.org/wiki/Root_directory) named __runtime__ and configured +to run programs on the Aleph platform. + +Aleph provides a supported runtime to launch programs written in Python or binaries. +* Python programs must support the [ASGI interface](https://asgi.readthedocs.io/en/latest/), described in the example below. +* Binaries must listen for HTTP requests on port 8080 + +The runtime currently supported by Aleph is +[aleph-debian-11-python](../runtimes/aleph-debian-11-python). + +### Volumes + +VMs can be extended by specifying additional volumes that will be mounted in the system. + +**Read-only volumes** are useful to separate Python virtual environments, Javascript _node_modules_ +or static data from the program itself. These volumes can be updated independently from the +program and the runtime, and maintained by a third party. + +**Ephemeral volumes** provide temporary disk storage to a VM during its execution without requiring +more memory. + +**Host persistent volumes** are persisted on the VM execution node, but may be garbage collected +by the node without warning. + +**Store persistent volumes** (not available yet) are persisted on the Aleph network. New VMs will try to use the latest +version of this volume, with no guarantee against conflicts. + +## 2. Writing a Python program + +To create the first program, open your favourite code editor and create a directory named +`my-program`, containing a file named `main.py`. + +``` +. 
+└── my-program/ + └── main.py +``` + +Then write the following code in the file: +```python +from fastapi import FastAPI + +app = FastAPI() + + +@app.get("/") +async def root(): + return {"message": "Hello World"} +``` + +That's it for your first program. + +This code comes from the [FastAPI tutorial](https://fastapi.tiangolo.com/tutorial/first-steps/). +Have a look at it for a better understanding of what it does and how it works. + +## 3. Testing locally + +Before uploading your program on Aleph, it is best to first test it locally. + +Aleph uses the standard [ASGI interface](https://asgi.readthedocs.io/en/latest/introduction.html) to +interface with programs written in Python. ASGI interfaces with many Python frameworks, including +FastAPI but also [Django](https://www.djangoproject.com/) +or [Quart](https://github.com/pgjones/quart). + +Test your program locally using uvicorn, an ASGI server: + +```shell +uvicorn main:app --reload +``` + +If you are on Mac OS test your program locally by starting to run +```shell +vagrant ssh +``` + +Then go to your working repository and launch: + +```shell +python3 -m uvicorn main:app --reload --host=0.0.0.0 +``` + +Then open http://127.0.0.1:8000 . The `--reload` option will automatically reload your app +when the code changes. + +> ℹ️ If you are running this on a different system than your desktop, specify the IP address of +> that system using `uvicorn main:app --reload --host 1.2.3.4`, where `1.2.3.4` is the IP address +> of the system. +> Then open your browser on http://1.2.3.4:8000 instead. + +> ℹ Installing uvicorn should add the `uvicorn` command to your shell. If it does not, use +> `python3 -m uvicorn` to run it. + +## 4. 
Uploading + +After installing [aleph-client](https://github.com/aleph-im/aleph-client), you should have access to the `aleph` command: + +```shell +aleph --help +``` + +Upload your program: + +```shell +aleph program upload ./my-program main:app +``` + +Press Enter at the following prompt to use the default runtime: +``` +Ref of runtime ? [bd79839bf96e595a06da5ac0b6ba51dea6f7e2591bb913deccded04d831d29f4] +``` + +Press Enter again to skip adding extra volumes to your program: +``` +Add volume ? [y/N] +``` + +You should then get a response similar to the following: +``` +Your program has been uploaded on Aleph . + +Available on: + https://aleph.sh/vm/1d3842fc4257c0fd4f9c7d5c55bba16264de8d44f47265a14f8f6eb4d542dda2 + https://du4ef7cck7ap2t44pvoflo5bmjsn5dke6rzglikpr5xljvkc3wra.aleph.sh +Visualise on: + https://explorer.aleph.im/address/ETH/0x101d8D16372dBf5f1614adaE95Ee5CCE61998Fc9/message/PROGRAM/1d3842fc4257c0fd4f9c7d5c55bba16264de8d44f47265a14f8f6eb4d542dda2 +``` + +You may get the warning `Message failed to publish on IPFS and/or P2P`. +This is common and usually not an issue. + +> ℹ The second URL uses a hostname dedicated to your VM. Aleph identifiers are too long to work +> for URL subdomains, so a base32 encoded version of the identifier is used instead. + +> ℹ You can make your own domain point to the VM. See the [ADVANCED](./ADVANCED.md) section. + +## 5. Running + +You can now run your program by opening one of the URLs above. Each URL is unique for one program. + +https://aleph.sh/vm/1d3842fc4257c0fd4f9c7d5c55bba16264de8d44f47265a14f8f6eb4d542dda2 + +## 6. Uploading updates + +`"Hello World"` is a nice message, but wouldn't it be nice to have something more friendly, such +as `"Hello Friend"` ? Update the program with the message of your choice. + +You could upload the new version as a new program, but this would break the URL above and you +would have to give the updated URL to all your friends. 
While Aleph messages cannot be edited, +there is a solution to this issue: you can publish _amend_ messages that reference the original +message to add some changes to it. + +The `aleph update` command is similar to `aleph program`, except it requires the hash of the +program to update. + +```shell +aleph update $HASH ./my-program +``` + +Note that _amend_ messages must be sent from the same Aleph address as the original +program to work, else they will be ignored. + +| ℹ️ Backup your private key, else you may lose the ability to update a program + +## Next steps + +Check out the [Requirements](./REQUIREMENTS.md) page to add additional Python packages to your +program from the Python Package Index ([PyPI](https://www.pypi.org)). + +Check out the [Writing a non-Python program](./SERVER.md) page to run a program written in another language than Python. + +Check out the [Advanced usage](./ADVANCED.md) page for more options and capabilities. diff --git a/tutorials/REQUIREMENTS.md b/tutorials/REQUIREMENTS.md new file mode 100644 index 000000000..6a87fe359 --- /dev/null +++ b/tutorials/REQUIREMENTS.md @@ -0,0 +1,112 @@ +# Tutorial: Adding Python libraries to an Aleph VM + +## 0.a Setup your environment (Debian/Ubuntu Linux) +```shell +sudo apt install python3-pip python3-venv squashfs-tools +``` + +```shell +pip3 install aleph-client +``` + +## 0.b Quick install (macOS using Vagrant) + +For starting to run aleph-vm on mac you have to initialize a VM. + +### Install VirtualBox +You will need VirtualBox, a free and open-source hosted hypervisor (or virtual machine manager) for the next step. + +You can download and install it here . + +### Install Vagrant +Vagrant is an open-source software product for building and maintaining portable virtual software development environments based on VirtualBox. + +Run following command for installing it (before make sure [homebrew](brew.sh) is installed on your mac). 
+
+```shell
+brew install vagrant
+```
+
+Once Vagrant is installed, go to your working repository and initialize Vagrant
+
+```shell
+vagrant init boxomatic/debian-11
+```
+
+A `Vagrantfile` (in Ruby) will be created, you can consult it if you wish.
+
+Now in order to instantiate a new virtual machine, run the following command:
+
+```shell
+vagrant up
+```
+
+If this does not work, check your System Preferences > Security and Privacy and allow the "System software from developer" in the bottom of the window.
+
+Once the command is done, your virtual machine will be booted and ready!
+
+### Set Vagrantfile configuration
+
+Open the Vagrantfile and add the following after `config.vm.box`
+
+```shell
+config.vm.network "forwarded_port", guest:8000, host:8000
+```
+
+### 1. Install the packages in a directory
+
+```shell
+pip install -t /opt/packages -r requirements.txt
+```
+
+```shell
+mksquashfs /opt/packages packages.squashfs
+```
+
+
+## 2. Upload the packages
+
+### 2.a. Without IPFS (small size)
+
+```shell
+aleph upload packages.squashfs
+```
+
+### 2.b. With IPFS
+```shell
+/opt/go-ipfs/ipfs daemon
+```
+
+```shell
+ipfs add packages.squashfs
+```
+| added QmWWX6BaaRkRSr2iNdwH5e29ACPg2nCHHXTRTfuBmVm3Ga venv.squashfs
+
+```shell
+aleph pin QmWWX6BaaRkRSr2iNdwH5e29ACPg2nCHHXTRTfuBmVm3Ga
+```
+
+## 3. Create your program
+
+```shell
+aleph program upload ./my-program main:app
+```
+
+Press Enter at the following prompt to use the default runtime:
+```
+Ref of runtime ? [bd79839bf96e595a06da5ac0b6ba51dea6f7e2591bb913deccded04d831d29f4]
+```
+
+Press `Y` to add extra volumes to your program:
+```
+Add volume ? [y/N] Y
+Description: Python Packages
+Mount: /opt/packages
+Ref: 61f43ab261060ff94838dc94313a70cdb939a5fc6c99924b96d55dcc2c108d03
+Use latest version ? [Y/n]
+```
+
+Finally, press Enter to skip adding more volumes.
+```shell
+Add volume ?
[y/N] +``` diff --git a/tutorials/SERVER.md b/tutorials/SERVER.md new file mode 100644 index 000000000..2dcff54e5 --- /dev/null +++ b/tutorials/SERVER.md @@ -0,0 +1,96 @@ +# Tutorial: Creating a non-Python program on Aleph-VM + +> This tutorial follows up the first tutorial [Creating and hosting a program on Aleph-VM](./README.md). + +## 0. Welcome + +In this second tutorial, we will guide you on how to run programs written in any programming language on Aleph Virtual Machines. + +In addition to running Python programs using ASGI as covered in the first tutorial, +Aleph VMs also support any program that listens for HTTP requests on port 8080. + +This can be used to run existing programs on Aleph VMs, or to use other programming languages to write programs and run them on Aleph-VM. + +### What we will cover + +Since Python is the only language currently supported, this tutorial we will cover two other languages: [Rust](https://www.rust-lang.org/) and Javascript ([NodeJS](https://nodejs.org/)). + +## 1. Rust + +In this first section, you will run a program written in Rust on an Aleph VM. + +### 1.a. Requirements + +You need a Rust compiler. You can install one using the [official Install Rust guide](https://www.rust-lang.org/tools/install) +or via your favourite package manager. + + $ sudo apt install rustc cargo + +### 1.b. 
Writing a Rust program
+
+Let's use a very simple HTTP server inspired by the [Building a Single-Threaded Web Server](https://doc.rust-lang.org/book/ch20-01-single-threaded.html)
+section of The Rust Programming Language Book:
+
+```shell
+$ cargo new example_http_rust
+     Created binary (application) `example_http_rust` project
+$ cd example_http_rust
+```
+
+Filename: `src/main.rs`
+```rust
+use std::io::prelude::*;
+use std::net::TcpListener;
+use std::net::TcpStream;
+
+fn main() {
+
+    let listener = TcpListener::bind("0.0.0.0:8080").unwrap();
+    println!("Running on 0.0.0.0:8080");
+    for stream in listener.incoming() {
+        let stream = stream.unwrap();
+        handle_connection(stream);
+    }
+}
+
+fn handle_connection(mut stream: TcpStream) {
+    println!("handling connection");
+
+    const MSG: &str = "helloworld";
+    let msg = MSG.as_bytes();
+
+    let response = format!("{:x?}", msg);
+
+    let mut buffer = [0; 1024];
+
+    stream.read(&mut buffer).unwrap();
+
+    let response = format!("HTTP/1.1 200 OK\n\nOKIDOK\n{}", response);
+
+    stream.write(response.as_bytes()).unwrap();
+    stream.flush().unwrap();
+}
+```
+
+```shell
+cargo run
+```
+
+Open http://127.0.0.1:8080 in your browser to test your new server.
+
+### 1.c. Publishing a Rust program
+
+Compile your program:
+```shell
+cargo build --release
+```
+
+Publish it on Aleph using the same procedure as with the Python example, except the entrypoint refers to the name of the binary to execute.
+
+```shell
+aleph program upload ./target/release/example_http_rust example_http_rust
+```
+
+If your program takes some arguments, pass them in the entrypoint by using quotes: `"example_http_rust --help"`.
+
+ℹ️ If you get the error `Invalid zip archive`, you are probably missing the Squashfs user tool `mksquashfs`. 
In that case, first create the squashfs archive and then upload it using `aleph program upload ./target/release/example_http_rust.squashfs example_http_rust`
diff --git a/tutorials/TESTING.md b/tutorials/TESTING.md
new file mode 100644
index 000000000..71086da78
--- /dev/null
+++ b/tutorials/TESTING.md
@@ -0,0 +1,65 @@
+# Testing your VMs locally
+
+You can test your VM locally without uploading each version on the Aleph network.
+
+To do this, you'll want to use the `--fake-data-program` or `-f` argument of the VM Supervisor.
+
+## 0. Build the required squashfs volumes
+
+Build or download the required squashfs volumes:
+
+```shell
+cd ./runtimes/aleph-debian-11-python/
+sudo bash ./create_disk_image.sh
+
+cd ../..
+```
+> ℹ️ This does not work in a container since debootstrap requires mounting volumes.
+
+This will create a local runtime root filesystem in `./runtimes/aleph-debian-11-python/rootfs.squashfs`.
+
+```shell
+cd ./examples/volumes/
+bash ./build_squashfs.sh
+
+cd ../..
+```
+This will create a local example read-only volume named `./examples/volumes/volume-venv.squashfs`.
+
+## 1. In a Docker container
+
+Run the developer image, mounting the two generated volumes:
+```shell
+docker run -ti --rm \
+  -v "$(pwd)/runtimes/aleph-debian-11-python/rootfs.squashfs:/opt/aleph-vm/runtimes/aleph-debian-11-python/rootfs.squashfs:ro" \
+  -v "$(pwd)/examples/volumes/volume-venv.squashfs:/opt/aleph-vm/examples/volumes/volume-venv.squashfs:ro" \
+  --device /dev/kvm \
+  -p 4020:4020 \
+  docker.io/alephim/vm-supervisor-dev
+```
+
+Or launch this command using:
+```shell
+bash ./docker/run_vm_supervisor.sh
+```
+
+
+Within the container, run the supervisor with fake data:
+```shell
+python3 -m orchestrator --print-settings --very-verbose --system-logs --fake-data-program ./examples/example_fastapi
+```
+
+> ℹ️ The command is in your .bash_history, press the up arrow key to skip typing it.
+
+## 2. On your system
+
+### 2.a. 
Install the system requirements + +See [../vm_supervisor/README.md](../src/aleph/vm/orchestrator/README.md) to install the system requirements. + +### 2.b. Run the supervisor with fake data: + +```shell +python3 -m orchestrator --print-settings --very-verbose --system-logs --fake-data-program ./examples/example_fastapi +``` + diff --git a/vm_connector/README.md b/vm_connector/README.md index e67b4c0d7..40397d97b 100644 --- a/vm_connector/README.md +++ b/vm_connector/README.md @@ -2,7 +2,7 @@ Service to schedule the execution of Aleph VM functions for the [Aleph.im](https://aleph.im/) project and assist -[VM Supervisors](../vm_supervisor) with operations related +[VM Supervisors](../src/aleph/vm/orchestrator) with operations related to the Aleph network. ## 1. Supported platforms @@ -20,33 +20,24 @@ apt update apt install -y docker.io ``` -### 2.b. Build the Docker image +### 2.b. Pull the Docker image -Clone this reposotiry on the host machine and enter it: ```shell -git clone https://github.com/aleph-im/aleph-vm.git -cd aleph-vm/ -```` - -Build the image: -```shell -docker build -t aleph-connector -f docker/vm_connector.dockerfile . +docker pull alephim/vm-connector:alpha ``` ## 3. Running -### Run the Docker image +Run the Docker image ```shell -docker run -ti --rm -p 8000:8000/tcp aleph-connector +docker run -d -p 4021:4021/tcp --restart=always --name vm-connector alephim/vm-connector:alpha ``` -http://localhost:8000/ - ## 4. Configuration -The VM Supervisor can be configured using environment variables: +The VM Supervisor can be configured using environment variables: -`ALEPH_SERVER` should point to your Aleph Node. -Defaults to https://api2.aleph.im +`API_SERVER` should point to your Aleph Node. 
+Defaults to https://official.aleph.cloud `IPFS_SERVER` should point to your IPFS Gateway, defaults to https://ipfs.aleph.im/ipfs diff --git a/vm_connector/conf.py b/vm_connector/conf.py index 1ebf4cfea..d2ee465fc 100644 --- a/vm_connector/conf.py +++ b/vm_connector/conf.py @@ -1,26 +1,37 @@ -from os import getenv +import logging from typing import NewType +from pydantic import BaseSettings + +logger = logging.getLogger(__name__) + Url = NewType("Url", str) -class Settings: - ALEPH_SERVER: Url = getenv("ALEPH_API_SERVER", "https://api2.aleph.im") - IPFS_SERVER: Url = getenv("ALEPH_IPFS_SERVER", "https://ipfs.aleph.im/ipfs") - OFFLINE_TEST_MODE: bool = getenv("ALEPH_OFFLINE_TEST_MODE", "false") == "true" +class ConnectorSettings(BaseSettings): + API_SERVER: Url = Url("https://official.aleph.cloud") + IPFS_SERVER: Url = Url("https://ipfs.aleph.im/ipfs") + OFFLINE_TEST_MODE: bool = False def update(self, **kwargs): for key, value in kwargs.items(): + if key != key.upper(): + logger.warning(f"Setting {key} is not uppercase") if hasattr(self, key): setattr(self, key, value) else: raise ValueError(f"Unknown setting '{key}'") def display(self) -> str: - result = "" - for annotation, value in self.__annotations__.items(): - result += f"{annotation} ({value.__name__}) = {getattr(self, annotation)}" - return result + return "\n".join( + f"{annotation:<17} = {getattr(self, annotation)}" for annotation, value in self.__annotations__.items() + ) + + class Config: + env_prefix = "ALEPH_" + case_sensitive = False + env_file = ".env" + # Settings singleton -settings = Settings() +settings = ConnectorSettings() diff --git a/vm_connector/main.py b/vm_connector/main.py index 7743d817d..86494dd53 100644 --- a/vm_connector/main.py +++ b/vm_connector/main.py @@ -1,13 +1,15 @@ import json import logging -import os.path -from typing import Optional, Dict, Union +from typing import Optional -# from aleph_client.chains.common import get_fallback_private_key -# from aleph_client.asynchronous 
import get_posts import aiohttp -from fastapi import FastAPI -from fastapi.responses import StreamingResponse, Response, FileResponse +from aleph_client.asynchronous import create_post +from aleph_client.chains.common import get_fallback_private_key +from aleph_client.chains.ethereum import ETHAccount +from aleph_client.types import StorageEnum +from fastapi import FastAPI, HTTPException, Request +from fastapi.responses import Response, StreamingResponse +from pydantic import BaseModel from .conf import settings @@ -22,14 +24,21 @@ def read_root(): return {"Server": "Aleph.im VM Connector"} -class Encoding: - plain = "plain" - zip = "zip" +async def get_latest_message_amend(ref: str, sender: str) -> Optional[dict]: + async with aiohttp.ClientSession() as session: + url = f"{settings.API_SERVER}/api/v0/messages.json?msgType=STORE&sort_order=-1&refs={ref}&addresses={sender}" + resp = await session.get(url) + resp.raise_for_status() + resp_data = await resp.json() + if resp_data["messages"]: + return resp_data["messages"][0] + else: + return None -async def get_message(hash_: str) -> Optional[Dict]: +async def get_message(hash_: str) -> Optional[dict]: async with aiohttp.ClientSession() as session: - url = f"{settings.ALEPH_SERVER}/api/v0/messages.json?hashes={hash_}" + url = f"{settings.API_SERVER}/api/v0/messages.json?hashes={hash_}" resp = await session.get(url) resp.raise_for_status() resp_data = await resp.json() @@ -52,115 +61,144 @@ async def stream_url_chunks(url): @app.get("/download/message/{ref}") -async def download_message( - ref: str, last_amend: Optional[bool] = True -) -> Union[Dict, Response]: +async def download_message(ref: str) -> dict: """ Fetch on Aleph and return a VM function message, after checking its validity. Used by the VM Supervisor run the code. 
:param ref: item_hash of the code file - :param last_amend: should the last amend to the code be used :return: a file containing the code file """ - if settings.OFFLINE_TEST_MODE: - filepath = os.path.abspath("./tests/test_message.json") - with open(filepath) as fd: - return json.load(fd) - msg = await get_message(hash_=ref) - # TODO: Validate the validity of the message (signature, hashes) + # TODO: Validate the message (signature, hashes) + if not msg: + raise HTTPException(status_code=404, detail="Hash not found") - return msg or Response(status_code=404, content="Hash not found") + return msg @app.get("/download/code/{ref}") -async def download_code( - ref: str, last_amend: Optional[bool] = True -) -> Union[StreamingResponse, Response]: +async def download_code(ref: str): """ Fetch on Aleph and return a VM code file, after checking its validity. Used by the VM Supervisor to download function source code. :param ref: item_hash of the code file - :param last_amend: should the last amend to the code be used :return: a file containing the code file """ - - if settings.OFFLINE_TEST_MODE: - filepath = os.path.abspath("./examples/example_fastapi_2.zip") - return FileResponse(filepath, filename=f"{ref}") - - msg = await get_message(hash_=ref) - if not msg: - return Response(status_code=404, content="Hash not found") - - data_hash = msg["content"]["item_hash"] - url = f"{settings.IPFS_SERVER}/{data_hash}" - return StreamingResponse(stream_url_chunks(url), media_type="application/zip") + return await download_data(ref=ref) @app.get("/download/data/{ref}") -async def download_data( - ref: str, last_amend: Optional[bool] = True -) -> Union[StreamingResponse, Response]: +async def download_data(ref: str): """ Fetch on Aleph and return a VM data file, after checking its validity. Used by the VM Supervisor to download state data. 
:param ref: item_hash of the data - :param last_amend: should the last amend to the data be used :return: a file containing the data """ - if settings.OFFLINE_TEST_MODE: - filepath = os.path.abspath("./examples/data.tgz") - return FileResponse(filepath, filename=f"{ref}.tgz") - # Download message msg = await get_message(hash_=ref) if not msg: return Response(status_code=404, content="Hash not found") + media_type = msg["content"].get("mime_type", "application/octet-stream") + data_hash = msg["content"]["item_hash"] - url = f"{settings.IPFS_SERVER}/{data_hash}" - return StreamingResponse(stream_url_chunks(url), media_type="application/gzip") + if msg["content"]["item_type"] == "ipfs": + url = f"{settings.IPFS_SERVER}/{data_hash}" + else: + url = f"{settings.API_SERVER}/api/v0/storage/raw/{data_hash}" + + return StreamingResponse(stream_url_chunks(url), media_type=media_type) @app.get("/download/runtime/{ref}") -async def download_runtime( - ref: str, last_amend: Optional[bool] = True -) -> Union[StreamingResponse, Response]: +async def download_runtime(ref: str): """ Fetch on Aleph and return a VM runtime, after checking its validity. Used by the VM Supervisor to download a runtime. 
:param ref: item_hash of the runtime - :param last_amend: should the last amend to the runtime be used :return: a file containing the runtime """ + return await download_data(ref=ref) - if settings.OFFLINE_TEST_MODE: - filepath = os.path.abspath("./runtimes/aleph-alpine-3.13-python/rootfs.ext4") - return FileResponse(filepath, filename=f"{ref}.ext4") - # Download message - msg = await get_message(hash_=ref) +@app.get("/compute/latest_amend/{item_hash}") +async def compute_latest_amend(item_hash: str) -> str: + msg = await get_message(hash_=item_hash) if not msg: - return Response(status_code=404, content="Hash not found") + raise HTTPException(status_code=404, detail="Hash not found") + sender = msg["sender"] + latest_amend = await get_latest_message_amend(ref=item_hash, sender=sender) + if latest_amend: + # Validation + assert latest_amend["sender"] == sender + assert latest_amend["content"]["ref"] == item_hash - data_hash = msg["content"]["item_hash"] - url = f"{settings.IPFS_SERVER}/{data_hash}" - return StreamingResponse(stream_url_chunks(url), media_type="application/ext4") + return latest_amend["item_hash"] + else: + # Original message is the latest + return item_hash + + +class PostBody(BaseModel): + topic: str + data: str -@app.post("/publish/data/") -async def publish_data(encoding: str): +@app.post("/api/v0/ipfs/pubsub/pub") +@app.post("/api/v0/p2p/pubsub/pub") +async def publish_data(body: PostBody): """ - Publish a new state on the Aleph Network. - :param encoding: - :return: + Publish a new POST message on the Aleph Network. 
""" - raise NotImplementedError() + private_key = get_fallback_private_key() + account: ETHAccount = ETHAccount(private_key=private_key) + + message = json.loads(body.data) + content = json.loads(message["item_content"]) + content_content = content["content"] + + result = await create_post( + account=account, + post_content=content_content, + post_type=content["type"], + address=content["address"], + ref=None, + channel=message["channel"], + inline=True, + storage_engine=StorageEnum.storage, + ) + return {"status": "success"} + + +@app.get("/properties") +async def properties(request: Request): + """Get signing key properties""" + private_key = get_fallback_private_key() + account: ETHAccount = ETHAccount(private_key=private_key) + + return { + "chain": account.CHAIN, + "curve": account.CURVE, + "address": account.get_address(), + "public_key": account.get_public_key(), + } + + +@app.post("/sign") +async def sign_message(request: Request): + """Sign a message""" + # TODO: Check + private_key = get_fallback_private_key() + account: ETHAccount = ETHAccount(private_key=private_key) + + message = await request.json() + message = await account.sign_message(message) + return message diff --git a/vm_connector/tests/test_message.json b/vm_connector/tests/test_message.json index ca24dfd16..b3079f130 100644 --- a/vm_connector/tests/test_message.json +++ b/vm_connector/tests/test_message.json @@ -4,7 +4,7 @@ "content": { "code": { "encoding": "zip", - "entrypoint": "example_fastapi_2:app", + "entrypoint": "example_fastapi:app", "ref": "7eb2eca2378ea8855336ed76c8b26219f1cb90234d04441de9cf8cb1c649d003", "latest_amend": true }, diff --git a/vm_supervisor/__init__.py b/vm_supervisor/__init__.py deleted file mode 100644 index 8996cc760..000000000 --- a/vm_supervisor/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from . 
import supervisor diff --git a/vm_supervisor/__main__.py b/vm_supervisor/__main__.py deleted file mode 100644 index 4890d352e..000000000 --- a/vm_supervisor/__main__.py +++ /dev/null @@ -1,91 +0,0 @@ -import argparse -import logging -import sys - -from . import supervisor -from .conf import settings - -logger = logging.getLogger(__name__) - - -def parse_args(args): - parser = argparse.ArgumentParser( - prog="vm_supervisor", description="Aleph.im VM Supervisor" - ) - parser.add_argument( - "--system-logs", - action="store_true", - dest="system_logs", - default=settings.PRINT_SYSTEM_LOGS, - ) - parser.add_argument( - "--no-jailer", - action="store_false", - dest="use_jailer", - default=settings.USE_JAILER, - ) - parser.add_argument( - "--jailer", action="store_true", dest="use_jailer", default=settings.USE_JAILER - ) - parser.add_argument( - "--prealloc", - action="store", - type=int, - dest="prealloc_vm_count", - required=False, - default=settings.PREALLOC_VM_COUNT, - ) - parser.add_argument( - "-v", - "--verbose", - dest="loglevel", - help="set loglevel to INFO", - action="store_const", - const=logging.INFO, - default=logging.WARNING, - ) - parser.add_argument( - "-vv", - "--very-verbose", - dest="loglevel", - help="set loglevel to DEBUG", - action="store_const", - const=logging.DEBUG, - ) - parser.add_argument( - "-p", - "--print-config", - dest="print_config", - default=False, - ) - parser.add_argument( - "-n", - "--do-not-run", - dest="do_not_run", - default=False, - ) - return parser.parse_args(args) - - -def main(): - args = parse_args(sys.argv[1:]) - logging.basicConfig(level=args.loglevel) - settings.update( - USE_JAILER=args.use_jailer, - PRINT_SYSTEM_LOGS=args.system_logs, - PREALLOC_VM_COUNT=args.prealloc_vm_count, - ) - if args.print_settings: - print(settings.display()) - - settings.check() - - if args.do_not_run: - logger.info("Option --do-not-run, exiting") - else: - settings.setup() - supervisor.run() - - -if __name__ == "__main__": - main() diff 
--git a/vm_supervisor/conf.py b/vm_supervisor/conf.py deleted file mode 100644 index ab64be101..000000000 --- a/vm_supervisor/conf.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -from os import getenv -from os.path import isfile, join -from typing import NewType - -from .models import FilePath - -Url = NewType("Url", str) - - -class Settings: - VM_ID_START_INDEX: int = int(getenv("ALEPH_VM_START_ID_INDEX", 4)) - PREALLOC_VM_COUNT: int = int(getenv("ALEPH_PREALLOC_VM_COUNT", 0)) - API_SERVER: str = getenv("ALEPH_API_SERVER", "https://api2.aleph.im") - USE_JAILER: bool = getenv("ALEPH_USER_JAILER", "true") == "true" - # System logs make boot ~2x slower - PRINT_SYSTEM_LOGS: bool = getenv("ALEPH_PRINT_SYSTEM_LOGS", "false") == "true" - FIRECRACKER_PATH: str = getenv( - "ALEPH_FIRECRACKER_PATH", "/opt/firecracker/firecracker" - ) - JAILER_PATH: str = getenv("ALEPH_JAILER_PATH", "/opt/firecracker/jailer") - LINUX_PATH: str = getenv("ALEPH_LINUX_PATH", os.path.abspath("./kernels/vmlinux.bin")) - - CONNECTOR_URL: Url = getenv("ALEPH_CONNECTOR_URL", "http://localhost:8000") - - CACHE_ROOT: FilePath = getenv("ALEPH_CACHE_ROOT", "/tmp/aleph/vm_supervisor") - MESSAGE_CACHE: FilePath = getenv("ALEPH_MESSAGE_CACHE", join(CACHE_ROOT, "message")) - CODE_CACHE: FilePath = getenv("ALEPH_CODE_CACHE", join(CACHE_ROOT, "code")) - RUNTIME_CACHE: FilePath = getenv("ALEPH_RUNTIME_CACHE", join(CACHE_ROOT, "runtime")) - DATA_CACHE: FilePath = getenv("ALEPH_DATA_CACHE", join(CACHE_ROOT, "data")) - - def update(self, **kwargs): - for key, value in kwargs.items(): - if hasattr(self, key): - setattr(self, key, value) - else: - raise ValueError(f"Unknown setting '{key}'") - - def check(self): - assert isfile(self.FIRECRACKER_PATH) - assert isfile(self.JAILER_PATH) - assert isfile(self.LINUX_PATH) - assert self.CONNECTOR_URL.startswith( - "http://" - ) or self.CONNECTOR_URL.startswith("https://") - - def setup(self): - os.makedirs(self.MESSAGE_CACHE, exist_ok=True) - os.makedirs(self.CODE_CACHE, 
exist_ok=True) - os.makedirs(self.RUNTIME_CACHE, exist_ok=True) - os.makedirs(self.DATA_CACHE, exist_ok=True) - - def display(self) -> str: - result = "" - for annotation, value in self.__annotations__.items(): - result += f"{annotation} ({value.__name__}) = {getattr(self, annotation)}" - return result - - -# Settings singleton -settings = Settings() diff --git a/vm_supervisor/models.py b/vm_supervisor/models.py deleted file mode 100644 index 13972854e..000000000 --- a/vm_supervisor/models.py +++ /dev/null @@ -1,55 +0,0 @@ -from enum import Enum -from typing import NewType - -from pydantic import BaseModel - -FilePath = NewType("FilePath", str) - - -class Encoding(str, Enum): - plain = "plain" - zip = "zip" - - -class CodeContent(BaseModel): - encoding: Encoding - entrypoint: str - ref: str - latest_amend: bool = True - - -class FunctionTriggers(BaseModel): - http: bool - - -class FunctionEnvironment(BaseModel): - reproducible: bool = False - internet: bool = False - aleph_api: bool = False - - -class FunctionResources(BaseModel): - vcpus: int = 1 - memory: int = 128 - seconds: int = 1 - - -class FunctionRuntime(BaseModel): - ref: str - latest_amend: bool = True - comment: str - - -class FunctionContent(BaseModel): - code: CodeContent - on: FunctionTriggers - environment: FunctionEnvironment - resources: FunctionResources - runtime: FunctionRuntime - - -class FunctionMessage(BaseModel): - type: str - address: str - content: FunctionContent - time: float diff --git a/vm_supervisor/pool.py b/vm_supervisor/pool.py deleted file mode 100644 index 6642fc938..000000000 --- a/vm_supervisor/pool.py +++ /dev/null @@ -1,35 +0,0 @@ -import asyncio - -from firecracker.microvm import MicroVM -from vm_supervisor.conf import settings -from vm_supervisor.vm.firecracker_microvm import start_new_vm - - -class VmPool: - """Pool of VMs pre-allocated in order to decrease response time. - The counter is used by the VMs to set their tap interface name and the corresponding - IPv4 subnet. 
- """ - - queue: asyncio.Queue - counter: int # Used for network interfaces - - def __init__(self): - self.queue = asyncio.Queue() - self.counter = settings.VM_ID_START_INDEX - - async def provision(self, kernel_image_path, rootfs_path): - self.counter += 1 - vm = await start_new_vm( - vm_id=self.counter, - kernel_image_path=kernel_image_path, - rootfs_path=rootfs_path, - ) - await self.queue.put(vm) - return vm - - async def get_a_vm(self, kernel_image_path, rootfs_path) -> MicroVM: - loop = asyncio.get_event_loop() - loop.create_task(self.provision(kernel_image_path, rootfs_path)) - # Return the first VM from the pool - return await self.queue.get() diff --git a/vm_supervisor/storage.py b/vm_supervisor/storage.py deleted file mode 100644 index 5776eb5b8..000000000 --- a/vm_supervisor/storage.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -This module is in charge of providing the source code corresponding to a 'code id'. - -In this prototype, it returns a hardcoded example. -In the future, it should connect to an Aleph node and retrieve the code from there. 
-""" -import json -import logging -import os -from os.path import isfile, join - -import aiohttp - -from .conf import settings -from .models import FunctionMessage, FilePath - -logger = logging.getLogger(__name__) - - -async def download_file(url: str, local_path: FilePath) -> None: - if isfile(local_path): - logger.debug(f"File already exists: {local_path}") - else: - logger.debug(f"Downloading {url} -> {local_path}") - async with aiohttp.ClientSession() as session: - resp = await session.get(url) - resp.raise_for_status() - try: - with open(local_path, "wb") as cache_file: - while True: - chunk = await resp.content.read(65536) - if not chunk: - break - cache_file.write(chunk) - logger.debug("Download complete") - except Exception: - # Ensure no partial file is left - os.remove(local_path) - raise - - -async def get_message(ref) -> FunctionMessage: - cache_path = FilePath(join(settings.MESSAGE_CACHE, ref) + ".json") - url = f"{settings.CONNECTOR_URL}/download/message/{ref}" - - await download_file(url, cache_path) - - with open(cache_path, "r") as cache_file: - msg = json.load(cache_file) - # TODO: Define VM Function type instead of wrapping in 'content' key - msg_content = msg["content"] - return FunctionMessage(**msg_content) - - -async def get_code(ref) -> FilePath: - cache_path = FilePath(join(settings.CODE_CACHE, ref)) - url = f"{settings.CONNECTOR_URL}/download/code/{ref}" - await download_file(url, cache_path) - return cache_path - - -async def get_data(ref) -> FilePath: - cache_path = FilePath(join(settings.DATA_CACHE, ref)) - url = f"{settings.CONNECTOR_URL}/download/data/{ref}" - await download_file(url, cache_path) - return cache_path - - -async def get_runtime(ref) -> FilePath: - cache_path = FilePath(join(settings.RUNTIME_CACHE, ref)) - url = f"{settings.CONNECTOR_URL}/download/runtime/{ref}" - await download_file(url, cache_path) - return cache_path diff --git a/vm_supervisor/supervisor.py b/vm_supervisor/supervisor.py deleted file mode 100644 index 
fffb8ed62..000000000 --- a/vm_supervisor/supervisor.py +++ /dev/null @@ -1,110 +0,0 @@ -""" -The VM Supervisor is in charge of executing code, starting and stopping VMs and provides -and API to launch these operations. - -At it's core, it is currently an asynchronous HTTP server using aiohttp, but this may -evolve in the future. -""" - -import logging -import os.path -from os import system - -from aiohttp import web, ClientResponseError, ClientConnectorError -from aiohttp.web_exceptions import HTTPNotFound, HTTPBadRequest, HTTPServiceUnavailable - -from .conf import settings -from .models import FilePath -from .pool import VmPool -from .storage import get_code, get_runtime, get_message - -logger = logging.getLogger(__name__) -pool = VmPool() - - -async def index(request: web.Request): - assert request - return web.Response(text="Server: Aleph VM Supervisor") - - -async def run_code(request: web.Request): - """ - Execute the code corresponding to the 'code id' in the path. - """ - msg_ref: str = request.match_info["ref"] - - try: - msg = await get_message(msg_ref) - except ClientConnectorError: - raise HTTPServiceUnavailable(reason="Aleph Connector unavailable") - except ClientResponseError as error: - if error.status == 404: - raise HTTPNotFound(reason="Hash not found") - else: - raise - - code_ref: str = msg.content.code.ref - runtime_ref: str = msg.content.runtime.ref - # data_ref: str = msg.content['data']['ref'] - - try: - code_path: FilePath = await get_code(code_ref) - rootfs_path: FilePath = await get_runtime(runtime_ref) - # data_path: FilePath = await get_data(data_ref) - except ClientResponseError as error: - if error.status == 404: - raise HTTPBadRequest(reason="Code or runtime not found") - else: - raise - - logger.debug("Got files") - - kernel_image_path = settings.LINUX_PATH - - vm = await pool.get_a_vm( - kernel_image_path=kernel_image_path, rootfs_path=rootfs_path - ) - - path = request.match_info["suffix"] - if not path.startswith("/"): - path = 
"/" + path - - logger.debug(f"Using vm={vm.vm_id}") - scope = { - "type": "http", - "path": path, - "method": request.method, - "query_string": request.query_string, - "headers": request.raw_headers, - } - with open(code_path, "rb") as code_file: - result = await vm.run_code( - code_file.read(), - entrypoint=msg.content.code.entrypoint, - encoding=msg.content.code.encoding, - scope=scope, - ) - await vm.stop() - system(f"rm -fr {vm.jailer_path}") - # TODO: Handle other content-types - return web.Response(body=result, content_type="application/json") - - -app = web.Application() - -app.add_routes([web.get("/", index)]) -app.add_routes([web.route("*", "/vm/function/{ref}{suffix:.*}", run_code)]) - - -def run(): - """Run the VM Supervisor.""" - - # runtime = 'aleph-alpine-3.13-python' - kernel_image_path = os.path.abspath("./kernels/vmlinux.bin") - # rootfs_path = os.path.abspath(f"./runtimes/{runtime}/rootfs.ext4") - - for path in (settings.FIRECRACKER_PATH, settings.JAILER_PATH, kernel_image_path): - if not os.path.isfile(path): - raise FileNotFoundError(path) - - web.run_app(app) diff --git a/vm_supervisor/vm/__init__.py b/vm_supervisor/vm/__init__.py deleted file mode 100644 index 7b4b9ce0b..000000000 --- a/vm_supervisor/vm/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .firecracker_microvm import start_new_vm as start_new_firecracker_vm diff --git a/vm_supervisor/vm/firecracker_microvm.py b/vm_supervisor/vm/firecracker_microvm.py deleted file mode 100644 index 2c7450eac..000000000 --- a/vm_supervisor/vm/firecracker_microvm.py +++ /dev/null @@ -1,41 +0,0 @@ -import asyncio -import logging -from os.path import isfile - -from vm_supervisor.conf import settings -from firecracker.microvm import MicroVM, setfacl - -logger = logging.getLogger(__name__) - - -async def start_new_vm(vm_id: int, kernel_image_path: str, rootfs_path: str) -> MicroVM: - logger.info("Created VM= %s", vm_id) - - assert isfile(kernel_image_path) - assert isfile(rootfs_path) - - await setfacl() - 
vm = MicroVM( - vm_id, - firecracker_bin_path=settings.FIRECRACKER_PATH, - use_jailer=settings.USE_JAILER, - jailer_bin_path=settings.JAILER_PATH, - ) - vm.cleanup_jailer() - await vm.start() - await vm.socket_is_ready() - await vm.set_boot_source( - kernel_image_path, enable_console=settings.PRINT_SYSTEM_LOGS - ) - await vm.set_rootfs(rootfs_path) - await vm.set_vsock() - await vm.set_network() - - if settings.PRINT_SYSTEM_LOGS: - asyncio.get_running_loop().create_task(vm.print_logs()) - - await asyncio.gather( - vm.start_instance(), - vm.wait_for_init(), - ) - return vm