diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..d6a2b59 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,20 @@ +# Match Ruff style in .editorconfig format +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*.py] +indent_style = space +indent_size = 4 +tab_width = 4 +max_line_length = 100 + +[{*.sh,*.sh.j2}] +indent_style = space +indent_size = 4 +tab_width = 4 +max_line_length = 100 \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 99730a1..da145cc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,7 +10,8 @@ permissions: packages: write env: - KERNEL_VERSION: 6.18.16 + DEFAULT_FLAVOR_ID: "trixie-full" + FORCE_COLOR: "1" jobs: # ------------------------------------------------------------------- @@ -22,10 +23,8 @@ jobs: - name: Checkout code uses: actions/checkout@v6 - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.12" + - name: Install uv + uses: astral-sh/setup-uv@v7 - name: Install lint tools run: make lint-install @@ -33,153 +32,18 @@ jobs: - name: Lint run: make lint - # ------------------------------------------------------------------- - # Fast cache lookup – decides whether the OCI runner is needed - # There are consistent issues with OCI runners not getting scheduled. - # This is the workaround. 
- # ------------------------------------------------------------------- - check-kernel-cache: - runs-on: ubuntu-latest - outputs: - amd64-cache-hit: ${{ steps.amd64-cache.outputs.cache-hit }} - arm64-cache-hit: ${{ steps.arm64-cache.outputs.cache-hit }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - - - name: Check amd64 kernel cache - id: amd64-cache - uses: actions/cache/restore@v4 - with: - path: | - mkosi.output/kernel/${{ env.KERNEL_VERSION }}/amd64 - key: kernel-amd64-${{ env.KERNEL_VERSION }}-${{ hashFiles('kernel.configs/*', 'Dockerfile') }} - lookup-only: true - - - name: Check arm64 kernel cache - id: arm64-cache - uses: actions/cache/restore@v4 - with: - path: | - mkosi.output/kernel/${{ env.KERNEL_VERSION }}/arm64 - key: kernel-arm64-${{ env.KERNEL_VERSION }}-${{ hashFiles('kernel.configs/*', 'Dockerfile') }} - lookup-only: true - - # ------------------------------------------------------------------- - # Build kernel (vmlinuz + modules) inside Docker - # ------------------------------------------------------------------- - build-kernel: - needs: [lint, check-kernel-cache] - # Forks / cache-hit → cheap default runner; mainline cache-miss → fast oracle runner - runs-on: > - ${{ github.repository != 'tinkerbell/captain' && matrix.fork_runner - || needs.check-kernel-cache.outputs[format('{0}-cache-hit', matrix.arch)] == 'true' && matrix.fork_runner - || fromJSON(matrix.mainline_runner) }} - strategy: - fail-fast: false - matrix: - arch: [amd64, arm64] - include: - - arch: amd64 - fork_runner: ubuntu-latest - mainline_runner: '{"group":"Default","labels":["oracle-vm-16cpu-64gb-x86-64"]}' - - arch: arm64 - fork_runner: ubuntu-24.04-arm - mainline_runner: '{"group":"Default","labels":["oracle-vm-16cpu-64gb-arm64"]}' - env: - ARCH: ${{ matrix.arch }} - KERNEL_MODE: docker - MKOSI_MODE: skip - steps: - - name: Checkout code - uses: actions/checkout@v6 - - - name: Load shared config - run: cat .github/config.env >> "$GITHUB_ENV" - - - name: Log 
in to GHCR - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Compute Dockerfile hash - id: dockerfile-hash - run: echo "hash=$(sha256sum Dockerfile | awk '{print $1}')" >> "$GITHUB_OUTPUT" - - - name: Pull or build builder image - id: builder - run: | - HASH="${{ steps.dockerfile-hash.outputs.hash }}" - REMOTE="ghcr.io/${{ github.repository }}/${{ env.BUILDER_IMAGE }}" - if docker pull "${REMOTE}:${HASH}-${{ matrix.arch }}"; then - docker tag "${REMOTE}:${HASH}-${{ matrix.arch }}" "${{ env.BUILDER_IMAGE }}:${HASH}" - docker tag "${REMOTE}:${HASH}-${{ matrix.arch }}" "${{ env.BUILDER_IMAGE }}" - echo "built=false" >> "$GITHUB_OUTPUT" - else - docker build -t "${{ env.BUILDER_IMAGE }}:${HASH}" -t "${{ env.BUILDER_IMAGE }}" . - echo "built=true" >> "$GITHUB_OUTPUT" - fi - - - name: Push builder image to GHCR - if: github.ref == 'refs/heads/main' && steps.builder.outputs.built == 'true' - run: | - HASH="${{ steps.dockerfile-hash.outputs.hash }}" - REMOTE="ghcr.io/${{ github.repository }}/${{ env.BUILDER_IMAGE }}" - docker tag "${{ env.BUILDER_IMAGE }}:${HASH}" "${REMOTE}:${HASH}-${{ matrix.arch }}" - docker push "${REMOTE}:${HASH}-${{ matrix.arch }}" - - - name: Compute kernel cache key - id: kernel-cache-key - run: echo "key=kernel-${{ matrix.arch }}-${{ env.KERNEL_VERSION }}-${{ hashFiles('kernel.configs/*', 'Dockerfile') }}" >> "$GITHUB_OUTPUT" - - - name: Restore kernel cache - id: kernel-cache - uses: actions/cache/restore@v4 - with: - path: | - mkosi.output/kernel/${{ env.KERNEL_VERSION }}/${{ matrix.arch }} - key: ${{ steps.kernel-cache-key.outputs.key }} - - - name: Install Python dependencies - run: pip install -r requirements.txt - - - name: Build kernel - run: ./build.py kernel - - - name: Fix output file ownership - run: sudo chown -R "$(id -u):$(id -g)" mkosi.output/ - - - name: Save kernel cache - if: github.ref == 'refs/heads/main' && 
steps.kernel-cache.outputs.cache-hit != 'true' - uses: actions/cache/save@v4 - with: - path: | - mkosi.output/kernel/${{ env.KERNEL_VERSION }}/${{ matrix.arch }} - key: ${{ steps.kernel-cache-key.outputs.key }} - - - name: Upload kernel artifacts - uses: actions/upload-artifact@v4 - with: - name: kernel-${{ matrix.arch }} - path: | - mkosi.output/kernel/${{ env.KERNEL_VERSION }}/${{ matrix.arch }} - retention-days: 1 - # ------------------------------------------------------------------- # Download tools (containerd, runc, nerdctl, CNI plugins) # ------------------------------------------------------------------- download-tools: - needs: [lint] + needs: [ lint ] runs-on: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} strategy: fail-fast: false matrix: - arch: [amd64, arm64] + arch: [ amd64, arm64 ] env: ARCH: ${{ matrix.arch }} - KERNEL_MODE: skip MKOSI_MODE: skip TOOLS_MODE: native steps: @@ -188,22 +52,24 @@ jobs: - name: Restore tools cache id: tools-cache - uses: actions/cache/restore@v4 + uses: actions/cache/restore@v5 with: path: | mkosi.output/tools/${{ matrix.arch }}/usr/local/bin mkosi.output/tools/${{ matrix.arch }}/opt/cni key: tools-${{ matrix.arch }}-${{ hashFiles('captain/tools.py') }} - - name: Install Python dependencies - run: pip install -r requirements.txt + - name: Install uv + uses: astral-sh/setup-uv@v7 - name: Download tools - run: ./build.py tools + env: + TOOLS_MODE: native + run: uv run captain tools - name: Save tools cache if: github.ref == 'refs/heads/main' && steps.tools-cache.outputs.cache-hit != 'true' - uses: actions/cache/save@v4 + uses: actions/cache/save@v5 with: path: | mkosi.output/tools/${{ matrix.arch }}/usr/local/bin @@ -211,7 +77,7 @@ jobs: key: tools-${{ matrix.arch }}-${{ hashFiles('captain/tools.py') }} - name: Upload tools artifacts - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: tools-${{ matrix.arch }} path: | @@ -220,218 +86,124 @@ jobs: retention-days: 1 # 
------------------------------------------------------------------- - # Build initramfs via mkosi (depends on kernel + tools) + # Build Docker builder image, per-arch; pushes to ghcr.io. # ------------------------------------------------------------------- - build-initramfs: + build-dockerfile: + needs: [ lint ] runs-on: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} - needs: [build-kernel, download-tools] strategy: fail-fast: false - matrix: - arch: [amd64, arm64] + matrix: { arch: [ amd64, arm64 ] } env: ARCH: ${{ matrix.arch }} - KERNEL_MODE: skip - MKOSI_MODE: native steps: - name: Checkout code uses: actions/checkout@v6 - - name: Download kernel artifacts - uses: actions/download-artifact@v4 - with: - name: kernel-${{ matrix.arch }} - path: mkosi.output/kernel/${{ env.KERNEL_VERSION }}/${{ matrix.arch }} + - name: Install uv + uses: astral-sh/setup-uv@v7 - - name: Download tools artifacts - uses: actions/download-artifact@v4 + - name: Log in to GHCR + uses: docker/login-action@v4 with: - name: tools-${{ matrix.arch }} - path: mkosi.output/tools/${{ matrix.arch }} - - - name: Restore tool binary permissions - run: | - # GitHub Actions artifact upload/download strips execute permissions. - # Restore +x on all tool binaries so they work inside the initramfs. 
- chmod +x mkosi.output/tools/${{ matrix.arch }}/usr/local/bin/* - chmod +x mkosi.output/tools/${{ matrix.arch }}/opt/cni/bin/* - - - name: Refresh apt cache - run: sudo apt-get update - - - name: setup-mkosi - uses: systemd/mkosi@v26 - - - name: Install bubblewrap - run: | - sudo apt-get update - sudo apt-get install -y bubblewrap - - - name: Install Python dependencies - run: pip install -r requirements.txt + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - - name: Build initramfs - run: ./build.py initramfs - - - name: Upload initramfs artifacts - uses: actions/upload-artifact@v4 - with: - name: initramfs-${{ matrix.arch }} - path: out/ - retention-days: 1 + - name: Build Dockerfile and push + run: uv run captain builder --push # ------------------------------------------------------------------- - # Build UEFI-bootable ISO (depends on initramfs) + # Build initramfs via mkosi (depends on tools) # ------------------------------------------------------------------- - build-iso: + build-all: runs-on: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} - needs: [build-initramfs] + needs: [ download-tools, build-dockerfile ] strategy: fail-fast: false matrix: - arch: [amd64, arm64] include: - - arch: amd64 - output_arch: x86_64 - - arch: arm64 - output_arch: aarch64 + - { arch: amd64, output_arch: x86_64, iso: true, FLAVOR_ID: "trixie-full" } + - { arch: arm64, output_arch: aarch64, iso: true, FLAVOR_ID: "trixie-full" } + - { arch: arm64, output_arch: aarch64, iso: false, FLAVOR_ID: "trixie-rockchip64" } + - { arch: arm64, output_arch: aarch64, iso: false, FLAVOR_ID: "trixie-meson64" } env: ARCH: ${{ matrix.arch }} - KERNEL_MODE: skip - MKOSI_MODE: skip + MKOSI_MODE: docker ISO_MODE: docker + FLAVOR_ID: ${{ matrix.FLAVOR_ID }} steps: - name: Checkout code uses: actions/checkout@v6 - - name: Load shared config - run: cat .github/config.env >> "$GITHUB_ENV" - - - name: Log in to GHCR - uses: 
docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Compute Dockerfile hash - id: dockerfile-hash - run: echo "hash=$(sha256sum Dockerfile | awk '{print $1}')" >> "$GITHUB_OUTPUT" - - - name: Pull or build builder image - id: builder - run: | - HASH="${{ steps.dockerfile-hash.outputs.hash }}" - REMOTE="ghcr.io/${{ github.repository }}/${{ env.BUILDER_IMAGE }}" - if docker pull "${REMOTE}:${HASH}-${{ matrix.arch }}"; then - docker tag "${REMOTE}:${HASH}-${{ matrix.arch }}" "${{ env.BUILDER_IMAGE }}:${HASH}" - docker tag "${REMOTE}:${HASH}-${{ matrix.arch }}" "${{ env.BUILDER_IMAGE }}" - echo "built=false" >> "$GITHUB_OUTPUT" - else - docker build -t "${{ env.BUILDER_IMAGE }}:${HASH}" -t "${{ env.BUILDER_IMAGE }}" . - echo "built=true" >> "$GITHUB_OUTPUT" - fi - - - name: Push builder image to GHCR - if: github.ref == 'refs/heads/main' && steps.builder.outputs.built == 'true' - run: | - HASH="${{ steps.dockerfile-hash.outputs.hash }}" - REMOTE="ghcr.io/${{ github.repository }}/${{ env.BUILDER_IMAGE }}" - docker tag "${{ env.BUILDER_IMAGE }}:${HASH}" "${REMOTE}:${HASH}-${{ matrix.arch }}" - docker push "${REMOTE}:${HASH}-${{ matrix.arch }}" - - - name: Download kernel artifacts - uses: actions/download-artifact@v4 - with: - name: kernel-${{ matrix.arch }} - path: mkosi.output/kernel/${{ env.KERNEL_VERSION }}/${{ matrix.arch }} - - - name: Download initramfs artifacts - uses: actions/download-artifact@v4 + - name: Download tools artifacts + uses: actions/download-artifact@v8 with: - name: initramfs-${{ matrix.arch }} - path: out + name: tools-${{ matrix.arch }} + path: mkosi.output/tools/${{ matrix.arch }} - - name: Stage initramfs for ISO build + - name: Restore tool binary permissions run: | - mkdir -p "mkosi.output/initramfs/${KERNEL_VERSION}/${{ matrix.arch }}" - cp "out/initramfs-${KERNEL_VERSION}-${{ matrix.output_arch }}" \ - "mkosi.output/initramfs/${KERNEL_VERSION}/${{ 
matrix.arch }}/image.cpio.zst" + # GitHub Actions artifact upload/download strips execute permissions. + # Restore +x on all tool binaries so they work inside the initramfs. + chmod +x mkosi.output/tools/${{ matrix.arch }}/usr/local/bin/* + chmod +x mkosi.output/tools/${{ matrix.arch }}/opt/cni/bin/* - - name: Install Python dependencies - run: pip install -r requirements.txt + - name: Install uv + uses: astral-sh/setup-uv@v7 - - name: Build ISO - run: ./build.py iso + - name: Build initramfs + run: uv run captain build # full build, incl initramfs and iso when appropriate - - name: Upload ISO artifact - uses: actions/upload-artifact@v4 + - name: Upload initramfs artifacts + uses: actions/upload-artifact@v6 with: - name: iso-${{ matrix.arch }} - path: out/captainos-${{ env.KERNEL_VERSION }}-${{ matrix.output_arch }}.iso + name: initramfs-${{ matrix.FLAVOR_ID }}-${{ matrix.arch }} + # The full 'out/' directory contents, but not any .iso files (if any) since those are uploaded later + path: | + out/ + !out/**/*.iso retention-days: 1 + # do not upload any .iso files (exclude) - # ------------------------------------------------------------------- - # Publish per-arch artifacts and compute checksums - # ------------------------------------------------------------------- - publish-per-arch: - if: github.ref == 'refs/heads/main' - runs-on: ${{ matrix.target == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} - needs: [build-iso] - strategy: - fail-fast: false - matrix: - target: [amd64, arm64] - env: - ARCH: ${{ matrix.target }} - TARGET: ${{ matrix.target }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - - name: Load shared config - run: cat .github/config.env >> "$GITHUB_ENV" - - - name: Download kernel artifacts - uses: actions/download-artifact@v4 - with: - name: kernel-${{ matrix.target }} - path: mkosi.output/kernel/${{ env.KERNEL_VERSION }}/${{ matrix.target }} - - - name: Download initramfs artifacts - uses: 
actions/download-artifact@v4 - with: - name: initramfs-${{ matrix.target }} - path: out + # ------------------------------------------------------------------- + # UEFI-bootable ISO - only for certain flavors (eg trixie-full) + # ------------------------------------------------------------------- - - name: Download ISO artifact - uses: actions/download-artifact@v4 + - name: Upload ISO artifact + if: ${{ matrix.iso }} # only if matrix entry had iso: true + uses: actions/upload-artifact@v6 with: - name: iso-${{ matrix.target }} - path: out + name: iso-${{ matrix.FLAVOR_ID }}-${{ matrix.arch }} + path: out/captainos-${{ matrix.FLAVOR_ID }}-${{ matrix.output_arch }}.iso + retention-days: 1 - - name: Install Python dependencies - run: pip install -r requirements.txt + # ------------------------------------------------------------------- + # Publish per-arch artifacts and compute checksums + # ------------------------------------------------------------------- - name: Log in to GHCR - uses: docker/login-action@v3 + if: github.ref == 'refs/heads/main' + uses: docker/login-action@v4 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Publish artifacts to GHCR - run: ./build.py release publish + if: github.ref == 'refs/heads/main' + env: + TARGET: ${{ matrix.arch }} + run: uv run captain release-publish # ------------------------------------------------------------------- # Publish combined multi-arch image (reuses per-arch registry blobs) # ------------------------------------------------------------------- publish-combined: if: github.ref == 'refs/heads/main' + name: "publish-combined" runs-on: ubuntu-latest - needs: [publish-per-arch] + needs: [ build-all ] env: ARCH: amd64 TARGET: combined @@ -444,51 +216,41 @@ jobs: - name: Load shared config run: cat .github/config.env >> "$GITHUB_ENV" - - name: Download kernel artifacts (amd64) - uses: actions/download-artifact@v4 - with: - name: kernel-amd64 - path: 
mkosi.output/kernel/${{ env.KERNEL_VERSION }}/amd64 - - name: Download initramfs artifacts (amd64) - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: - name: initramfs-amd64 + name: initramfs-${{ env.DEFAULT_FLAVOR_ID }}-amd64 path: out - name: Download ISO artifact (amd64) - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: - name: iso-amd64 + name: iso-${{ env.DEFAULT_FLAVOR_ID }}-amd64 path: out - - name: Download kernel artifacts (arm64) - uses: actions/download-artifact@v4 - with: - name: kernel-arm64 - path: mkosi.output/kernel/${{ env.KERNEL_VERSION }}/arm64 - - name: Download initramfs artifacts (arm64) - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: - name: initramfs-arm64 + name: initramfs-${{ env.DEFAULT_FLAVOR_ID }}-arm64 path: out - name: Download ISO artifact (arm64) - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: - name: iso-arm64 + name: iso-${{ env.DEFAULT_FLAVOR_ID }}-arm64 path: out - - name: Install Python dependencies - run: pip install -r requirements.txt + - name: Install uv + uses: astral-sh/setup-uv@v7 - name: Log in to GHCR - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Publish combined image to GHCR - run: ./build.py release publish + env: + FLAVOR_ID: "${{ env.DEFAULT_FLAVOR_ID }}" + run: uv run captain release-publish diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index cfaac90..c478a8f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -27,13 +27,13 @@ jobs: - name: Load shared config run: cat .github/config.env >> "$GITHUB_ENV" - - name: Install Python dependencies - run: pip install -r requirements.txt + - name: Install uv + uses: astral-sh/setup-uv@v7 - name: Pull release artifacts (combined) env: VERSION_EXCLUDE: ${{ github.ref_name }} - run: 
./build.py release pull --target combined --pull-output artifacts/combined + run: uv run ./build.py release pull --target combined --pull-output artifacts/combined - name: Create GitHub Release env: @@ -47,4 +47,4 @@ jobs: - name: Tag OCI artifacts with version env: VERSION_EXCLUDE: ${{ github.ref_name }} - run: ./build.py release tag ${{ github.ref_name }} + run: uv run ./build.py release tag ${{ github.ref_name }} diff --git a/.gitignore b/.gitignore index 6781065..4098d42 100644 --- a/.gitignore +++ b/.gitignore @@ -4,14 +4,22 @@ mkosi.cache/ mkosi.tools/ mkosi.tools.manifest mkosi.builddir/ -kernel.configs/.config.resolved.* out/ +# Generated artifacts +/mkosi.conf +/mkosi.finalize +/mkosi.postinst +/mkosi.extra +/mkosi.sandbox +/mkosi.skeleton + # Editor *.swp *.swo *~ .vscode/ +.idea/ # OS .DS_Store @@ -29,3 +37,6 @@ dist/ .mypy_cache/ .pyright/ .pytest_cache/ + +# Ignore uv.lock for now; we should always use the latest version of packages etc +uv.lock diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d3f0f3a..f285650 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,7 +17,7 @@ Please read and understand the DCO found [here](https://github.com/tinkerbell/or ## Environment Details -Building is handled by a Python script, please see the [build.py](build.py) for details. Only Python >= 3.10 and Docker are required. +Building is handled by a Python script, please see the [build.py](build.py) for details. Only `uv` (Python) and Docker are required. ## How to Submit Change Requests diff --git a/Dockerfile b/Dockerfile index 5837829..b02bcdb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,19 +1,23 @@ # Builder container for CaptainOS using mkosi # Encapsulates all mkosi dependencies for reproducible builds. -# Usage: docker build -t captainos-builder . && docker run --rm --privileged -v $(pwd):/work captainos-builder build +# Includes skopeo and buildah for OCI image manipulation, and uv for Python tool management. 
FROM debian:trixie -ARG MKOSI_VERSION=v26 - # Avoid interactive prompts -ENV DEBIAN_FRONTEND=noninteractive +ENV DEBIAN_FRONTEND=noninteractive BUILDAH_ISOLATION=chroot FORCE_COLOR=1 -# Install mkosi runtime dependencies and kernel build dependencies in one layer -RUN apt-get update && apt-get install -y --no-install-recommends \ - # mkosi runtime deps - python3 \ - python3-pip \ - python3-venv \ +# Add foreign architecture for cross-compilation (arm64 on amd64 and vice versa) and apt-update +# Immediately install the cross-arch grub dependencies +RUN <<-FRAGMENT_WITH_VARIABLES +# Determine arch/cross-arch and install grub and other basic packages (around 200mb layer) +NATIVE_ARCH="$(dpkg --print-architecture)" +FOREIGN_ARCH=$([ "$NATIVE_ARCH" = "amd64" ] && echo "arm64" || echo "amd64") +dpkg --add-architecture "$FOREIGN_ARCH" +apt-get -o "Dpkg::Use-Pty=0" update +apt-get -o "Dpkg::Use-Pty=0" install -y --no-install-recommends \ + "grub-efi-${NATIVE_ARCH}-bin" \ + "grub-efi-${FOREIGN_ARCH}-bin:${FOREIGN_ARCH}" \ + grub-common \ apt \ dpkg \ debian-archive-keyring \ @@ -25,54 +29,112 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ systemd-container \ systemd \ udev \ - bubblewrap \ squashfs-tools \ mtools \ erofs-utils \ dosfstools \ e2fsprogs \ btrfs-progs \ - # Kernel build deps - build-essential \ - gcc \ - gcc-aarch64-linux-gnu \ + tree +FRAGMENT_WITH_VARIABLES + +# Cross-architecture support (arm64 on x86_64 and vice versa) - huge single package +RUN <<-QEMU_USER_FRAGMENT +# Install qemu-user but then delete all un-needed qemu binaries to save space (we only need aarch64 and x86_64) +apt-get -o "Dpkg::Use-Pty=0" install -y --no-install-recommends qemu-user +echo 'All qemus: ' +ls -lah /usr/bin/qemu-* +# keep only qemu binary for the arches we're interested in: aarch64 and x86_64 +echo 'To be deleted: ' +find /usr/bin -name 'qemu-*' -not -name 'qemu-aarch64' -not -name 'qemu-x86_64' -not -name 'qemu-arm*' -not -name 'qemu-amd*' -print0 | 
xargs -0 ls -lah +echo 'Deleting: ' +find /usr/bin -name 'qemu-*' -not -name 'qemu-aarch64' -not -name 'qemu-x86_64' -not -name 'qemu-arm*' -not -name 'qemu-amd*' -print0 | xargs -0 rm -fv +echo 'Remaining: ' +ls -lah /usr/bin/qemu-* + +QEMU_USER_FRAGMENT + +# Extra kernel build tools +RUN apt-get -o "Dpkg::Use-Pty=0" install -y --no-install-recommends \ make \ flex \ bison \ bc \ libelf-dev \ libssl-dev \ + dpkg-dev \ dwarves \ - pahole \ + pahole + +# Those are pulled by build-essential (cross...), but are quite big; pull them earlier to balance layer size +RUN apt-get -o "Dpkg::Use-Pty=0" install -y --no-install-recommends \ + binutils-common \ + libasan8 \ + liblsan0 \ + libubsan1 \ + libhwasan0 \ + binutils-x86-64-linux-gnu \ + libasan8-amd64-cross \ + liblsan0-amd64-cross \ + libtsan2-amd64-cross \ + libc6-amd64-cross \ + linux-libc-dev-amd64-cross \ + libc6-dev-amd64-cross + +RUN apt-get -o "Dpkg::Use-Pty=0" install -y --no-install-recommends \ rsync \ coreutils \ - # Cross-architecture support (arm64 on x86_64 and vice versa) - qemu-user-static \ - # Network tools (for fetching kernel source etc.) 
git \ curl \ ca-certificates \ - # Binary compression + qemu-user-static + +# Then both, of which one will already be fulfilled +RUN apt-get -o "Dpkg::Use-Pty=0" install -y --no-install-recommends crossbuild-essential-arm64 +RUN apt-get -o "Dpkg::Use-Pty=0" install -y --no-install-recommends crossbuild-essential-amd64 + +# Buildah and Skopeo +# Binary compression +# ISO image creation +# Kernel build deps: build-essential +RUN apt-get -o "Dpkg::Use-Pty=0" install -y --no-install-recommends \ + build-essential \ + containernetworking-plugins \ + bubblewrap \ + skopeo \ upx-ucl \ - # ISO image creation - xorriso \ - grub-common \ - && NATIVE_ARCH="$(dpkg --print-architecture)" \ - && FOREIGN_ARCH=$([ "$NATIVE_ARCH" = "amd64" ] && echo "arm64" || echo "amd64") \ - && apt-get install -y --no-install-recommends "grub-efi-${NATIVE_ARCH}-bin" \ - && dpkg --add-architecture "$FOREIGN_ARCH" \ - && apt-get update \ - && apt-get install -y --no-install-recommends "grub-efi-${FOREIGN_ARCH}-bin:${FOREIGN_ARCH}" \ - && rm -rf /var/lib/apt/lists/* -# Install mkosi from GitHub (not on PyPI) -RUN pip3 install --break-system-packages \ - configargparse \ - "git+https://github.com/systemd/mkosi.git@${MKOSI_VERSION}" +# Buildah is pretty huge, gets its own layer. +RUN apt-get -o "Dpkg::Use-Pty=0" install -y --no-install-recommends buildah -# Verify mkosi is functional -RUN mkosi --version +# This is just to appease mkosi's later stages. +RUN apt-get -o "Dpkg::Use-Pty=0" install -y --no-install-recommends python3 python3-pip python3-pefile + +RUN <<-CONFIG_FRAG +## A few small config fragments to make life easier +# git: Ignore owner mismatches in /work, which will be bind-mounted from the host +git config --global --add safe.directory /work +# buildah: Configure rootless storage driver and chroot isolation (no user-namespace required — we only assemble scratch images, never RUN anything inside them). 
+printf '[storage]\ndriver = "vfs"\nrunroot = "/var/tmp/buildah-runroot"\ngraphroot = "/var/tmp/buildah-storage"\n' > /etc/containers/storage.conf +# Buildah 1.39+ on Debian requires netavark but we never need networking +# (all images are FROM scratch with no RUN steps). A no-op stub satisfies +# the startup check. +mkdir -p /usr/libexec/podman +printf '#!/bin/sh\nexit 0\n' > /usr/libexec/podman/netavark +chmod +x /usr/libexec/podman/netavark +CONFIG_FRAG + +# Install astral-sh's uv with a script - install to /usr for global access +RUN echo -n 'System Python: ' && python3 --version && curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="/usr/bin" sh && uv --version + +# Install mkosi from GitHub (not on PyPI) via the system pip3 +ARG MKOSI_VERSION=v26 +RUN pip3 install --break-system-packages --root-user-action=ignore "git+https://github.com/systemd/mkosi.git@${MKOSI_VERSION}" && mkosi --version && command -v mkosi +# Prime uv's cache with our pyproject.toml to speed up runtime +COPY pyproject.toml /work/pyproject.toml +COPY captain /work/captain +COPY build.py /work/build.py WORKDIR /work -ENTRYPOINT ["mkosi"] -CMD ["build"] +RUN uv --verbose run captain --version diff --git a/Dockerfile.release b/Dockerfile.release deleted file mode 100644 index 9d4babd..0000000 --- a/Dockerfile.release +++ /dev/null @@ -1,37 +0,0 @@ -# Lightweight container for OCI release operations (publish, index, pull, tag). -# Contains buildah, skopeo, Python 3, git, and configargparse — nothing else. -# -# Usage: -# docker build -f Dockerfile.release -t captainos-release . 
-# docker run --rm -v $(pwd):/work captainos-release release publish -FROM python:3.12-slim - -# Install buildah, skopeo, and git -RUN apt-get update && apt-get install -y --no-install-recommends \ - buildah \ - skopeo \ - git \ - ca-certificates \ - && rm -rf /var/lib/apt/lists/* \ - && git config --global --add safe.directory /work - -# Configure rootless storage driver and chroot isolation (no user-namespace -# required — we only assemble scratch images, never RUN anything inside them). -RUN printf '[storage]\ndriver = "vfs"\nrunroot = "/var/tmp/buildah-runroot"\ngraphroot = "/var/tmp/buildah-storage"\n' \ - > /etc/containers/storage.conf -ENV BUILDAH_ISOLATION=chroot - -# Buildah 1.39+ on Debian requires netavark but we never need networking -# (all images are FROM scratch with no RUN steps). A no-op stub satisfies -# the startup check. -RUN mkdir -p /usr/libexec/podman \ - && printf '#!/bin/sh\nexit 0\n' > /usr/libexec/podman/netavark \ - && chmod +x /usr/libexec/podman/netavark - -# Install Python dependencies -COPY requirements.txt /tmp/requirements.txt -RUN pip install --no-cache-dir -r /tmp/requirements.txt && rm /tmp/requirements.txt - -WORKDIR /work -ENTRYPOINT ["python3", "/work/build.py"] -CMD ["release", "--help"] diff --git a/Makefile b/Makefile index e9150bc..b5fa100 100644 --- a/Makefile +++ b/Makefile @@ -1,13 +1,14 @@ .PHONY: lint fmt lint-install lint-install: - pip install -r requirements.txt -r requirements-dev.txt + uv sync --extra dev lint: - ruff check . - ruff format --check . - pyright . + uv tool run ruff check . + uv tool run ruff format --check . + uv sync --extra dev + uv run pyright fmt: - ruff check --fix . - ruff format . + uv tool run ruff check --fix . + uv tool run ruff format . 
diff --git a/README.md b/README.md index e938c3f..799ab84 100644 --- a/README.md +++ b/README.md @@ -33,13 +33,14 @@ The build has four stages: ## Usage -**Prerequisites:** Python >= 3.10, Docker, [configargparse](https://pypi.org/project/ConfigArgParse/) +**Prerequisites:** `uv` (Python), Docker. ```bash -pip install -r requirements.txt +# Install Astral's `uv` if you don't have it: https://docs.astral.sh/uv/getting-started/installation/ +curl -LsSf https://astral.sh/uv/install.sh | sh # then re-log in # Build with defaults (amd64, kernel 6.18.16) -./build.py --help +uv run ./build.py --help usage: build.py [flags] @@ -54,10 +55,7 @@ build configuration: --no-cache rebuild builder image without Docker cache kernel: - --kernel-version VER kernel version to build (default: 6.18.16) - --kernel-src PATH path to local kernel source tree - --kernel-mode {docker,native,skip} kernel stage execution mode (default: docker) - --force-kernel force kernel rebuild even if outputs exist + --flavor-id VER kernel version to build (default: 6.18.16) tools: --tools-mode {docker,native,skip} tools stage execution mode (default: docker) @@ -176,9 +174,9 @@ Each stage can be executed in one of three modes: ```bash . 
-├── build.py # Main build entry point (Python >= 3.10) +├── build.py # Main build entry point (Python >= 3.13; use `uv run build.py`) ├── captain/ # Build system package (stdlib only) -│ ├── __init__.py +│ ├── __init__.py # Package init incl logging │ ├── cli.py # CLI subcommands (argparse) │ ├── config.py # Configuration from environment │ ├── docker.py # Docker builder management @@ -190,7 +188,6 @@ Each stage can be executed in one of three modes: │ ├── skopeo.py # skopeo CLI wrapper (inspect/copy/export) │ ├── iso.py # ISO image assembly │ ├── qemu.py # QEMU boot testing -│ ├── log.py # Colored logging │ └── util.py # Shared helpers & arch mapping ├── Dockerfile # Builder container definition ├── Dockerfile.release # Lightweight container for OCI release ops diff --git a/build.py b/build.py index 85ae4e6..2375ec0 100755 --- a/build.py +++ b/build.py @@ -1,20 +1,26 @@ #!/usr/bin/env python3 -"""CaptainOS build system entry point. +"""CaptainOS build system — click-based CLI entry point. -Requires: Python >= 3.10, Docker (unless all stages use native or skip) +Requires: Python >= 3.13 and a lot of dependencies; Use Astral's ``uv`` to run:: + + uv run build.py --help + uv run build.py builder + uv run build.py build --arch arm64 + uv run build.py release-publish --target combined """ import sys -if sys.version_info < (3, 10): # noqa: UP036 - print("ERROR: Python >= 3.10 is required.", file=sys.stderr) +if sys.version_info < (3, 13): + print("ERROR: Python >= 3.13 is required.", file=sys.stderr) sys.exit(1) try: from captain.cli import main except ImportError as exc: print(f"ERROR: {exc}", file=sys.stderr) - print("Install dependencies: pip install -r requirements.txt", file=sys.stderr) + uv_url = "https://docs.astral.sh/uv/getting-started/installation/" + print(f"Missing dependencies, use uv to run. 
See {uv_url}", file=sys.stderr) sys.exit(1) if __name__ == "__main__": diff --git a/captain/__init__.py b/captain/__init__.py index 29fee9b..c962002 100644 --- a/captain/__init__.py +++ b/captain/__init__.py @@ -1 +1,52 @@ -# captain — CaptainOS build system +"""captain — CaptainOS build system. + +Logging is configured here so that every ``logging.getLogger(__name__)`` +call in submodules automatically inherits the Rich console handler. +""" + +from __future__ import annotations + +import logging +import os +import shutil + +import click +from rich.console import Console +from rich.logging import RichHandler +from rich.traceback import install as _install_rich_traceback + +# Obtain terminal width +env_columns = shutil.get_terminal_size(fallback=(161, 24)).columns + +# Rich console — writes to stderr so log output never pollutes piped stdout. +# Install Rich traceback handler globally (once, at import time). +if os.environ.get("FORCE_COLOR", "0") == "1": + console: Console = Console(stderr=True, color_system="standard", width=env_columns) + _install_rich_traceback(console=console, show_locals=True, width=env_columns, suppress=[click]) +else: + console: Console = Console(stderr=True) + _install_rich_traceback(console=console, show_locals=True, width=None, suppress=[click]) + + +class _StageFormatter(logging.Formatter): + def format(self, record: logging.LogRecord) -> str: + name = record.name.replace("captain.", "") + record.__dict__["stage"] = name + if os.environ.get("CAPTAIN_IN_DOCKER", "") == "docker": + record.__dict__["stage"] = f"🐳 {name}" + return super().format(record) + + +_handler = RichHandler( + console=console, + show_time=False, + show_level=True, + show_path=True, + markup=True, # interprets [braket]stuff[/bracket] in log messages, beware + rich_tracebacks=True, + tracebacks_show_locals=True, + tracebacks_suppress=[click], # don't wanna see click infra code in traces +) +_handler.setFormatter(_StageFormatter("[bold][cyan]%(stage)s[/cyan][/bold]: 
%(message)s")) + +logging.basicConfig(level="INFO", datefmt="[%X]", handlers=[_handler]) diff --git a/captain/artifacts.py b/captain/artifacts.py index f40158f..82b07ab 100644 --- a/captain/artifacts.py +++ b/captain/artifacts.py @@ -3,14 +3,14 @@ from __future__ import annotations import hashlib +import logging import shutil from pathlib import Path from captain.config import Config -from captain.log import StageLogger, for_stage from captain.util import ensure_dir -_default_log = for_stage("artifacts") +log = logging.getLogger(__name__) def _sha256(path: Path) -> str: @@ -31,52 +31,66 @@ def _human_size(size: int) -> str: return f"{size:.1f}T" -def collect_kernel(cfg: Config, logger: StageLogger | None = None) -> None: - """Copy the kernel image from mkosi.output/kernel/{version}/{arch}/ to out/.""" - _log = logger or _default_log +def collect_kernel(cfg: Config) -> None: + """Copy the kernel image produced by mkosi.""" out = ensure_dir(cfg.output_dir) - vmlinuz_dir = cfg.kernel_output - vmlinuz_files = sorted(vmlinuz_dir.glob("vmlinuz-*")) if vmlinuz_dir.is_dir() else [] - if vmlinuz_files: - vmlinuz_src = vmlinuz_files[0] - vmlinuz_dst = out / f"vmlinuz-{cfg.kernel_version}-{cfg.arch_info.output_arch}" + log.debug("Looking for kernel image produced by mkosi in %s", cfg.initramfs_output) + vmlinu_files = sorted(cfg.initramfs_output.glob("*.vmlinu*")) + if vmlinu_files: + vmlinuz_src = vmlinu_files[0] + vmlinuz_dst = out / f"vmlinuz-{cfg.flavor_id}-{cfg.arch_info.output_arch}" shutil.copy2(vmlinuz_src, vmlinuz_dst) - _log.log(f"kernel: {vmlinuz_dst} ({_human_size(vmlinuz_dst.stat().st_size)})") + log.info( + "mkosi supplied kernel: %s (%s)", vmlinuz_dst, _human_size(vmlinuz_dst.stat().st_size) + ) else: - _log.warn(f"No kernel image found in {cfg.kernel_output}") + log.error("No kernel image produced by mkosi in %s", cfg.initramfs_output) -def collect_initramfs(cfg: Config, logger: StageLogger | None = None) -> None: +def collect_initramfs(cfg: Config) -> None: 
"""Copy the initramfs CPIO from mkosi.output/initramfs/{arch}/ to out/.""" - _log = logger or _default_log out = ensure_dir(cfg.output_dir) cpio_files = sorted(cfg.initramfs_output.glob("*.cpio*")) if cpio_files: initrd_src = cpio_files[0] - initrd_dst = out / f"initramfs-{cfg.kernel_version}-{cfg.arch_info.output_arch}" + initrd_dst = out / f"initramfs-{cfg.flavor_id}-{cfg.arch_info.output_arch}" shutil.copy2(initrd_src, initrd_dst) - _log.log(f"initramfs: {initrd_dst} ({_human_size(initrd_dst.stat().st_size)})") + log.info("initramfs: %s (%s)", initrd_dst, _human_size(initrd_dst.stat().st_size)) + else: + log.warning("No initramfs CPIO found in %s", cfg.initramfs_output) + + +def collect_dtbs(cfg): + """Collect the dtb directory produced by mkosi's finalize script.""" + indir = ensure_dir(cfg.initramfs_output) + dtb_dir: Path = indir / "dtb" + if dtb_dir.exists(): + log.info("Found dtb directory in %s, copying to output...", dtb_dir) + out = ensure_dir(cfg.output_dir) + target_dtb_dir = out / f"dtb-{cfg.flavor_id}-{cfg.arch_info.output_arch}" + if target_dtb_dir.exists(): + shutil.rmtree(target_dtb_dir) + shutil.copytree(dtb_dir, target_dtb_dir) + log.info("Copied dtb directory: %s", target_dtb_dir) else: - _log.warn(f"No initramfs CPIO found in {cfg.initramfs_output}") + log.warning("No dtb directory found in %s", dtb_dir) -def collect_iso(cfg: Config, logger: StageLogger | None = None) -> None: +def collect_iso(cfg: Config) -> None: """Copy the ISO image from mkosi.output/iso/{arch}/ to out/.""" - _log = logger or _default_log out = ensure_dir(cfg.output_dir) iso_dir = cfg.iso_output iso_files = sorted(iso_dir.glob("*.iso")) if iso_dir.is_dir() else [] if iso_files: iso_src = iso_files[0] - iso_dst = out / f"captainos-{cfg.kernel_version}-{cfg.arch_info.output_arch}.iso" + iso_dst = out / f"captainos-{cfg.flavor_id}-{cfg.arch_info.output_arch}.iso" shutil.copy2(iso_src, iso_dst) - _log.log(f"iso: {iso_dst} ({_human_size(iso_dst.stat().st_size)})") + 
log.info("iso: %s (%s)", iso_dst, _human_size(iso_dst.stat().st_size)) def collect_checksums( files: list[Path], output: Path, - logger: StageLogger | None = None, ) -> None: """Compute SHA-256 checksums for *files* and write them to *output*. @@ -87,11 +101,10 @@ def collect_checksums( Only the bare filename (no directory component) is recorded so that ``sha256sum -c`` works from the directory containing the files. """ - _log = logger or _default_log lines: list[str] = [] for path in files: if not path.is_file(): - _log.warn(f"Skipping missing file: {path}") + log.warning("Skipping missing file: %s", path) continue digest = _sha256(path) lines.append(f"{digest} {path.name}") @@ -99,24 +112,23 @@ def collect_checksums( content = "\n".join(lines) + "\n" output.parent.mkdir(parents=True, exist_ok=True) if output.is_file() and output.read_text() == content: - _log.log(f"Checksums unchanged: {output}") + log.info("Checksums unchanged: %s", output) else: output.write_text(content) - _log.log(f"Wrote checksums to {output}") + log.info("Wrote checksums to %s", output) for line in lines: - _log.log(f" {line}") + log.info(" %s", line) else: - # All specified files were missing or non-regular; no checksums written. - _log.warn( - f"No checksums were written for {len(files)} requested file(s); " - "no output checksum file was created." 
+ log.warning( + "No checksums were written for %d requested file(s); " + "no output checksum file was created.", + len(files), ) -def collect(cfg: Config, logger: StageLogger | None = None) -> None: +def collect(cfg: Config) -> None: """Copy initramfs, kernel, and ISO images from mkosi.output/ to out/.""" - _log = logger or _default_log - _log.log("Collecting build artifacts...") - collect_initramfs(cfg, logger=_log) - collect_kernel(cfg, logger=_log) - collect_iso(cfg, logger=_log) + log.info("Collecting build artifacts...") + collect_initramfs(cfg) + collect_kernel(cfg) + collect_iso(cfg) diff --git a/captain/buildah.py b/captain/buildah.py index 9230641..e595069 100644 --- a/captain/buildah.py +++ b/captain/buildah.py @@ -12,30 +12,25 @@ from __future__ import annotations +import logging +import os from pathlib import Path -from captain.log import StageLogger, for_stage from captain.util import run -_default_log = for_stage("buildah") +log = logging.getLogger(__name__) def from_image( image: str, *, platform: str | None = None, - logger: StageLogger | None = None, ) -> str: - """Create a working container from *image* (local ID or ``scratch``). - - Returns the container ID. 
- """ - _log = logger or _default_log cmd: list[str] = ["buildah", "from"] if platform: cmd += ["--platform", platform] cmd.append(image) - _log.log(f"buildah from {image}") + log.info("buildah from %s", image) result = run(cmd, capture=True) return result.stdout.strip() @@ -43,12 +38,8 @@ def from_image( def add( container: str, files: list[Path], - *, - logger: StageLogger | None = None, ) -> None: - """Add *files* into the root of *container*.""" - _log = logger or _default_log - _log.log(f"buildah add {container} ({len(files)} files)") + log.info("buildah add %s (%d files)", container, len(files)) cmd: list[str] = ["buildah", "add", container] cmd += [str(f) for f in files] cmd.append("/") @@ -62,10 +53,7 @@ def config( arch: str | None = None, annotations: dict[str, str] | None = None, labels: dict[str, str] | None = None, - logger: StageLogger | None = None, ) -> None: - """Set image metadata on *container*.""" - _log = logger or _default_log cmd: list[str] = ["buildah", "config"] if os: cmd += ["--os", os] @@ -76,7 +64,7 @@ def config( for key, value in (labels or {}).items(): cmd += ["--label", f"{key}={value}"] cmd.append(container) - _log.log(f"buildah config {container}") + log.info("buildah config %s", container) run(cmd) @@ -84,15 +72,8 @@ def commit( container: str, *, timestamp: int | None = None, - logger: StageLogger | None = None, ) -> str: - """Commit *container* to a local image and remove the container. - - *timestamp* sets the creation timestamp (epoch seconds) for - deterministic builds. Returns the image ID. - """ - _log = logger or _default_log - _log.log(f"buildah commit {container}") + log.info("buildah commit %s", container) cmd: list[str] = ["buildah", "commit", "--rm"] if timestamp is not None: cmd += ["--timestamp", str(timestamp)] @@ -104,30 +85,15 @@ def commit( def push( image_id: str, dest: str, - *, - logger: StageLogger | None = None, ) -> None: - """Push *image_id* to a remote registry. 
- - *dest* should be a fully-qualified image reference (without the - ``docker://`` transport prefix — it is added automatically). - """ - _log = logger or _default_log - _log.log(f"buildah push → {dest}") + log.info("buildah push → %s", dest) run(["buildah", "push", image_id, f"docker://{dest}"]) def manifest_create( ref: str, - *, - logger: StageLogger | None = None, ) -> str: - """Create a new manifest list named *ref*. - - Returns the manifest list ID. - """ - _log = logger or _default_log - _log.log(f"buildah manifest create {ref}") + log.info("buildah manifest create %s", ref) result = run(["buildah", "manifest", "create", ref], capture=True) return result.stdout.strip() @@ -138,38 +104,37 @@ def manifest_add( *, os: str | None = None, arch: str | None = None, - logger: StageLogger | None = None, ) -> None: - """Add *image* to a manifest list.""" - _log = logger or _default_log cmd: list[str] = ["buildah", "manifest", "add"] if os: cmd += ["--os", os] if arch: cmd += ["--arch", arch] cmd += [manifest, image] - _log.log(f"buildah manifest add {manifest} ← {image}") + log.info("buildah manifest add %s ← %s", manifest, image) run(cmd) def manifest_push( manifest: str, dest: str, - *, - logger: StageLogger | None = None, ) -> None: - """Push *manifest* list (with all referenced images) to *dest*.""" - _log = logger or _default_log - _log.log(f"buildah manifest push → {dest}") - run(["buildah", "manifest", "push", "--all", manifest, f"docker://{dest}"]) + log.info("buildah manifest push → %s", dest) + run( + [ + "buildah", + "manifest", + "push", + *(("--tls-verify=false",) if os.environ.get("BUILDAH_INSECURE") == "1" else ()), + "--all", + manifest, + f"docker://{dest}", + ] + ) def rmi( image: str, - *, - logger: StageLogger | None = None, ) -> None: - """Remove a local image or manifest list.""" - _log = logger or _default_log - _log.log(f"buildah rmi {image}") + log.info("buildah rmi %s", image) run(["buildah", "rmi", image]) diff --git 
a/captain/cli/__init__.py b/captain/cli/__init__.py index fa440c1..442221b 100644 --- a/captain/cli/__init__.py +++ b/captain/cli/__init__.py @@ -1,22 +1,20 @@ -"""CLI entry point — single configargparse parser with pre-extracted subcommand. +"""Click-based CLI for CaptainOS. -Every configuration parameter is both a ``--cli-flag`` and an environment -variable, following the ff priority model: +Provides the ``captain`` console script with subcommands: - CLI args > environment variables > defaults +- ``builder`` — build the Docker builder image, optionally push it +- ``build`` — full build pipeline (tools → initramfs → iso → artifacts) +- ``release-publish`` — publish artifacts as OCI images via buildah -The subcommand (``build``, ``kernel``, ``tools``, …) is extracted from -``sys.argv`` *before* parsing so that flags work in any position:: +Shell completion is available for bash and zsh:: - ./build.py --arch=arm64 kernel # works - ./build.py kernel --arch=arm64 # also works - ARCH=arm64 ./build.py kernel # also works + # bash + eval "$(_CAPTAIN_COMPLETE=bash_source captain)" + + # zsh + eval "$(_CAPTAIN_COMPLETE=zsh_source captain)" """ from captain.cli._main import main -from captain.cli._parser import COMMANDS -__all__ = [ - "COMMANDS", - "main", -] +__all__ = ["main"] diff --git a/captain/cli/_build.py b/captain/cli/_build.py new file mode 100644 index 0000000..b18dfe6 --- /dev/null +++ b/captain/cli/_build.py @@ -0,0 +1,133 @@ +"""``captain build`` — full build pipeline (tools → initramfs → iso → artifacts).""" + +from __future__ import annotations + +import logging + +import click + +import captain.flavor +from captain import artifacts +from captain.cli._main import CliContext, cli +from captain.cli._stages import ( + _build_iso_stage, + _build_mkosi_stage, + _build_tools_stage, +) + +log = logging.getLogger(__name__) + + +@cli.command( + "build", + short_help="Run the full build pipeline via mkosi.", +) +@click.option( + "--mkosi-mode", + envvar="MKOSI_MODE", 
+ default="docker", + show_default=True, + type=click.Choice(["docker", "native", "skip"], case_sensitive=False), + metavar="MODE", + help="Mkosi stage execution mode (docker, native, skip).", +) +@click.option( + "--tools-mode", + envvar="TOOLS_MODE", + default="docker", + show_default=True, + type=click.Choice(["docker", "native", "skip"], case_sensitive=False), + metavar="MODE", + help="Tools download stage execution mode (docker, native, skip).", +) +@click.option( + "--iso-mode", + envvar="ISO_MODE", + default="docker", + show_default=True, + type=click.Choice(["docker", "native", "skip"], case_sensitive=False), + metavar="MODE", + help="ISO build stage execution mode (docker, native, skip).", +) +@click.option( + "--force", + "force_mkosi", + is_flag=True, + default=False, + help="Pass --force through to mkosi.", +) +@click.option( + "--force-tools", + envvar="FORCE_TOOLS", + is_flag=True, + default=False, + help="Re-download tools even if outputs already exist.", +) +@click.option( + "--force-iso", + envvar="FORCE_ISO", + is_flag=True, + default=False, + help="Force ISO rebuild even if outputs already exist.", +) +@click.pass_obj +def build_cmd( + cli_ctx: CliContext, + *, + mkosi_mode: str, + tools_mode: str, + iso_mode: str, + force_mkosi: bool, + force_tools: bool, + force_iso: bool, +) -> None: + """Run the full CaptainOS build pipeline. + + Stages executed in order: tools → initramfs (mkosi) → ISO → artifact + collection. Each stage can be independently set to run inside Docker, + natively on the host, or skipped entirely. 
+ + \b + Examples + -------- + captain build + captain build --arch arm64 + captain build --flavor-id trixie-meson64 --arch arm64 + captain build --mkosi-mode native --tools-mode native + captain build --force --force-tools + """ + + cfg = cli_ctx.make_config( + tools_mode=tools_mode, + mkosi_mode=mkosi_mode, + iso_mode=iso_mode, + force_tools=force_tools, + force_iso=force_iso, + ) + + if force_mkosi: + cfg.mkosi_args = ["--force"] + + # Instantiate and generate the flavor. + flavor = captain.flavor.create_and_setup_flavor_for_id(cfg.flavor_id, cfg) + flavor.generate() + + # Tools stage. + _build_tools_stage(cfg) + + # Initramfs (mkosi) stage + artifact collection. + _build_mkosi_stage(cfg, list(cfg.mkosi_args)) + artifacts.collect_initramfs(cfg) + artifacts.collect_kernel(cfg) + artifacts.collect_dtbs(cfg) + log.info("Initramfs build complete.") + + # ISO stage (if the flavor supports it). + if flavor.has_iso(): + _build_iso_stage(cfg) + else: + log.info("Flavor '%s' does not produce an ISO — skipping.", flavor.id) + + # Final artifact collection. + artifacts.collect(cfg) + log.info("Build complete!") diff --git a/captain/cli/_builder.py b/captain/cli/_builder.py new file mode 100644 index 0000000..8e8d3ea --- /dev/null +++ b/captain/cli/_builder.py @@ -0,0 +1,47 @@ +"""``captain builder`` — build (and optionally push) the Docker builder image.""" + +from __future__ import annotations + +import logging + +import click + +from captain.cli._main import CliContext, cli +from captain.docker import obtain_builder + +log = logging.getLogger(__name__) + + +@cli.command( + "builder", + short_help="Build the Docker builder image and optionally push it.", +) +@click.option( + "--push", + is_flag=True, + default=False, + help="Push the built image to a registry after building.", +) +@click.pass_obj +def builder_cmd( + cli_ctx: CliContext, + *, + push: bool, +) -> None: + """Build the Docker builder image used by other build stages. 
+ + By default the image is built locally only. Pass --push to also push the image to a + remote container registry. + + \b + Examples + -------- + captain builder + captain builder --no-cache + captain builder --push + """ + cfg = cli_ctx.make_config(builder_push=push) + + # 1. Build the image. + obtain_builder(cfg) + log.info("Builder image '%s' is ready.", cfg.builder_image) diff --git a/captain/cli/_commands.py b/captain/cli/_commands.py deleted file mode 100644 index 2789757..0000000 --- a/captain/cli/_commands.py +++ /dev/null @@ -1,290 +0,0 @@ -"""Build and utility command handlers.""" - -from __future__ import annotations - -import shutil -from pathlib import Path - -from captain import artifacts, docker, qemu -from captain.config import Config -from captain.log import StageLogger, for_stage -from captain.util import run - -from ._stages import ( - _build_iso_stage, - _build_kernel_stage, - _build_mkosi_stage, - _build_tools_stage, -) - - -def _cmd_kernel(cfg: Config, _extra_args: list[str]) -> None: - """Build only the kernel (no tools, no mkosi).""" - klog = for_stage("kernel") - _build_kernel_stage(cfg) - # Copy vmlinuz to the standard out/ directory. - artifacts.collect_kernel(cfg, logger=klog) - klog.log("Kernel build stage complete!") - - -def _cmd_tools(cfg: Config, _extra_args: list[str]) -> None: - """Download tools (containerd, runc, nerdctl, CNI plugins).""" - _build_tools_stage(cfg) - tlog = for_stage("tools") - tlog.log("Tools stage complete!") - - -def _check_kernel_modules(cfg: Config) -> None: - """Verify kernel modules exist before building the initramfs. - - The initramfs depends on pre-built kernel modules in the ExtraTrees - directory. If they are missing (e.g. due to an artifact download - issue) the build should fail immediately rather than silently - producing an initramfs without modules. 
- """ - ilog = for_stage("initramfs") - modules_dir = cfg.modules_output / "usr" / "lib" / "modules" - if not modules_dir.is_dir(): - ilog.err(f"Kernel modules directory not found: {modules_dir}") - ilog.err("Ensure the kernel build artifacts are downloaded correctly.") - raise SystemExit(1) - # Check that at least one module version directory exists with modules - version_dirs = [d for d in modules_dir.iterdir() if d.is_dir()] - if not version_dirs: - ilog.err(f"No kernel version directories found in {modules_dir}") - raise SystemExit(1) - # Search all version directories for at least one kernel module - for version_dir in version_dirs: - if any(version_dir.rglob("*.ko*")): - ilog.log(f"Kernel modules found in {version_dir} (version: {version_dir.name})") - return - searched = ", ".join(str(d) for d in version_dirs) - ilog.err("No kernel modules (.ko/.ko.zst) found in any kernel version directory.") - ilog.err(f"Searched directories: {searched}") - raise SystemExit(1) - - -def _cmd_initramfs(cfg: Config, extra_args: list[str]) -> None: - """Build only the initramfs via mkosi, then collect artifacts.""" - ilog = for_stage("initramfs") - _check_kernel_modules(cfg) - _build_mkosi_stage(cfg, extra_args) - artifacts.collect_initramfs(cfg, logger=ilog) - artifacts.collect_kernel(cfg, logger=ilog) - ilog.log("Initramfs build complete!") - - -def _cmd_iso(cfg: Config, _extra_args: list[str]) -> None: - """Build only the ISO image.""" - isolog = for_stage("iso") - _build_iso_stage(cfg) - artifacts.collect_iso(cfg, logger=isolog) - isolog.log("ISO build complete!") - - -def _cmd_build(cfg: Config, extra_args: list[str]) -> None: - """Full build: kernel → tools → initramfs → iso → artifacts.""" - blog = for_stage("build") - _build_kernel_stage(cfg) - _build_tools_stage(cfg) - _build_mkosi_stage(cfg, extra_args) - _build_iso_stage(cfg) - artifacts.collect(cfg, logger=blog) - blog.log("Build complete!") - - -def _cmd_shell(cfg: Config, _extra_args: list[str]) -> None: - 
"""Interactive shell inside the builder container.""" - slog = for_stage("shell") - docker.build_builder(cfg, logger=slog) - slog.log("Entering builder shell (type 'exit' to leave)...") - docker.run_in_builder(cfg, "-it", "--entrypoint", "/bin/bash", cfg.builder_image) - - -def _cmd_clean(cfg: Config, _extra_args: list[str], args: object = None) -> None: - """Remove build artifacts for the selected kernel version, or all.""" - clog = for_stage("clean") - clean_all = getattr(args, "clean_all", False) - - if clean_all: - _clean_all(cfg, clog) - else: - _clean_version(cfg, clog) - - -def _clean_version(cfg: Config, clog: StageLogger) -> None: - """Remove build artifacts for a single kernel version.""" - kver = cfg.kernel_version - clog.log(f"Cleaning build artifacts for kernel {kver} ({cfg.arch})...") - mkosi_output = cfg.mkosi_output - - # Version-specific directories under mkosi.output/{stage}/{version}/{arch} - version_dirs = [ - mkosi_output / "kernel" / kver / cfg.arch, - mkosi_output / "initramfs" / kver / cfg.arch, - mkosi_output / "iso" / kver / cfg.arch, - ] - - has_docker = shutil.which("docker") is not None - existing = [d for d in version_dirs if d.exists()] - if existing and has_docker: - # Use Docker to remove root-owned files from mkosi. - # Invoke rm directly (no shell) to avoid injection via path components. 
- container_path_args = [ - f"/work/mkosi.output/{d.relative_to(mkosi_output)}" for d in existing - ] - run( - [ - "docker", - "run", - "--rm", - "-v", - f"{cfg.project_dir}:/work", - "-w", - "/work", - "debian:trixie", - "rm", - "-rf", - "--", - *container_path_args, - ], - ) - elif existing: - for d in existing: - shutil.rmtree(d, ignore_errors=True) - - # Remove versioned artifacts from out/ - if cfg.output_dir.exists(): - for pattern in ( - f"vmlinuz-{kver}-*", - f"initramfs-{kver}-*", - f"captainos-{kver}-*", - f"sha256sums-{kver}-*", - ): - for p in cfg.output_dir.glob(pattern): - p.unlink(missing_ok=True) - - clog.log(f"Clean complete for kernel {kver}.") - - -def _clean_all(cfg: Config, clog: StageLogger) -> None: - """Remove all build artifacts (all kernel versions).""" - clog.log("Cleaning ALL build artifacts...") - mkosi_output = cfg.mkosi_output - mkosi_cache = cfg.project_dir / "mkosi.cache" - - has_docker = shutil.which("docker") is not None - if has_docker: - # Use Docker to remove root-owned files from mkosi - if mkosi_output.exists() or mkosi_cache.exists(): - run( - [ - "docker", - "run", - "--rm", - "-v", - f"{cfg.project_dir}:/work", - "-w", - "/work", - "debian:trixie", - "sh", - "-c", - "rm -rf /work/mkosi.output/image*" - " /work/mkosi.output/initramfs" - " /work/mkosi.output/kernel" - " /work/mkosi.output/tools" - " /work/mkosi.output/iso" - " /work/mkosi.cache", - ], - ) - else: - # No Docker available — remove directly (may need sudo for root-owned mkosi files) - for pattern in ("image*", "initramfs", "kernel", "tools", "iso"): - for p in mkosi_output.glob(pattern): - if p.is_dir(): - shutil.rmtree(p, ignore_errors=True) - else: - p.unlink(missing_ok=True) - if mkosi_cache.exists(): - shutil.rmtree(mkosi_cache, ignore_errors=True) - - if cfg.output_dir.exists(): - shutil.rmtree(cfg.output_dir) - clog.log("Clean complete.") - - -def _cmd_summary(cfg: Config, _extra_args: list[str]) -> None: - """Print mkosi configuration summary.""" - slog 
= for_stage("summary") - tools_tree = str(cfg.tools_output) - modules_tree = str(cfg.modules_output) - output_dir = str(cfg.initramfs_output) - match cfg.mkosi_mode: - case "docker": - docker.build_builder(cfg, logger=slog) - container_tree = f"/work/mkosi.output/tools/{cfg.arch}" - container_modules = f"/work/mkosi.output/kernel/{cfg.kernel_version}/{cfg.arch}/modules" - container_outdir = f"/work/mkosi.output/initramfs/{cfg.kernel_version}/{cfg.arch}" - docker.run_mkosi( - cfg, - f"--extra-tree={container_tree}", - f"--extra-tree={container_modules}", - f"--output-dir={container_outdir}", - "summary", - logger=slog, - ) - case "native": - run( - [ - "mkosi", - f"--architecture={cfg.arch_info.mkosi_arch}", - f"--extra-tree={tools_tree}", - f"--extra-tree={modules_tree}", - f"--output-dir={output_dir}", - "summary", - ], - cwd=cfg.project_dir, - ) - case "skip": - slog.err("Cannot show mkosi summary when MKOSI_MODE=skip.") - raise SystemExit(1) - - -def _cmd_checksums(cfg: Config, _extra_args: list[str], args: object = None) -> None: - """Compute SHA-256 checksums for the specified files.""" - clog = for_stage("checksums") - files = getattr(args, "files", None) or [] - output = getattr(args, "output", None) - - if files: - # Explicit mode: user provided specific files and output. - if not output: - clog.err("--output is required when specifying files explicitly.") - raise SystemExit(1) - artifacts.collect_checksums( - [Path(f) for f in files], - Path(output), - logger=clog, - ) - else: - # Default mode: produce checksums for the selected architecture. 
- out = cfg.output_dir - oarch = cfg.arch_info.output_arch - kver = cfg.kernel_version - arch_files = [ - out / f"vmlinuz-{kver}-{oarch}", - out / f"initramfs-{kver}-{oarch}", - out / f"captainos-{kver}-{oarch}.iso", - ] - existing = [f for f in arch_files if f.is_file()] - if not existing: - clog.err(f"No artifacts found for {kver}-{oarch} in {out}") - raise SystemExit(1) - dest = Path(output) if output else out / f"sha256sums-{kver}-{oarch}.txt" - artifacts.collect_checksums(existing, dest, logger=clog) - clog.log("Checksums complete!") - - -def _cmd_qemu_test(cfg: Config, _extra_args: list[str], args: object = None) -> None: - """Boot the image in QEMU for testing.""" - qemu.run_qemu(cfg, args=args) # type: ignore[arg-type] diff --git a/captain/cli/_iso.py b/captain/cli/_iso.py new file mode 100644 index 0000000..2e72b44 --- /dev/null +++ b/captain/cli/_iso.py @@ -0,0 +1,58 @@ +"""``captain iso`` — build a bootable ISO image for the specified flavor and architecture.""" + +from __future__ import annotations + +import logging + +import click + +import captain.flavor +from captain import artifacts +from captain.cli._main import CliContext, cli +from captain.cli._stages import ( + _build_iso_stage, +) + +log = logging.getLogger(__name__) + + +@cli.command( + "iso", + short_help="Build ISO image only. 
Part of build.", +) +@click.option( + "--iso-mode", + envvar="ISO_MODE", + default="docker", + show_default=True, + type=click.Choice(["docker", "native", "skip"], case_sensitive=False), + metavar="MODE", + help="ISO build stage execution mode (docker, native, skip).", +) +@click.option( + "--force-iso", + envvar="FORCE_ISO", + is_flag=True, + default=False, + help="Force ISO rebuild even if outputs already exist.", +) +@click.pass_obj +def build_cmd( + cli_ctx: CliContext, + *, + iso_mode: str, + force_iso: bool, +) -> None: + """Run the CaptainOS ISO build.""" + + cfg = cli_ctx.make_config( + iso_mode=iso_mode, + force_iso=force_iso, + ) + + # Instantiate the flavor + captain.flavor.create_and_setup_flavor_for_id(cfg.flavor_id, cfg) + + _build_iso_stage(cfg) + artifacts.collect_iso(cfg) + log.info("ISO build complete!!!") diff --git a/captain/cli/_main.py b/captain/cli/_main.py index c9c7575..664007e 100644 --- a/captain/cli/_main.py +++ b/captain/cli/_main.py @@ -1,144 +1,187 @@ -"""CLI entry point — single configargparse parser with pre-extracted subcommand. 
- -Every configuration parameter is both a ``--cli-flag`` and an environment -variable, following the ff priority model: - - CLI args > environment variables > defaults - -The subcommand (``build``, ``kernel``, ``tools``, …) is extracted from -``sys.argv`` *before* parsing so that flags work in any position:: - - ./build.py --arch=arm64 kernel # works - ./build.py kernel --arch=arm64 # also works - ARCH=arm64 ./build.py kernel # also works -""" +"""Click CLI — main group with shared options and subcommand registration.""" from __future__ import annotations +import logging import sys +from dataclasses import dataclass from pathlib import Path +from typing import Any + +import click +from trogon import tui + +from captain.config import DEFAULT_FLAVOR_ID, Config +from captain.flavor import list_available_flavors +from captain.util import detect_current_machine_arch + +log = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# CLI context object — shared state for all subcommands +# --------------------------------------------------------------------------- + + +@dataclass(slots=True) +class CliContext: + """Resolved common CLI options, passed to subcommands via ``@click.pass_obj``.""" + + project_dir: Path + arch: str + flavor_id: str + builder_registry: str | None + builder_repository: str | None + builder_image: str + + def make_config(self, **overrides: Any) -> Config: + """Build a :class:`Config` from the common options plus per-command *overrides*.""" + return Config( + project_dir=self.project_dir, + output_dir=self.project_dir / "out", + arch=self.arch, + flavor_id=self.flavor_id, + builder_registry=self.builder_registry, + builder_repository=self.builder_repository, + builder_image=self.builder_image, + **overrides, + ) + + +# --------------------------------------------------------------------------- +# Resolve project directory +# 
--------------------------------------------------------------------------- + + +def resolve_project_dir(project_dir: str | None) -> Path: + """Return an absolute ``Path`` for the project root.""" + if project_dir is not None: + return Path(project_dir) + # Walk upward from this file until we find pyproject.toml. + candidate = Path(__file__).resolve().parent.parent.parent + if (candidate / "pyproject.toml").is_file(): + return candidate + click.echo("Error: cannot auto-detect project directory. Pass --project-dir.", err=True) + sys.exit(1) + + +# --------------------------------------------------------------------------- +# Top-level Click group +# --------------------------------------------------------------------------- +# Important: decorator order matters here. The @tui() decorator must be outermost +# to properly wrap the entire CLI, including subcommands. + +CONTEXT_SETTINGS = dict( + help_option_names=["-h", "--help"], + max_content_width=120, +) + -from captain import docker -from captain.config import Config -from captain.log import for_stage -from captain.util import run - -from ._commands import ( - _cmd_build, - _cmd_checksums, - _cmd_clean, - _cmd_initramfs, - _cmd_iso, - _cmd_kernel, - _cmd_qemu_test, - _cmd_shell, - _cmd_summary, - _cmd_tools, +@tui() +@click.group( + context_settings=CONTEXT_SETTINGS, + invoke_without_command=True, + help=( + "CaptainOS build system.\n\n" + "Run 'captain COMMAND --help' for details on each subcommand.\n\n" + "Shell completion (bash/zsh):\n\n" + ' eval "$(_CAPTAIN_COMPLETE=bash_source captain)" # bash\n\n' + ' eval "$(_CAPTAIN_COMPLETE=zsh_source captain)" # zsh' + ), +) +@click.version_option(package_name="captain") +@click.option( + "-v", + "--verbose", + is_flag=True, + default=False, + envvar="CAPTAIN_VERBOSE", + help="Enable verbose (DEBUG-level) logging.", +) +@click.option( + "--arch", + envvar="ARCH", + default=(detect_current_machine_arch()), + show_default=True, + type=click.Choice(["amd64", "arm64"], 
case_sensitive=False), + metavar="ARCH", + help="Target architecture (amd64, arm64).", +) +@click.option( + "--flavor-id", + envvar="FLAVOR_ID", + default=DEFAULT_FLAVOR_ID, + show_default=True, + type=click.Choice(list_available_flavors(), case_sensitive=False), + help="Flavor (kernel/board config) to build.", +) +@click.option( + "--project-dir", + envvar="CAPTAIN_PROJECT_DIR", + default=None, + type=click.Path(exists=True, file_okay=False, resolve_path=True), + help="Project root directory (auto-detected when omitted).", +) +@click.option( + "--builder-registry", + envvar="REGISTRY", + default="ghcr.io", + show_default=True, + help="OCI registry hostname for the Docker builder image", +) +@click.option( + "--builder-repository", + envvar="GITHUB_REPOSITORY", + default="tinkerbell/captain", + show_default=True, + help="Repository path (owner/name) for the Docker builder image", +) +@click.option( + "--builder-image", + envvar="BUILDER_IMAGE", + default="captainos-builder", + show_default=True, + help="Local name/tag of Docker builder image name", ) -from ._parser import _build_parser, _extract_command -from ._release import _cmd_release - - -def main(project_dir: Path | None = None) -> None: - """Main CLI entry point.""" - - # 1. Extract the subcommand from argv before parsing so flags - # work in any position (before or after the command name). - raw_argv = sys.argv[1:] - command, flag_argv = _extract_command(raw_argv) - - # For release subcommands, defer -h/--help to _cmd_release so it - # can print subcommand-specific help instead of the generic release help. - # We defer whenever there's any positional token (not just valid ones), - # so that invalid subcommands like "push" show the proper error instead - # of the parent help. 
- help_deferred = False - if command == "release": - has_positional = any(not tok.startswith("-") for tok in flag_argv) - has_help = "-h" in flag_argv or "--help" in flag_argv - if has_positional and has_help: - flag_argv = [t for t in flag_argv if t not in ("-h", "--help")] - help_deferred = True - - # 2. Build the parser (TINK flags added only for qemu-test). - parser = _build_parser(command) - - # 3. Parse known args — anything unrecognised passes through to mkosi. - args, extra = parser.parse_known_args(flag_argv) - if help_deferred: - extra.append("--help") - - # 4. Separate --force (mkosi passthrough) from the rest. - mkosi_args: list[str] = [] - if getattr(args, "force", False): - mkosi_args.append("--force") - - # 5. Determine project directory. - if project_dir is None: - project_dir = Path(__file__).resolve().parent.parent.parent - - # 6. Build Config from the parsed namespace. - cfg = Config.from_args(args, project_dir) - cfg.mkosi_args = mkosi_args - - # 7. Dispatch. - dispatch: dict[str, object] = { - "build": _cmd_build, - "kernel": _cmd_kernel, - "tools": _cmd_tools, - "initramfs": _cmd_initramfs, - "iso": _cmd_iso, - "checksums": _cmd_checksums, - "shell": _cmd_shell, - "clean": _cmd_clean, - "release": _cmd_release, - "summary": _cmd_summary, - "qemu-test": _cmd_qemu_test, - } - - handler = dispatch.get(command) - if handler is not None: - if command in ("qemu-test", "checksums", "release", "clean"): - handler(cfg, extra, args=args) # type: ignore[operator] - else: - handler(cfg, extra) # type: ignore[operator] - else: - # Pass through to mkosi (shouldn't happen with _extract_command - # but kept as a safety net). 
- mlog = for_stage("mkosi") - tools_tree = str(cfg.tools_output) - modules_tree = str(cfg.modules_output) - output_dir = str(cfg.initramfs_output) - match cfg.mkosi_mode: - case "docker": - docker.build_builder(cfg, logger=mlog) - container_tree = f"/work/mkosi.output/tools/{cfg.arch}" - container_modules = ( - f"/work/mkosi.output/kernel/{cfg.kernel_version}/{cfg.arch}/modules" - ) - container_outdir = f"/work/mkosi.output/initramfs/{cfg.kernel_version}/{cfg.arch}" - docker.run_mkosi( - cfg, - f"--extra-tree={container_tree}", - f"--extra-tree={container_modules}", - f"--output-dir={container_outdir}", - command, - *extra, - logger=mlog, - ) - case "native": - run( - [ - "mkosi", - f"--architecture={cfg.arch_info.mkosi_arch}", - f"--extra-tree={tools_tree}", - f"--extra-tree={modules_tree}", - f"--output-dir={output_dir}", - command, - *extra, - ], - cwd=cfg.project_dir, - ) - case "skip": - mlog.err(f"Cannot pass '{command}' to mkosi when MKOSI_MODE=skip.") - raise SystemExit(1) +@click.pass_context +def cli( + ctx: click.Context, + *, + verbose: bool, + arch: str, + flavor_id: str, + project_dir: str | None, + builder_registry: str | None, + builder_repository: str | None, + builder_image: str, +) -> None: + """CaptainOS build system — click CLI.""" + # Configure log level based on --verbose. + logging.getLogger().setLevel(logging.DEBUG if verbose else logging.INFO) + + if ctx.invoked_subcommand is None: + click.echo(ctx.get_help()) + return + + # Build the shared context object for subcommands. 
+ ctx.obj = CliContext( + project_dir=resolve_project_dir(project_dir), + arch=arch, + flavor_id=flavor_id, + builder_registry=builder_registry, + builder_repository=builder_repository, + builder_image=builder_image, + ) + + +# --------------------------------------------------------------------------- +# Register subcommands (imported lazily to avoid circular imports) +# --------------------------------------------------------------------------- + + +def main() -> None: + """Console-script entry point.""" + # Import subcommand modules to register them on the group. + from captain.cli import _build, _builder, _iso, _release_publish, _tools # noqa: F401 + + cli() diff --git a/captain/cli/_parser.py b/captain/cli/_parser.py deleted file mode 100644 index 4e6d129..0000000 --- a/captain/cli/_parser.py +++ /dev/null @@ -1,583 +0,0 @@ -"""CLI parser infrastructure — formatter, constants, and flag definitions.""" - -from __future__ import annotations - -import argparse -import shutil -import sys -from collections.abc import Callable, Iterable - -import configargparse - -from captain.config import DEFAULT_KERNEL_VERSION - -# --------------------------------------------------------------------------- -# Known subcommands (order matters for help text) -# --------------------------------------------------------------------------- - -COMMANDS: dict[str, str] = { - "build": "Run all build stages: kernel → tools → initramfs → iso (default)", - "kernel": "Build only the kernel + modules", - "tools": "Download tools (containerd, runc, nerdctl, CNI)", - "initramfs": "Build only the initramfs via mkosi", - "iso": "Build a UEFI-bootable ISO image", - "checksums": "Compute SHA-256 checksums for specified files", - "release": "OCI artifact operations (publish, pull, tag)", - "shell": "Interactive shell inside the builder container", - "clean": "Remove build artifacts (per kernel version or all)", - "summary": "Print mkosi configuration summary", - "qemu-test": "Boot the image in QEMU 
for testing", -} - -VALID_MODES = ("docker", "native", "skip") - -# Boolean (store_true) flags — these do NOT consume the next token as a value. -# Used by _extract_command to avoid treating a flag value as a subcommand. -_BOOLEAN_FLAGS = frozenset( - { - "--all", - "--force-kernel", - "--force-tools", - "--force-iso", - "--force", - "--no-cache", - "-h", - "--help", - } -) - - -class _HelpFormatter(argparse.RawDescriptionHelpFormatter): - """Clean help: word-wrapped text, raw epilog, no env-var refs, - no defaults when the value is empty / None / False, and a short - usage line.""" - - def _get_help_string(self, action: argparse.Action) -> str: - """Append ``(default: X)`` only when X is meaningful.""" - text = action.help or "" - if action.default in (None, "", False, argparse.SUPPRESS): - return text - if "%(default)" not in text: - text += " (default: %(default)s)" - return text - - def _format_usage( - self, - usage: str | None, - actions: Iterable[argparse.Action], - groups: Iterable[argparse._MutuallyExclusiveGroup], - prefix: str | None, - ) -> str: - """Show a short usage line with the command placeholder.""" - prog = self._prog - # Top-level ("build.py") and release ("build.py release") have subcommands. - if prog in ("build.py", "build.py release"): - return f"usage: {prog} [command] [flags]\n\n" - if prog == "build.py release tag": - return f"usage: {prog} [flags]\n\n" - return f"usage: {prog} [flags]\n\n" - - -def _extract_command(argv: list[str]) -> tuple[str, list[str]]: - """Remove and return the first recognised subcommand from *argv*. - - Returns ``("build", argv)`` when no subcommand is found. - - The scanner skips tokens that are likely flag *values* (the token - immediately after a ``--flag`` that is not boolean and does not use - ``=`` syntax). This prevents ``--builder-image build`` from - incorrectly extracting ``build`` as the subcommand. 
- """ - prev_was_value_flag = False - for i, tok in enumerate(argv): - if tok.startswith("-"): - # Boolean flags don't consume the next token. - prev_was_value_flag = False if "=" in tok else tok not in _BOOLEAN_FLAGS - continue - if prev_was_value_flag: - # This token is the value of the preceding flag — skip it. - prev_was_value_flag = False - continue - # Standalone positional token — check if it's a command. - if tok in COMMANDS: - return tok, argv[:i] + argv[i + 1 :] - # Unknown positional token — not a recognised command. - valid = ", ".join(COMMANDS) - print( - f"error: unknown command '{tok}'\nvalid commands: {valid}", - file=sys.stderr, - ) - raise SystemExit(2) - return "build", list(argv) - - -def _build_parser(command: str) -> configargparse.ArgParser: - """Construct a command-specific CLI parser. - - Only the flags relevant to *command* are added, so ``--help`` - shows a focused help message. - """ - - # -- description & epilog ------------------------------------------ - if command == "build": - desc = "Build CaptainOS images. Stages: kernel → tools → initramfs → iso." - commands_list = "\n".join(f" {name:14s} {d}" for name, d in COMMANDS.items()) - epilog = f"""\ -commands: -{commands_list} -""" - elif command == "release": - desc = "OCI release workflow: pull (or build) → publish → tag" - release_cmds = { - "publish": "Publish artifacts as a multi-arch OCI image", - "pull": "Pull and extract artifacts (amd64, arm64, or combined)", - "tag": "Tag all artifact images with a version", - } - commands_list = "\n".join(f" {name:14s} {d}" for name, d in release_cmds.items()) - epilog = f"""\ -commands: -{commands_list} -""" - else: - desc = COMMANDS.get(command, command) - epilog = None - - # Adapt to the real terminal width so argparse wraps at word - # boundaries instead of letting the terminal hard-wrap mid-word. - # max_help_position=38 accommodates the widest flag+metavar. 
- columns = shutil.get_terminal_size().columns - - parser = configargparse.ArgParser( - prog=f"build.py {command}" if command != "build" else "build.py", - description=desc, - epilog=epilog, - add_env_var_help=False, - formatter_class=lambda prog: _HelpFormatter( - prog, - max_help_position=38, - width=columns, - ), - ) - - # -- Add only the flag groups relevant to this command ------------- - for adder in _COMMAND_FLAGS.get(command, []): - adder(parser) - - return parser - - -# --------------------------------------------------------------------------- -# Flag-group helpers — each adds one argument group to the parser -# --------------------------------------------------------------------------- - - -def _add_common_flags(parser: configargparse.ArgParser) -> None: - """--arch, --builder-image, --no-cache""" - g = parser.add_argument_group("build configuration") - g.add_argument( - "--arch", - env_var="ARCH", - default="amd64", - choices=["amd64", "arm64"], - help="target architecture", - ) - g.add_argument( - "--builder-image", - env_var="BUILDER_IMAGE", - metavar="IMAGE", - default="captainos-builder", - help="Docker builder image name", - ) - g.add_argument( - "--no-cache", - env_var="NO_CACHE", - action="store_true", - default=False, - help="rebuild builder image without Docker cache", - ) - - -def _add_kernel_flags(parser: configargparse.ArgParser) -> None: - """--kernel-version, --kernel-src, --kernel-mode, --force-kernel""" - g = parser.add_argument_group("kernel") - g.add_argument( - "--kernel-version", - env_var="KERNEL_VERSION", - metavar="VER", - default=DEFAULT_KERNEL_VERSION, - help="kernel version to build", - ) - g.add_argument( - "--kernel-config", - env_var="KERNEL_CONFIG", - metavar="PATH", - default=None, - help="path to kernel config file (overrides auto-detection from kernel.configs/)", - ) - g.add_argument( - "--kernel-src", - env_var="KERNEL_SRC", - metavar="PATH", - default=None, - help="path to local kernel source tree", - ) - 
g.add_argument( - "--kernel-mode", - env_var="KERNEL_MODE", - default="docker", - choices=list(VALID_MODES), - help="kernel stage execution mode", - ) - g.add_argument( - "--force-kernel", - env_var="FORCE_KERNEL", - action="store_true", - default=False, - help="force kernel rebuild even if outputs exist", - ) - - -def _add_tools_flags(parser: configargparse.ArgParser) -> None: - """--tools-mode, --force-tools""" - g = parser.add_argument_group("tools") - g.add_argument( - "--tools-mode", - env_var="TOOLS_MODE", - default="docker", - choices=list(VALID_MODES), - help="tools stage execution mode", - ) - g.add_argument( - "--force-tools", - env_var="FORCE_TOOLS", - action="store_true", - default=False, - help="re-download tools even if outputs exist", - ) - - -def _add_mkosi_flags(parser: configargparse.ArgParser) -> None: - """--mkosi-mode, --force (mkosi passthrough)""" - g = parser.add_argument_group("initramfs (mkosi)") - g.add_argument( - "--mkosi-mode", - env_var="MKOSI_MODE", - default="docker", - choices=list(VALID_MODES), - help="mkosi stage execution mode", - ) - g.add_argument( - "--force", - action="store_true", - default=False, - help="passed through to mkosi as --force", - ) - - -def _add_summary_flags(parser: configargparse.ArgParser) -> None: - """--mkosi-mode only (no --force).""" - g = parser.add_argument_group("mkosi") - g.add_argument( - "--mkosi-mode", - env_var="MKOSI_MODE", - default="docker", - choices=list(VALID_MODES), - help="mkosi stage execution mode", - ) - - -def _add_iso_flags(parser: configargparse.ArgParser) -> None: - """--iso-mode, --force-iso""" - g = parser.add_argument_group("iso") - g.add_argument( - "--iso-mode", - env_var="ISO_MODE", - default="docker", - choices=list(VALID_MODES), - help="iso stage execution mode", - ) - g.add_argument( - "--force-iso", - env_var="FORCE_ISO", - action="store_true", - default=False, - help="force ISO rebuild even if outputs exist", - ) - - -def _add_mode_flags(parser: 
configargparse.ArgParser) -> None: - """All four --*-mode flags (used by 'shell' which checks needs_docker).""" - g = parser.add_argument_group("stage modes") - for stage in ("kernel", "tools", "mkosi", "iso"): - g.add_argument( - f"--{stage}-mode", - env_var=f"{stage.upper()}_MODE", - default="docker", - choices=list(VALID_MODES), - help=f"{stage} stage execution mode", - ) - - -def _add_clean_flags(parser: configargparse.ArgParser) -> None: - """--all flag for the clean command.""" - g = parser.add_argument_group("clean") - g.add_argument( - "--all", - env_var="CLEAN_ALL", - action="store_true", - default=False, - dest="clean_all", - help="remove ALL build artifacts instead of just the selected kernel version", - ) - - -def _add_checksums_flags(parser: configargparse.ArgParser) -> None: - """--output and positional file arguments for the checksums command.""" - g = parser.add_argument_group("checksums") - g.add_argument( - "--output", - "-o", - metavar="FILE", - default=None, - help="path to write the checksum file (default: out/sha256sums-{arch}.txt)", - ) - g.add_argument( - "files", - nargs="*", - metavar="FILE", - help="files to checksum (default: standard release artifacts in out/)", - ) - - -def _add_release_flags(parser: configargparse.ArgParser) -> None: - """--release-mode and OCI registry flags for the release command.""" - _add_release_base_flags(parser) - _add_release_target_flag(parser) - _add_release_pull_output(parser) - - -def _add_release_base_flags(parser: configargparse.ArgParser) -> None: - """Core release flags shared by all release subcommands.""" - g = parser.add_argument_group("release") - g.add_argument( - "--release-mode", - env_var="RELEASE_MODE", - default="native", - choices=list(VALID_MODES), - metavar="MODE", - help="release stage execution mode", - ) - - g = parser.add_argument_group("OCI registry") - g.add_argument( - "--registry", - env_var="REGISTRY", - metavar="HOST", - default="ghcr.io", - help="OCI registry hostname", - ) - 
g.add_argument( - "--repository", - env_var="GITHUB_REPOSITORY", - metavar="OWNER/NAME", - default="tinkerbell/captain", - help="repository (owner/name)", - ) - g.add_argument( - "--oci-artifact-name", - env_var="OCI_ARTIFACT_NAME", - metavar="NAME", - default="artifacts", - help="OCI artifact image name", - ) - - -def _add_release_target_flag(parser: configargparse.ArgParser) -> None: - """--target flag for publish and pull (not tag).""" - g = parser.add_argument_group("target") - g.add_argument( - "--target", - env_var="TARGET", - default=None, - choices=["amd64", "arm64", "combined"], - help="artifact target (amd64, arm64, or combined; default: --arch value)", - ) - g.add_argument( - "--git-sha", - env_var="GITHUB_SHA", - metavar="SHA", - default=None, - help="git commit SHA (default: from git rev-parse HEAD)", - ) - g.add_argument( - "--version-exclude", - env_var="VERSION_EXCLUDE", - metavar="TAG", - default=None, - help="tag to exclude from git-describe version lookup", - ) - g.add_argument( - "--force", - env_var="FORCE", - action="store_true", - default=False, - help="publish even if the image already exists in the registry", - ) - - -def _add_release_pull_output(parser: configargparse.ArgParser) -> None: - """--pull-output flag (only relevant for 'release pull').""" - g = parser.add_argument_group("pull") - g.add_argument( - "--pull-output", - metavar="DIR", - default=None, - help="output directory for pulled artifacts", - ) - - -def _add_release_tag_version(parser: configargparse.ArgParser) -> None: - """Positional argument for 'release tag'.""" - parser.add_argument( - "version", - nargs="?", - default=None, - help="version tag to apply (e.g. 
v1.0.0)", - ) - - -def _add_qemu_flags(parser: configargparse.ArgParser) -> None: - """--qemu-append, --qemu-mem, --qemu-smp""" - g = parser.add_argument_group("qemu") - g.add_argument( - "--qemu-append", - env_var="QEMU_APPEND", - metavar="ARGS", - default="", - help="extra kernel cmdline args for qemu-test", - ) - g.add_argument( - "--qemu-mem", - env_var="QEMU_MEM", - metavar="SIZE", - default="2G", - help="QEMU RAM size", - ) - g.add_argument( - "--qemu-smp", - env_var="QEMU_SMP", - metavar="N", - default="2", - help="QEMU CPU count", - ) - - -def _add_tink_flags(parser: configargparse.ArgParser) -> None: - """Tinkerbell kernel cmdline flags + --ipam.""" - g = parser.add_argument_group("tinkerbell") - g.add_argument( - "--tink-worker-image", - env_var="TINK_WORKER_IMAGE", - metavar="IMAGE", - default="ghcr.io/tinkerbell/tink-agent:latest", - help="tink-agent container image reference", - ) - g.add_argument( - "--tink-docker-registry", - env_var="TINK_DOCKER_REGISTRY", - metavar="HOST", - default="", - help="registry host (triggers tink-agent services)", - ) - g.add_argument( - "--tink-grpc-authority", - env_var="TINK_GRPC_AUTHORITY", - metavar="ADDR", - default="", - help="tink-server gRPC endpoint (host:port)", - ) - g.add_argument( - "--tink-worker-id", - env_var="TINK_WORKER_ID", - metavar="ID", - default="", - help="machine / worker ID", - ) - g.add_argument( - "--tink-tls", - env_var="TINK_TLS", - metavar="BOOL", - default="false", - help="enable TLS to tink-server", - ) - g.add_argument( - "--tink-insecure-tls", - env_var="TINK_INSECURE_TLS", - metavar="BOOL", - default="true", - help="allow insecure TLS", - ) - g.add_argument( - "--tink-insecure-registries", - env_var="TINK_INSECURE_REGISTRIES", - metavar="LIST", - default="", - help="comma-separated insecure registries", - ) - g.add_argument( - "--tink-registry-username", - env_var="TINK_REGISTRY_USERNAME", - metavar="USER", - default="", - help="registry auth username", - ) - g.add_argument( - 
"--tink-registry-password", - env_var="TINK_REGISTRY_PASSWORD", - metavar="PASS", - default="", - help="registry auth password", - ) - g.add_argument( - "--tink-syslog-host", - env_var="TINK_SYSLOG_HOST", - metavar="HOST", - default="", - help="remote syslog host", - ) - g.add_argument( - "--tink-facility", - env_var="TINK_FACILITY", - metavar="CODE", - default="", - help="facility code", - ) - g.add_argument( - "--ipam", - env_var="IPAM", - metavar="PARAM", - default="", - help="static networking IPAM parameter", - ) - - -# Map command → list of flag-group adders. -_COMMAND_FLAGS: dict[str, list[Callable[..., None]]] = { - "build": [ - _add_common_flags, - _add_kernel_flags, - _add_tools_flags, - _add_mkosi_flags, - _add_iso_flags, - ], - "kernel": [_add_common_flags, _add_kernel_flags], - "tools": [_add_common_flags, _add_tools_flags], - "initramfs": [_add_common_flags, _add_kernel_flags, _add_mkosi_flags], - "iso": [_add_common_flags, _add_kernel_flags, _add_iso_flags], - "checksums": [_add_common_flags, _add_kernel_flags, _add_checksums_flags], - "release": [_add_common_flags, _add_kernel_flags, _add_release_flags], - "shell": [_add_common_flags], - "clean": [_add_common_flags, _add_kernel_flags, _add_clean_flags], - "summary": [_add_common_flags, _add_kernel_flags, _add_summary_flags], - "qemu-test": [_add_common_flags, _add_kernel_flags, _add_qemu_flags, _add_tink_flags], -} diff --git a/captain/cli/_release.py b/captain/cli/_release.py deleted file mode 100644 index 225612b..0000000 --- a/captain/cli/_release.py +++ /dev/null @@ -1,234 +0,0 @@ -"""Release subcommand — publish, pull, tag.""" - -from __future__ import annotations - -import shutil -import subprocess -from pathlib import Path - -import configargparse - -from captain import docker, oci -from captain.config import Config -from captain.log import for_stage -from captain.util import check_release_dependencies - -from ._parser import ( - _add_common_flags, - _add_kernel_flags, - 
_add_release_base_flags, - _add_release_pull_output, - _add_release_tag_version, - _add_release_target_flag, - _HelpFormatter, -) - -_RELEASE_SUBCOMMANDS = ("publish", "pull", "tag") - -_RELEASE_SUBCMD_INFO: dict[str, tuple[str, list]] = { - "publish": ( - "Publish artifacts as a multi-arch OCI image", - [_add_common_flags, _add_kernel_flags, _add_release_base_flags, _add_release_target_flag], - ), - "pull": ( - "Pull and extract artifacts (amd64, arm64, or combined)", - [ - _add_common_flags, - _add_kernel_flags, - _add_release_base_flags, - _add_release_target_flag, - _add_release_pull_output, - ], - ), - "tag": ( - "Tag all artifact images with a version", - [_add_common_flags, _add_kernel_flags, _add_release_base_flags, _add_release_tag_version], - ), -} - - -def _print_release_subcmd_help(sub: str, *, exit_code: int = 0) -> None: - """Print help for a release subcommand and exit.""" - desc, adders = _RELEASE_SUBCMD_INFO[sub] - columns = shutil.get_terminal_size().columns - parser = configargparse.ArgParser( - prog=f"build.py release {sub}", - description=desc, - add_env_var_help=False, - formatter_class=lambda prog: _HelpFormatter( - prog, - max_help_position=38, - width=columns, - ), - ) - for adder in adders: - adder(parser) - parser.print_help() - raise SystemExit(exit_code) - - -def _resolve_git_sha(args: object, project_dir: Path) -> str: - """Return the git SHA from args or by running git rev-parse.""" - sha = getattr(args, "git_sha", None) - if sha: - return sha - - result = subprocess.run( - ["git", "rev-parse", "HEAD"], - capture_output=True, - text=True, - check=True, - cwd=project_dir, - ) - return result.stdout.strip() - - -def _cmd_release(cfg: Config, extra_args: list[str], args: object = None) -> None: - """OCI artifact operations: publish, pull, tag.""" - rlog = for_stage("release") - - # Peel the release subcommand from extra_args. 
- if not extra_args: - rlog.err( - f"Missing release subcommand.\n" - f" usage: build.py release {{{','.join(_RELEASE_SUBCOMMANDS)}}}\n" - ) - raise SystemExit(2) - - sub = extra_args[0] - rest = extra_args[1:] - - if sub not in _RELEASE_SUBCOMMANDS: - rlog.err( - f"Unknown release subcommand '{sub}'.\n valid: {', '.join(_RELEASE_SUBCOMMANDS)}\n" - ) - raise SystemExit(2) - - # Handle --help / -h for the subcommand. - if "-h" in rest or "--help" in rest: - _print_release_subcmd_help(sub) - - # --- validate required args early --------------------------------- - if sub == "tag" and not rest: - rlog.err("Missing version argument.") - _print_release_subcmd_help(sub, exit_code=2) - if sub == "pull" and not getattr(args, "pull_output", None): - rlog.err("--pull-output is required for 'release pull'.") - _print_release_subcmd_help(sub, exit_code=2) - - # --- skip --------------------------------------------------------- - if cfg.release_mode == "skip": - rlog.log("RELEASE_MODE=skip — skipping release operation") - return - - # --- docker ------------------------------------------------------- - if cfg.release_mode == "docker": - docker.build_release_image(cfg, logger=rlog) - rlog.log(f"Running release {sub} (docker)...") - # Forward release-specific env vars into the container. 
- registry = getattr(args, "registry", "ghcr.io") - repository = getattr(args, "repository", "tinkerbell/captain") - artifact_name = getattr(args, "oci_artifact_name", "artifacts") - sha = _resolve_git_sha(args, cfg.project_dir) - env_args: list[str] = [ - "-e", - f"KERNEL_VERSION={cfg.kernel_version}", - "-e", - f"REGISTRY={registry}", - "-e", - f"GITHUB_REPOSITORY={repository}", - "-e", - f"OCI_ARTIFACT_NAME={artifact_name}", - "-e", - f"GITHUB_SHA={sha}", - ] - exclude = getattr(args, "version_exclude", None) - if exclude: - env_args += ["-e", f"VERSION_EXCLUDE={exclude}"] - if sub in ("publish", "pull"): - target = getattr(args, "target", None) or cfg.arch - env_args += ["-e", f"TARGET={target}"] - if getattr(args, "force", False): - env_args += ["-e", "FORCE=true"] - pull_output = getattr(args, "pull_output", None) - - # Build the inner command. - inner_cmd = ["/work/build.py", "release", sub] - if pull_output: - inner_cmd += ["--pull-output", pull_output] - inner_cmd += list(rest) - - try: - docker.run_in_release( - cfg, - *env_args, - "--entrypoint", - "python3", - docker.RELEASE_IMAGE, - *inner_cmd, - ) - except subprocess.CalledProcessError as exc: - raise SystemExit(exc.returncode) from None - paths_to_fix = ["/work/out"] - if pull_output: - container_pull_output = f"/work/{pull_output.lstrip('/')}" - paths_to_fix.append(container_pull_output) - docker.fix_docker_ownership(cfg, rlog, paths_to_fix) - return - - # --- native ------------------------------------------------------- - if cfg.release_mode == "native": - missing = check_release_dependencies() - if missing: - rlog.err(f"Missing release tools: {', '.join(missing)}") - rlog.err("Install them or set --release-mode=docker.") - raise SystemExit(1) - # Common OCI parameters. 
- registry = getattr(args, "registry", "ghcr.io") - repository = getattr(args, "repository", "tinkerbell/captain") - artifact_name = getattr(args, "oci_artifact_name", "artifacts") - exclude = getattr(args, "version_exclude", None) - sha = _resolve_git_sha(args, cfg.project_dir) - tag = oci.compute_version_tag(cfg.project_dir, sha, exclude=exclude) - - if sub == "publish": - target = getattr(args, "target", None) or cfg.arch - force = getattr(args, "force", False) - oci.publish( - cfg, - target=target, - registry=registry, - repository=repository, - artifact_name=artifact_name, - tag=tag, - sha=sha, - force=force, - logger=rlog, - ) - - elif sub == "pull": - target = getattr(args, "target", None) or cfg.arch - pull_output = getattr(args, "pull_output", None) - if pull_output is None: - rlog.err("--pull-output is required for 'release pull'.") - raise SystemExit(2) - oci.pull( - registry=registry, - repository=repository, - artifact_name=artifact_name, - tag=tag, - target=target, - output_dir=Path(pull_output), - logger=rlog, - ) - - elif sub == "tag": - version = rest[0] - oci.tag_all( - registry=registry, - repository=repository, - artifact_name=artifact_name, - src_tag=tag, - new_tag=version, - logger=rlog, - ) diff --git a/captain/cli/_release_publish.py b/captain/cli/_release_publish.py new file mode 100644 index 0000000..daa4b66 --- /dev/null +++ b/captain/cli/_release_publish.py @@ -0,0 +1,209 @@ +"""``captain release-publish`` — publish artifacts as OCI images via buildah.""" + +from __future__ import annotations + +import logging +import subprocess +from pathlib import Path + +import click + +import captain.flavor +from captain import oci +from captain.cli._main import CliContext, cli +from captain.util import check_release_dependencies + +log = logging.getLogger(__name__) + + +@cli.command( + "release-publish", + short_help="Publish build artifacts as a multi-arch OCI image.", +) +@click.option( + "--release-mode", + envvar="RELEASE_MODE", + 
default="native", + show_default=True, + type=click.Choice(["docker", "native", "skip"], case_sensitive=False), + metavar="MODE", + help="Release stage execution mode (docker, native, skip).", +) +@click.option( + "--registry", + envvar="REGISTRY", + default="ghcr.io", + show_default=True, + help="OCI registry hostname.", +) +@click.option( + "--repository", + envvar="GITHUB_REPOSITORY", + default="tinkerbell/captain", + show_default=True, + help="Repository path (owner/name).", +) +@click.option( + "--oci-artifact-name", + envvar="OCI_ARTIFACT_NAME", + default="artifacts", + show_default=True, + help="OCI artifact image name.", +) +@click.option( + "--target", + envvar="TARGET", + default=None, + type=click.Choice(["amd64", "arm64", "combined"], case_sensitive=False), + metavar="TARGET", + help="Artifact target: amd64, arm64, or combined (default: value of --arch); " + "combined requires trixie-full or equivalent flavor with both arch's outputs present.", +) +@click.option( + "--git-sha", + envvar="GITHUB_SHA", + default=None, + help="Git commit SHA (default: auto-detected via git rev-parse HEAD).", +) +@click.option( + "--version-exclude", + envvar="VERSION_EXCLUDE", + default=None, + help="Tag to exclude from git-describe version lookup.", +) +@click.option( + "--force", + "force", + is_flag=True, + default=False, + help="Publish even if the image already exists in the registry.", +) +@click.pass_obj +def release_publish_cmd( + cli_ctx: CliContext, + *, + release_mode: str, + registry: str, + repository: str, + oci_artifact_name: str, + target: str | None, + git_sha: str | None, + version_exclude: str | None, + force: bool, +) -> None: + """Publish build artifacts as a multi-arch OCI image to a registry. + + Uses buildah to construct OCI images from the build artifacts (kernel, + initramfs, ISO, DTBs) and pushes them to the specified registry. + + Each artifact file becomes its own layer. 
Deterministic tar generation + ensures byte-identical layers across runs so that registries can + deduplicate blobs. + + \b + Examples + -------- + captain release-publish + captain release-publish --arch arm64 --target arm64 + captain release-publish --target combined --force + captain release-publish --registry ghcr.io --repository tinkerbell/captain + """ + + if target is None: + target = cli_ctx.arch + assert isinstance(target, str) + + cfg = cli_ctx.make_config(release_mode=release_mode) + + # --- skip mode -------------------------------------------------------- + if cfg.release_mode == "skip": + log.info("RELEASE_MODE=skip — nothing to do.") + return + + # --- docker mode ------------------------------------------------------ + if cfg.release_mode == "docker": + from captain import docker + + docker.obtain_builder(cfg) + sha = _resolve_git_sha(git_sha, cfg.project_dir) + + env_args: list[str] = [ + "-e", + f"FLAVOR_ID={cfg.flavor_id}", + "-e", + f"REGISTRY={registry}", + "-e", + f"GITHUB_REPOSITORY={repository}", + "-e", + f"OCI_ARTIFACT_NAME={oci_artifact_name}", + "-e", + f"GITHUB_SHA={sha}", + "-e", + f"TARGET={target}", + ] + if version_exclude: + env_args += ["-e", f"VERSION_EXCLUDE={version_exclude}"] + if force: + env_args += ["-e", "FORCE=true"] + + inner_cmd = ["captain", "release-publish"] + + try: + docker.run_in_builder( + cfg, + *env_args, + "--entrypoint", + "/usr/bin/uv", + cfg.builder_image, + *(["--verbose"] if cfg.verbose_uv else ["--quiet"]), + "run", + *inner_cmd, + ) + except subprocess.CalledProcessError as exc: + raise SystemExit(exc.returncode) from None + docker.fix_docker_ownership(cfg, ["/work/out"]) + return + + # --- native mode ------------------------------------------------------ + missing = check_release_dependencies() + if missing: + log.error("Missing release tools: %s", ", ".join(missing)) + log.error("Install them or set --release-mode=docker.") + raise SystemExit(1) + + sha = _resolve_git_sha(git_sha, cfg.project_dir) + 
tag = oci.compute_version_tag(cfg.project_dir, sha, exclude=version_exclude) + tag = f"{tag}-{cfg.flavor_id}" + + flavor = captain.flavor.create_and_setup_flavor_for_id(cfg.flavor_id, cfg) + + oci.publish( + cfg, + flavor, + target=target, + registry=registry, + repository=repository, + artifact_name=oci_artifact_name, + tag=tag, + sha=sha, + force=force, + ) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _resolve_git_sha(sha: str | None, project_dir: Path) -> str: + """Return the provided SHA or auto-detect via ``git rev-parse HEAD``.""" + if sha: + return sha + result = subprocess.run( + ["git", "rev-parse", "HEAD"], + capture_output=True, + text=True, + check=True, + cwd=project_dir, + ) + return result.stdout.strip() diff --git a/captain/cli/_stages.py b/captain/cli/_stages.py index 4e359c2..e7082e7 100644 --- a/captain/cli/_stages.py +++ b/captain/cli/_stages.py @@ -1,102 +1,43 @@ -"""Build stage orchestration — kernel, tools, mkosi, ISO.""" +"""Build stage orchestration — tools, mkosi, ISO.""" from __future__ import annotations -from captain import docker, iso, kernel, tools -from captain.config import Config -from captain.log import for_stage -from captain.util import check_kernel_dependencies, check_mkosi_dependencies, run - - -def _build_kernel_stage(cfg: Config) -> None: - """Run the kernel build stage according to *cfg.kernel_mode*.""" - klog = for_stage("kernel") - - # --- skip --------------------------------------------------------- - if cfg.kernel_mode == "skip": - klog.log("KERNEL_MODE=skip — skipping kernel build") - return - - # --- idempotency -------------------------------------------------- - modules_dir = cfg.modules_output / "usr" / "lib" / "modules" - vmlinuz_dir = cfg.kernel_output - has_vmlinuz = vmlinuz_dir.is_dir() and any(vmlinuz_dir.glob("vmlinuz-*")) +import logging - if modules_dir.is_dir() and 
has_vmlinuz and not cfg.force_kernel: - klog.log("Kernel already built (use --force-kernel to rebuild)") - return - - if modules_dir.is_dir() and not has_vmlinuz: - klog.warn("Modules exist but vmlinuz is missing — rebuilding kernel") - - # --- native ------------------------------------------------------- - if cfg.kernel_mode == "native": - missing = check_kernel_dependencies(cfg.arch) - if missing: - klog.err(f"Missing kernel build tools: {', '.join(missing)}") - klog.err("Install them or set --kernel-mode=docker.") - raise SystemExit(1) - klog.log("Building kernel (native)...") - kernel.build(cfg) - return +from captain import docker, iso, tools +from captain.config import Config +from captain.util import check_mkosi_dependencies, run - # --- docker ------------------------------------------------------- - docker.build_builder(cfg, logger=klog) - klog.log("Building kernel (docker)...") - docker.run_in_builder( - cfg, - "--entrypoint", - "python3", - cfg.builder_image, - "/work/build.py", - "kernel", - ) - docker.fix_docker_ownership( - cfg, - klog, - [ - f"/work/mkosi.output/kernel/{cfg.kernel_version}/{cfg.arch}", - "/work/out", - ], - ) +log = logging.getLogger(__name__) def _build_tools_stage(cfg: Config) -> None: """Run the tools download stage according to *cfg.tools_mode*.""" - tlog = for_stage("tools") # --- skip --------------------------------------------------------- if cfg.tools_mode == "skip": - tlog.log("TOOLS_MODE=skip — skipping tools download") + log.info("TOOLS_MODE=skip — skipping tools download") return # --- native ------------------------------------------------------- if cfg.tools_mode == "native": - tlog.log("Downloading tools (nerdctl, containerd, etc.)...") + log.info("Downloading tools (nerdctl, containerd, etc.) 
native...") tools.download_all(cfg) return # --- docker ------------------------------------------------------- - docker.build_builder(cfg, logger=tlog) - tlog.log("Downloading tools (nerdctl, containerd, etc.)...") - docker.run_in_builder( - cfg, - "--entrypoint", - "python3", - cfg.builder_image, - "/work/build.py", - "tools", - ) - docker.fix_docker_ownership(cfg, tlog, ["/work/mkosi.output"]) + docker.obtain_builder(cfg) + log.info("Downloading tools (nerdctl, containerd, etc.) docker...") + docker.run_captain_in_builder(cfg, "tools") + docker.fix_docker_ownership(cfg, ["/work/mkosi.output"]) def _build_mkosi_stage(cfg: Config, extra_args: list[str]) -> None: """Run the mkosi image-assembly stage according to *cfg.mkosi_mode*.""" - ilog = for_stage("initramfs") # --- skip --------------------------------------------------------- if cfg.mkosi_mode == "skip": - ilog.log("MKOSI_MODE=skip — skipping image assembly") + log.info("MKOSI_MODE=skip — skipping image assembly") return mkosi_args = list(cfg.mkosi_args) + list(extra_args) @@ -105,19 +46,17 @@ def _build_mkosi_stage(cfg: Config, extra_args: list[str]) -> None: if cfg.mkosi_mode == "native": missing = check_mkosi_dependencies() if missing: - ilog.err(f"Missing mkosi tools: {', '.join(missing)}") - ilog.err("Install them or set --mkosi-mode=docker.") + log.error("Missing mkosi tools: %s", ", ".join(missing)) + log.error("Install them or set --mkosi-mode=docker.") raise SystemExit(1) - ilog.log("Building initrd with mkosi (native)...") + log.info("Building initrd with mkosi (native)...") tools_tree = str(cfg.tools_output) - modules_tree = str(cfg.modules_output) output_dir = str(cfg.initramfs_output) run( [ "mkosi", f"--architecture={cfg.arch_info.mkosi_arch}", f"--extra-tree={tools_tree}", - f"--extra-tree={modules_tree}", f"--output-dir={output_dir}", "build", *mkosi_args, @@ -127,25 +66,22 @@ def _build_mkosi_stage(cfg: Config, extra_args: list[str]) -> None: return # --- docker 
------------------------------------------------------- - docker.build_builder(cfg, logger=ilog) - ilog.log("Building initrd with mkosi (docker)...") + docker.obtain_builder(cfg) + log.info("Building initrd with mkosi (docker)...") tools_tree = f"/work/mkosi.output/tools/{cfg.arch}" - modules_tree = f"/work/mkosi.output/kernel/{cfg.kernel_version}/{cfg.arch}/modules" - output_dir = f"/work/mkosi.output/initramfs/{cfg.kernel_version}/{cfg.arch}" - docker.run_mkosi( + output_dir = f"/work/mkosi.output/initramfs/{cfg.flavor_id}/{cfg.arch}" + docker.run_mkosi_in_builder( cfg, f"--extra-tree={tools_tree}", - f"--extra-tree={modules_tree}", f"--output-dir={output_dir}", + "--package-cache-dir=/cache/packages", "build", *mkosi_args, - logger=ilog, ) docker.fix_docker_ownership( cfg, - ilog, [ - f"/work/mkosi.output/initramfs/{cfg.kernel_version}/{cfg.arch}", + f"/work/mkosi.output/initramfs/{cfg.flavor_id}/{cfg.arch}", "/work/out", ], ) @@ -153,39 +89,30 @@ def _build_mkosi_stage(cfg: Config, extra_args: list[str]) -> None: def _build_iso_stage(cfg: Config) -> None: """Run the ISO build stage according to *cfg.iso_mode*.""" - isolog = for_stage("iso") # --- skip --------------------------------------------------------- if cfg.iso_mode == "skip": - isolog.log("ISO_MODE=skip — skipping ISO build") + log.info("ISO_MODE=skip — skipping ISO build") return # --- idempotency -------------------------------------------------- - iso_path = cfg.iso_output / f"captainos-{cfg.kernel_version}-{cfg.arch_info.output_arch}.iso" + iso_path = cfg.iso_output / f"captainos-{cfg.flavor_id}-{cfg.arch_info.output_arch}.iso" if iso_path.is_file() and not cfg.force_iso: - isolog.log(f"ISO already built: {iso_path} (use --force-iso to rebuild)") + log.info("ISO already built: %s (use --force-iso to rebuild)", iso_path) return # --- native ------------------------------------------------------- if cfg.iso_mode == "native": - isolog.log("Building ISO (native)...") + log.info("Building ISO 
(native)...") iso.build(cfg) return # --- docker ------------------------------------------------------- - docker.build_builder(cfg, logger=isolog) - isolog.log("Building ISO (docker)...") - docker.run_in_builder( - cfg, - "--entrypoint", - "python3", - cfg.builder_image, - "/work/build.py", - "iso", - ) + docker.obtain_builder(cfg) + log.info("Building ISO (docker)...") + docker.run_captain_in_builder(cfg, "iso") docker.fix_docker_ownership( cfg, - isolog, [ "/work/mkosi.output/iso", "/work/out", diff --git a/captain/cli/_tools.py b/captain/cli/_tools.py new file mode 100644 index 0000000..16e0fd3 --- /dev/null +++ b/captain/cli/_tools.py @@ -0,0 +1,63 @@ +"""``captain tools`` — download tools (containerd, runc, nerdctl, CNI plugins).""" + +from __future__ import annotations + +import logging + +import click + +from captain.cli._main import CliContext, cli +from captain.cli._stages import _build_tools_stage + +log = logging.getLogger(__name__) + + +@cli.command( + "tools", + short_help="Download tools (containerd, runc, nerdctl, CNI).", +) +@click.option( + "--tools-mode", + envvar="TOOLS_MODE", + default="docker", + show_default=True, + type=click.Choice(["docker", "native", "skip"], case_sensitive=False), + metavar="MODE", + help="Tools download stage execution mode (docker, native, skip).", +) +@click.option( + "--force-tools", + envvar="FORCE_TOOLS", + is_flag=True, + default=False, + help="Re-download tools even if outputs already exist.", +) +@click.pass_obj +def tools_cmd( + cli_ctx: CliContext, + *, + tools_mode: str, + force_tools: bool, +) -> None: + """Download tools (containerd, runc, nerdctl, CNI plugins). + + Fetches pre-built binaries for the target architecture and stages + them under ``mkosi.output/tools/{arch}/``. The tools are later + merged into the initramfs by mkosi via ``--extra-tree``. 
+ + \b + Examples + -------- + captain tools + captain tools --arch arm64 + captain tools --tools-mode native + captain tools --force-tools + """ + + cfg = cli_ctx.make_config( + tools_mode=tools_mode, + force_tools=force_tools, + ) + + _build_tools_stage(cfg) + log.info("Tools stage complete!") diff --git a/captain/config.py b/captain/config.py index 90cd118..fee1ad3 100644 --- a/captain/config.py +++ b/captain/config.py @@ -2,20 +2,21 @@ from __future__ import annotations -import argparse -import os +import logging import sys from dataclasses import dataclass, field from pathlib import Path from captain.util import ArchInfo, get_arch_info -# Valid values for KERNEL_MODE and MKOSI_MODE. +log = logging.getLogger(__name__) + +# Valid values for ISO_MODE and MKOSI_MODE. VALID_MODES = ("docker", "native", "skip") -# The single source of truth for the default kernel version. -# Override at runtime via --kernel-version or KERNEL_VERSION env var. -DEFAULT_KERNEL_VERSION = "6.18.16" +# The single source of truth for the default flavor. +# Override at runtime via --flavor-id or FLAVOR_ID env var. 
+DEFAULT_FLAVOR_ID = "trixie-full" @dataclass(slots=True) @@ -28,23 +29,21 @@ class Config: # Target arch: str = "amd64" - kernel_version: str = DEFAULT_KERNEL_VERSION - kernel_config: str | None = None - kernel_src: str | None = None + flavor_id: str = DEFAULT_FLAVOR_ID # Docker + builder_registry: str | None = None + builder_repository: str | None = None builder_image: str = "captainos-builder" - no_cache: bool = False + builder_push: bool = False # Per-stage mode: "docker" | "native" | "skip" - kernel_mode: str = "docker" tools_mode: str = "docker" mkosi_mode: str = "docker" iso_mode: str = "docker" release_mode: str = "docker" # Force flags - force_kernel: bool = False force_tools: bool = False force_iso: bool = False @@ -59,97 +58,22 @@ class Config: # Derived (set in __post_init__) arch_info: ArchInfo = field(init=False) + # Call uv (eg in Docker) using its own --verbose flag + verbose_uv: bool = False + def __post_init__(self) -> None: self.arch_info = get_arch_info(self.arch) self.arch = self.arch_info.arch # normalise aliases (x86_64 → amd64, etc.) for name, value in ( - ("KERNEL_MODE", self.kernel_mode), ("TOOLS_MODE", self.tools_mode), ("MKOSI_MODE", self.mkosi_mode), ("ISO_MODE", self.iso_mode), ("RELEASE_MODE", self.release_mode), ): if value not in VALID_MODES: - print( - f"ERROR: {name}={value!r} is invalid. Valid values: {', '.join(VALID_MODES)}", - file=sys.stderr, - ) + log.error("%s=%r is invalid. Valid values: %s", name, value, ", ".join(VALID_MODES)) sys.exit(1) - @property - def needs_docker(self) -> bool: - """True if any stage requires Docker.""" - return ( - self.kernel_mode == "docker" - or self.tools_mode == "docker" - or self.mkosi_mode == "docker" - or self.iso_mode == "docker" - or self.release_mode == "docker" - ) - - @classmethod - def from_args(cls, args: argparse.Namespace, project_dir: Path) -> Config: - """Create a Config from a parsed :class:`argparse.Namespace`. 
- - The *args* namespace is produced by :mod:`configargparse` which - has already resolved the priority chain: - CLI flags > environment variables > defaults. - - ``getattr`` with fallbacks is used because per-subcommand - parsers only define the flags relevant to that subcommand. - """ - return cls( - project_dir=project_dir, - output_dir=project_dir / "out", - arch=getattr(args, "arch", "amd64"), - kernel_version=getattr(args, "kernel_version", DEFAULT_KERNEL_VERSION), - kernel_config=getattr(args, "kernel_config", None) or None, - kernel_src=getattr(args, "kernel_src", None) or None, - builder_image=getattr(args, "builder_image", "captainos-builder"), - no_cache=getattr(args, "no_cache", False), - kernel_mode=getattr(args, "kernel_mode", "docker"), - tools_mode=getattr(args, "tools_mode", "docker"), - mkosi_mode=getattr(args, "mkosi_mode", "docker"), - iso_mode=getattr(args, "iso_mode", "docker"), - release_mode=getattr(args, "release_mode", "docker"), - force_kernel=getattr(args, "force_kernel", False), - force_tools=getattr(args, "force_tools", False), - force_iso=getattr(args, "force_iso", False), - qemu_append=getattr(args, "qemu_append", ""), - qemu_mem=getattr(args, "qemu_mem", "2G"), - qemu_smp=getattr(args, "qemu_smp", "2"), - ) - - @classmethod - def from_env(cls, project_dir: Path) -> Config: - """Create a Config from environment variables (legacy helper). - - Prefer :meth:`from_args` in the CLI path. This method remains - for any non-CLI callers (e.g. tests, scripts) that need a - ``Config`` without going through argparse. 
- """ - return cls( - project_dir=project_dir, - output_dir=project_dir / "out", - arch=os.environ.get("ARCH", "amd64"), - kernel_version=os.environ.get("KERNEL_VERSION", DEFAULT_KERNEL_VERSION), - kernel_config=os.environ.get("KERNEL_CONFIG") or None, - kernel_src=os.environ.get("KERNEL_SRC") or None, - builder_image=os.environ.get("BUILDER_IMAGE", "captainos-builder"), - no_cache=os.environ.get("NO_CACHE") == "1", - kernel_mode=os.environ.get("KERNEL_MODE", "docker"), - tools_mode=os.environ.get("TOOLS_MODE", "docker"), - mkosi_mode=os.environ.get("MKOSI_MODE", "docker"), - iso_mode=os.environ.get("ISO_MODE", "docker"), - release_mode=os.environ.get("RELEASE_MODE", "docker"), - force_kernel=os.environ.get("FORCE_KERNEL") == "1", - force_tools=os.environ.get("FORCE_TOOLS") == "1", - force_iso=os.environ.get("FORCE_ISO") == "1", - qemu_append=os.environ.get("QEMU_APPEND", ""), - qemu_mem=os.environ.get("QEMU_MEM", "2G"), - qemu_smp=os.environ.get("QEMU_SMP", "2"), - ) - @property def tools_output(self) -> Path: """Per-arch staging directory for downloaded tools. @@ -160,27 +84,6 @@ def tools_output(self) -> Path: """ return self.project_dir / "mkosi.output" / "tools" / self.arch - @property - def kernel_output(self) -> Path: - """Per-version, per-arch directory for all kernel build artifacts. - - Contains the vmlinuz image (loaded separately by iPXE) and - a ``modules/`` subtree that mirrors a root filesystem layout - (``usr/lib/modules/{kver}/``) so it can be passed directly - as an ``--extra-tree=`` to mkosi. - """ - return self.project_dir / "mkosi.output" / "kernel" / self.kernel_version / self.arch - - @property - def modules_output(self) -> Path: - """Per-version, per-arch root for kernel modules. - - Returns ``kernel/{version}/{arch}/modules`` which contains a - merged-usr tree (``usr/lib/modules/{kver}/``) suitable for - passing as ``--extra-tree=`` to mkosi. 
- """ - return self.kernel_output / "modules" - @property def mkosi_output(self) -> Path: return self.project_dir / "mkosi.output" @@ -188,12 +91,12 @@ def mkosi_output(self) -> Path: @property def initramfs_output(self) -> Path: """Per-version, per-arch directory for mkosi initramfs output.""" - return self.project_dir / "mkosi.output" / "initramfs" / self.kernel_version / self.arch + return self.project_dir / "mkosi.output" / "initramfs" / self.flavor_id / self.arch @property def iso_output(self) -> Path: """Per-version, per-arch directory for the built ISO image.""" - return self.project_dir / "mkosi.output" / "iso" / self.kernel_version / self.arch + return self.project_dir / "mkosi.output" / "iso" / self.flavor_id / self.arch @property def iso_staging(self) -> Path: diff --git a/captain/docker.py b/captain/docker.py index f2dc03d..5ce7fd8 100644 --- a/captain/docker.py +++ b/captain/docker.py @@ -3,15 +3,18 @@ from __future__ import annotations import hashlib +import logging import os import platform from pathlib import Path +from rich.table import Table + +import captain from captain.config import Config -from captain.log import StageLogger, err, for_stage -from captain.util import run +from captain.util import detect_current_machine_arch, run -_default_log = for_stage("docker") +log = logging.getLogger(__name__) def _image_exists(image: str) -> bool: @@ -28,106 +31,115 @@ def _dockerfile_hash(cfg: Config) -> str: """Return the SHA-256 hex digest of the Dockerfile content. This is used as an image tag so that Dockerfile changes are detected - automatically. The value intentionally matches what GitHub Actions - ``hashFiles('Dockerfile')`` produces, allowing the CI - ``docker/build-push-action`` step to pre-load an image with the same - tag that ``build_builder`` will look for. + automatically. 
""" dockerfile = cfg.project_dir / "Dockerfile" - return hashlib.sha256(dockerfile.read_bytes()).hexdigest() + local_arch = detect_current_machine_arch() + hex_digest = hashlib.sha256(dockerfile.read_bytes()).hexdigest() + return f"{local_arch}-{hex_digest}" -def build_builder(cfg: Config, logger: StageLogger | None = None) -> None: +def obtain_builder(cfg: Config) -> None: """Build the Docker builder image when the Dockerfile has changed. The image is tagged with a content hash of the Dockerfile so that changes are detected even when the base image name stays the same. - When the matching tag already exists locally (e.g. pre-loaded by a CI - ``docker/build-push-action`` step with ``load: true``), we skip the - build entirely. Use ``NO_CACHE=1`` to force a full rebuild. """ - _log = logger or _default_log tag = _dockerfile_hash(cfg) - tagged_image = f"{cfg.builder_image}:{tag}" + remote_tagged_image = f"{cfg.builder_registry}/{cfg.builder_repository}/builder:{tag}" + local_tagged_image = f"{cfg.builder_image}:{tag}" + + log.debug( + "Checking for existing builder image with tag '%s' or remote image '%s'", + local_tagged_image, + remote_tagged_image, + ) - if not cfg.no_cache and _image_exists(tagged_image): - _log.log(f"Docker image '{cfg.builder_image}' is up to date.") + if _image_exists(local_tagged_image): + log.info("Docker image '%s' is up to date with %s.", cfg.builder_image, local_tagged_image) # Ensure the un-hashed tag exists so later docker-run calls that # reference cfg.builder_image (without the hash suffix) succeed. - # This matters when the hashed tag was pre-loaded by CI. 
- run(["docker", "tag", tagged_image, cfg.builder_image], check=False) + run(["docker", "tag", local_tagged_image, cfg.builder_image], check=False) return - _log.log(f"Building Docker image '{cfg.builder_image}'...") - cmd = ["docker", "build"] - if cfg.no_cache: - cmd.append("--no-cache") - cmd.extend(["-t", tagged_image, "-t", cfg.builder_image, str(cfg.project_dir)]) - run(cmd) - - -RELEASE_IMAGE = "captainos-release" - - -def _release_dockerfile_hash(cfg: Config) -> str: - """Return the SHA-256 hex digest of the Dockerfile.release content.""" - dockerfile = cfg.project_dir / "Dockerfile.release" - return hashlib.sha256(dockerfile.read_bytes()).hexdigest() - - -def build_release_image(cfg: Config, logger: StageLogger | None = None) -> None: - """Build the release Docker image from ``Dockerfile.release``.""" - _log = logger or _default_log - tag = _release_dockerfile_hash(cfg) - tagged_image = f"{RELEASE_IMAGE}:{tag}" - - if not cfg.no_cache and _image_exists(tagged_image): - _log.log(f"Docker image '{RELEASE_IMAGE}' is up to date.") - run(["docker", "tag", tagged_image, RELEASE_IMAGE]) + # Check if the remote name exists locally... (was pre-pulled somehow) + if _image_exists(remote_tagged_image): + log.info( + "Docker image '%s' already exists locally (pre-pulled). Tagging as '%s'.", + remote_tagged_image, + cfg.builder_image, + ) + run(["docker", "tag", remote_tagged_image, cfg.builder_image], check=False) return - _log.log(f"Building Docker image '{RELEASE_IMAGE}'...") - cmd = ["docker", "build", "-f", str(cfg.project_dir / "Dockerfile.release")] - if cfg.no_cache: - cmd.append("--no-cache") - cmd.extend(["-t", tagged_image, "-t", RELEASE_IMAGE, str(cfg.project_dir)]) - run(cmd) - + # Check if we can pull the remote image (exists in registry and matches our Dockerfile hash) + if ( + run( + ["docker", "pull", remote_tagged_image], + check=False, + capture=False, + ).returncode + == 0 + ): + log.info( + "Pulled Docker image '%s' from registry. 
Tagging as '%s'.", + remote_tagged_image, + cfg.builder_image, + ) + run(["docker", "tag", remote_tagged_image, cfg.builder_image], check=False) + return -def run_in_release(cfg: Config, *extra_args: str) -> None: - """Run a command inside the release container. + # build locally if no existing image was found. + log.info("Building Docker image '%s'...", cfg.builder_image) + run( + [ + "docker", + "buildx", + "build", + "--progress=plain", + "-t", + local_tagged_image, + "-t", + cfg.builder_image, + str(cfg.project_dir), + ] + ) - Similar to :func:`run_in_builder` but uses the lightweight release - image which has buildah, skopeo, Python, and git. - """ - docker_args: list[str] = [ - "docker", - "run", - "--rm", - # Buildah needs mount/remount capabilities for layer operations. - "--privileged", - "-v", - f"{cfg.project_dir}:/work", - "-w", - "/work", - "-e", - f"ARCH={cfg.arch}", - "-e", - "RELEASE_MODE=native", - # Chroot isolation lets buildah work inside an unprivileged container - # (no user namespaces needed — we only assemble scratch images). - "-e", - "BUILDAH_ISOLATION=chroot", - ] - # Forward host registry credentials so buildah/skopeo can authenticate. - # The caller sets these env vars on the host (e.g. via docker login or - # CI secrets); they are passed through to the container as-is. - for var in ("REGISTRY_AUTH_FILE", "REGISTRY_USERNAME", "REGISTRY_PASSWORD"): - val = os.environ.get(var) - if val: - docker_args += ["-e", f"{var}={val}"] - docker_args.extend(extra_args) - run(docker_args) + # Show the layer size distribution for the built image to help with debugging and optimization. + log.info("Docker image '%s' built successfully. 
Layer size distribution:", local_tagged_image) + layer_sizes_lines = run( + [ + "docker", + "history", + "--no-trunc", + "--format", + "-> {{.Size}} :: '{{.CreatedBy}}'", + local_tagged_image, + ], + capture=True, + check=True, + ) + layers = [] + for line in layer_sizes_lines.stdout.strip().splitlines(): + line = line.strip() + if line.startswith("-> "): + # remove double whitespace chars to make it easier to read + line = " ".join(line.split()) + layers.append(line) + # reverse the array to match the order + layers.reverse() + for layer in layers: + log.info("Layer info: %s", layer) + + # Optionally push the image after building it + if cfg.builder_push: + log.info( + "Pushing Docker image '%s' to registry as '%s'...", + cfg.builder_image, + remote_tagged_image, + ) + run(["docker", "tag", local_tagged_image, remote_tagged_image], check=False) + run(["docker", "push", remote_tagged_image]) def run_in_builder(cfg: Config, *extra_args: str) -> None: @@ -135,80 +147,102 @@ def run_in_builder(cfg: Config, *extra_args: str) -> None: *extra_args* are appended after the docker run flags and image name. """ + + docker_envs: dict[str, str] = { + "CAPTAIN_IN_DOCKER": "docker", + "ARCH": cfg.arch, + "FLAVOR_ID": cfg.flavor_id, + "FORCE_TOOLS": str(int(cfg.force_tools)), + "FORCE_ISO": str(int(cfg.force_iso)), + "BUILDAH_ISOLATION": "chroot", + "BUILDAH_INSECURE": os.environ.get("BUILDAH_INSECURE", ""), + "RELEASE_MODE": "native", + "TOOLS_MODE": "native", + "MKOSI_MODE": "native", + "ISO_MODE": "native", + "TERM": os.environ.get("TERM", "xterm-256color"), + "FORCE_COLOR": "1", + "COLUMNS": str(captain.env_columns), + "GITHUB_ACTIONS": os.environ.get("GITHUB_ACTIONS", ""), + # Forward host registry credentials so buildah/skopeo can authenticate. + # The caller sets these env vars on the host (e.g. via docker login or + # CI secrets); they are passed through to the container as-is. 
+ "REGISTRY_AUTH_FILE": os.environ.get("REGISTRY_AUTH_FILE", ""), + "REGISTRY_USERNAME": os.environ.get("REGISTRY_USERNAME", ""), + "REGISTRY_PASSWORD": os.environ.get("REGISTRY_PASSWORD", ""), + } + docker_args: list[str] = [ "docker", "run", "--rm", - "--privileged", - "-v", - f"{cfg.project_dir}:/work", + "--privileged", # yes, this is required for both buildah and mkosi in the container "-w", "/work", - "-e", - f"ARCH={cfg.arch}", - "-e", - f"KERNEL_VERSION={cfg.kernel_version}", - "-e", - f"FORCE_TOOLS={int(cfg.force_tools)}", - "-e", - f"FORCE_KERNEL={int(cfg.force_kernel)}", - "-e", - f"FORCE_ISO={int(cfg.force_iso)}", - # Force all stage modes to native inside the container so - # build.py never tries to launch Docker recursively. - "-e", - "KERNEL_MODE=native", - "-e", - "TOOLS_MODE=native", - "-e", - "MKOSI_MODE=native", - "-e", - "ISO_MODE=native", - "-e", - "RELEASE_MODE=native", ] - # Mount kernel source if provided - if cfg.kernel_src is not None: - kernel_src_path = Path(cfg.kernel_src).resolve() - if not kernel_src_path.is_dir(): - err(f"KERNEL_SRC={cfg.kernel_src} does not exist") - raise SystemExit(1) - docker_args.extend(["-v", f"{kernel_src_path}:/work/kernel-src:ro"]) - docker_args.extend(["-e", "KERNEL_SRC=/work/kernel-src"]) - - # Mount kernel config override and point KERNEL_CONFIG to the container path - if cfg.kernel_config is not None: - kernel_cfg_path = Path(cfg.kernel_config) - if not kernel_cfg_path.is_absolute(): - kernel_cfg_path = (cfg.project_dir / kernel_cfg_path).resolve() - else: - kernel_cfg_path = kernel_cfg_path.resolve() - if not kernel_cfg_path.is_file(): - err(f"KERNEL_CONFIG={cfg.kernel_config} does not exist") - raise SystemExit(1) - docker_args.extend(["-v", f"{kernel_cfg_path}:/work/kernel-config:ro"]) - docker_args.extend(["-e", "KERNEL_CONFIG=/work/kernel-config"]) + if log.isEnabledFor(logging.DEBUG): + table = Table( + title="Docker Environment Variables", show_header=True, header_style="bold cyan" + ) + 
table.add_column("Environment Variable", style="green") + table.add_column("Value", style="yellow") + for key, value in sorted(docker_envs.items()): + table.add_row(key, value) + captain.console.print(table) + + for k, v in docker_envs.items(): + docker_args += ["-e", f"{k}={v}"] + + docker_args += ["-v", f"{cfg.project_dir}/mkosi.output:/work/mkosi.output"] + docker_args += ["-v", f"{cfg.project_dir}/out:/work/out"] + + docker_args += ["-v", f"{cfg.project_dir}/mkosi.extra:/work/mkosi.extra"] + docker_args += ["-v", f"{cfg.project_dir}/mkosi.sandbox:/work/mkosi.sandbox"] + docker_args += ["-v", f"{cfg.project_dir}/mkosi.skeleton:/work/mkosi.skeleton"] + + docker_args += ["-v", f"{cfg.project_dir}/mkosi.conf:/work/mkosi.conf"] + docker_args += ["-v", f"{cfg.project_dir}/mkosi.finalize:/work/mkosi.finalize"] + docker_args += ["-v", f"{cfg.project_dir}/mkosi.postinst:/work/mkosi.postinst"] + + docker_args += ["-v", f"{cfg.project_dir}/captain:/work/captain"] + docker_args += ["-v", f"{cfg.project_dir}/pyproject.toml:/work/pyproject.toml"] + docker_args += ["-v", f"{cfg.project_dir}/build.py:/work/build.py"] + + docker_args += ["--mount", "type=volume,source=captain-cache-packages,target=/cache/packages"] docker_args.extend(extra_args) run(docker_args) -def run_mkosi(cfg: Config, *mkosi_args: str, logger: StageLogger | None = None) -> None: +def run_captain_in_builder(cfg: Config, *extra_args: str): + log.debug("Running 'captain %s' in builder container...", extra_args) + run_in_builder( + cfg, + cfg.builder_image, + "/usr/bin/uv", + *(["--verbose"] if cfg.verbose_uv else ["--quiet"]), + "run", + "captain", + *extra_args, + ) + + +def run_mkosi_in_builder(cfg: Config, *mkosi_args: str) -> None: """Run mkosi inside the builder container.""" - ensure_binfmt(cfg, logger=logger) + ensure_binfmt(cfg) run_in_builder( cfg, cfg.builder_image, + "/usr/local/bin/mkosi", f"--architecture={cfg.arch_info.mkosi_arch}", *mkosi_args, ) -def ensure_binfmt(cfg: Config, logger: 
StageLogger | None = None) -> None: +def ensure_binfmt(cfg: Config) -> None: """Register binfmt_misc handlers if doing a cross-architecture build.""" - _log = logger or _default_log - host_arch = platform.machine() # e.g. "x86_64" or "aarch64" + host_arch = platform.machine() need_binfmt = False match (host_arch, cfg.arch): @@ -220,8 +254,10 @@ def ensure_binfmt(cfg: Config, logger: StageLogger | None = None) -> None: if not need_binfmt: return - _log.log( - f"Registering binfmt_misc handlers for cross-arch build ({host_arch} -> {cfg.arch})..." + log.info( + "Registering binfmt_misc handlers for cross-arch build (%s -> %s)...", + host_arch, + cfg.arch, ) result = run( [ @@ -237,11 +273,11 @@ def ensure_binfmt(cfg: Config, logger: StageLogger | None = None) -> None: capture=True, ) if result.returncode != 0: - _log.warn("Could not auto-register binfmt handlers.") - _log.warn("Run manually: docker run --privileged --rm tonistiigi/binfmt --install all") + log.warning("Could not auto-register binfmt handlers.") + log.warning("Run manually: docker run --privileged --rm tonistiigi/binfmt --install all") -def fix_docker_ownership(cfg: Config, logger, paths: list[str]) -> None: +def fix_docker_ownership(cfg: Config, paths: list[str]) -> None: """Fix ownership of Docker-created files (container runs as root). Spawns a lightweight container to ``chown -R`` the given paths @@ -254,10 +290,6 @@ def fix_docker_ownership(cfg: Config, logger, paths: list[str]) -> None: uid = os.getuid() gid = os.getgid() - # *paths* use the container mount prefix /work — translate to host. - # Check the path itself **and** every child — the top-level directory - # may already be owned by the host user while files inside it were - # created by the container (root). 
needs_fix: list[str] = [] for p in paths: host_path = Path(p.replace("/work", str(cfg.project_dir), 1)) @@ -278,7 +310,7 @@ def fix_docker_ownership(cfg: Config, logger, paths: list[str]) -> None: if not needs_fix: return - logger.log("Fixing ownership of Docker-created files...") + log.info("Fixing ownership of Docker-created files...") run( [ "docker", diff --git a/captain/flavor.py b/captain/flavor.py new file mode 100644 index 0000000..ea248df --- /dev/null +++ b/captain/flavor.py @@ -0,0 +1,221 @@ +"""Flavor-specific configuration.""" + +from __future__ import annotations + +import logging +import shutil +from abc import abstractmethod +from pathlib import Path +from typing import Protocol, runtime_checkable + +import jinja2 + +from captain.config import Config + +log = logging.getLogger(__name__) + + +@runtime_checkable +class BaseFlavor(Protocol): + cfg: Config + id: str + name: str + description: str + flavor_dir: Path + supported_architectures: frozenset[str] + template_map: dict[str, list[Path]] + static_map: dict[str, Path] + + def setup(self, cfg: Config, flavor_dir: Path) -> None: + if cfg is None: + raise ValueError("cfg (Config) cannot be None") + self.cfg = cfg + + if flavor_dir is None: + raise ValueError("flavor_dir (Path) cannot be None") + if not flavor_dir.is_dir(): + raise ValueError(f"flavor_dir {flavor_dir} does not exist or is not a directory") + self.flavor_dir = flavor_dir + + self.template_map = {} + self.static_map = {} + log.debug("Called BaseFlavor.setup()...") + pass + + def generate(self): + log.debug("Called BaseFlavor.generate()...") + # Before generating, cleanup known targets. 
@TODO make dir disposable instead + log.debug("Cleaning up old generated files in %s", self.cfg.project_dir) + shutil.rmtree(self.cfg.project_dir / "mkosi.conf", ignore_errors=True) + shutil.rmtree(self.cfg.project_dir / "mkosi.postinst", ignore_errors=True) + shutil.rmtree(self.cfg.project_dir / "mkosi.finalize", ignore_errors=True) + shutil.rmtree(self.cfg.project_dir / "mkosi.extra", ignore_errors=True) + shutil.rmtree(self.cfg.project_dir / "mkosi.sandbox", ignore_errors=True) + shutil.rmtree(self.cfg.project_dir / "mkosi.skeleton", ignore_errors=True) + + self.copy_static_files(self.cfg.project_dir) + self.render_templates(self.cfg.project_dir) # For compatibility + pass + + def specific_flavor_dir(self, flavor_id: str) -> Path: + flavor_id_underscore = flavor_id.replace("-", "_") + flavor_dir = self.cfg.project_dir / "captain" / "flavors" / flavor_id_underscore + + if not flavor_dir.is_dir(): + log.error( + "Specific Flavor dir '%s' not found. Expected to find directory %s", + flavor_id, + flavor_dir, + ) + raise SystemExit(1) + return flavor_dir + + def add_static_dir(self, dir_to_include: str, flavor_dir: Path): + extra_dir = flavor_dir / dir_to_include + if extra_dir.exists() and extra_dir.is_dir(): + for extra_file in extra_dir.rglob("*"): + if extra_file.is_file(): + relative_path = extra_file.relative_to(flavor_dir) + self.static_map[str(relative_path)] = extra_file + + def render_templates(self, output_dir: Path): + log.debug("Called BaseFlavor.render_templates() with output_dir: %s", output_dir) + # Use jinja2 to render all templates in self.template_map, writing output to output_dir + # The keys of self.template_map are the relative output paths (e.g. "mkosi.conf"), and the + # values are lists of Path objects pointing to Jinja2 template files. + # If more than one template is provided for a given output path, they should be rendered + # in order and concatenated together to produce the final output file. 
+ for relative_output_path, template_paths in self.template_map.items(): + log.debug( + "Rendering templates for output path '%s': %s", + relative_output_path, + template_paths, + ) + rendered_content = "" + for template_path in template_paths: + log.debug("Rendering template %s", template_path) + # Here you would load the template file, render it with the appropriate context + # (e.g. using Jinja2), and append the rendered content to rendered_content. + # For example: + template = jinja2.Environment( + loader=jinja2.FileSystemLoader(template_path.parent), + undefined=jinja2.StrictUndefined, + ).get_template(template_path.name) + rendered_content += template.render(cfg=self.cfg, flavor=self) + + output_file_path = output_dir / relative_output_path + log.debug("Writing rendered content to %s", output_file_path) + output_file_path.parent.mkdir(parents=True, exist_ok=True) + output_file_path.write_text(rendered_content) + + # Make output_file executable @TODO: we will need a way to tell + output_file_path.chmod(output_file_path.stat().st_mode | 0o111) + + def copy_static_files(self, project_dir): + # Do a plain copy of all files in self.static_map to project_dir / relative_path, where + # relative_path is the key in self.static_map + for relative_path, source_path in self.static_map.items(): + destination_path = project_dir / relative_path + log.debug("Copying static file from '%s' to '%s'", source_path, destination_path) + destination_path.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(source_path, destination_path) + + @abstractmethod + def has_iso(self) -> bool: + return False + + +def list_available_flavors() -> list[str]: + import importlib + import pkgutil + + package = importlib.import_module("captain.flavors") + # iter_modules finds immediate children; walk_packages recurses + ret = [] + for _finder, module_name, _is_pkg in pkgutil.walk_packages( + package.__path__, prefix=f"{package.__name__}." 
+ ): + try: + module = importlib.import_module(module_name) + except Exception as exc: + log.debug(f"Skipping {module_name}: {exc}") + continue + + fn = getattr(module, "create_flavor", None) + if fn is not None and callable(fn): + flavor_id = module_name.rsplit(".", 1)[-1].replace("_", "-") + ret.append(flavor_id) + log.debug("Discovered flavor '%s' via module %s", flavor_id, module_name) + + return sorted(ret) + + +def create_and_setup_flavor_for_id(flavor_id: str, cfg: Config) -> BaseFlavor: + log.debug("Creating and setting up flavor for id '%s'", flavor_id) + flavor_id_underscore = flavor_id.replace("-", "_") + flavor_dir = cfg.project_dir / "captain" / "flavors" / flavor_id_underscore + + if not flavor_dir.is_dir(): + log.error( + "Flavor '%s' not found. Expected to find directory %s", + flavor_id, + flavor_dir, + ) + raise SystemExit(1) + + wanted_module = f"captain.flavors.{flavor_id_underscore}" + log.debug("Attempting to import flavor module %s from directory %s", wanted_module, flavor_dir) + + try: + module = __import__(wanted_module, fromlist=["create_flavor"]) + except ImportError as e: + log.error( + "Failed to import flavor module %s from directory %s: %s", + wanted_module, + flavor_dir, + e, + ) + raise e + + # Validate API explicitly + if not hasattr(module, "create_flavor"): + log.error("Flavor module %s does not define create_flavor()", wanted_module) + raise SystemExit(1) + + log.debug("Executing %s.create_flavor()", wanted_module) + flavor: BaseFlavor = module.create_flavor() + + if not isinstance(flavor, BaseFlavor): + log.error( + "create_flavor() in %s did not return BaseFlavor (got %r)", + wanted_module, + type(flavor), + ) + raise SystemExit(1) + + log.debug("Calling setup() on flavor %s with config: %s", flavor, cfg) + flavor.setup(cfg, flavor_dir) + + # Ensure the current arch is supported by the flavor + if cfg.arch_info.arch not in flavor.supported_architectures: + log.error( + "Flavor '%s' does not support architecture '%s'. 
Supported architectures: %s", + flavor.id, + cfg.arch_info.arch, + flavor.supported_architectures, + ) + raise SystemExit(1) + else: + log.debug( + "Flavor '%s' supports architecture '%s'", + flavor.id, + cfg.arch_info.arch, + ) + + log.debug( + "Flavor is setup; description: %s; supported_architectures: %s", + flavor.description, + flavor.supported_architectures, + ) + + return flavor diff --git a/captain/flavors/common_acpi/__init__.py b/captain/flavors/common_acpi/__init__.py new file mode 100644 index 0000000..b9e35eb --- /dev/null +++ b/captain/flavors/common_acpi/__init__.py @@ -0,0 +1,28 @@ +import logging +from dataclasses import dataclass +from pathlib import Path + +from captain.config import Config +from captain.flavors.common_debian import DebianCommonFlavor + +log: logging.Logger = logging.getLogger(__name__) + + +@dataclass +class TrixieACPIFlavor(DebianCommonFlavor): + id = "trixie-acpi" + name = "Trixie ACPI Common" + description = "Debian Trixie based on UEFI+ACPI machines" + supported_architectures = frozenset(["amd64", "arm64"]) + + def setup(self, cfg: Config, flavor_dir: Path) -> None: + super().setup(cfg, flavor_dir) + + this_flavor_dir = self.specific_flavor_dir("common-acpi") + + # Static files + self.add_static_dir("mkosi.extra", this_flavor_dir) + + # This flavor can produce working ISO images (generic UEFI/ACPI) + def has_iso(self) -> bool: + return True diff --git a/mkosi.extra/etc/acpi/events/powerbtn b/captain/flavors/common_acpi/mkosi.extra/etc/acpi/events/powerbtn similarity index 100% rename from mkosi.extra/etc/acpi/events/powerbtn rename to captain/flavors/common_acpi/mkosi.extra/etc/acpi/events/powerbtn diff --git a/mkosi.extra/etc/acpi/powerbtn.sh b/captain/flavors/common_acpi/mkosi.extra/etc/acpi/powerbtn.sh similarity index 100% rename from mkosi.extra/etc/acpi/powerbtn.sh rename to captain/flavors/common_acpi/mkosi.extra/etc/acpi/powerbtn.sh diff --git a/mkosi.extra/etc/modules-load.d/ipmi.conf 
b/captain/flavors/common_acpi/mkosi.extra/etc/modules-load.d/ipmi.conf similarity index 100% rename from mkosi.extra/etc/modules-load.d/ipmi.conf rename to captain/flavors/common_acpi/mkosi.extra/etc/modules-load.d/ipmi.conf diff --git a/captain/flavors/common_armbian/__init__.py b/captain/flavors/common_armbian/__init__.py new file mode 100644 index 0000000..388cf31 --- /dev/null +++ b/captain/flavors/common_armbian/__init__.py @@ -0,0 +1,24 @@ +import logging +from dataclasses import dataclass +from pathlib import Path + +from captain.config import Config +from captain.flavors.common_debian import DebianCommonFlavor + +log: logging.Logger = logging.getLogger(__name__) + + +@dataclass +class ArmbianCommonFlavor(DebianCommonFlavor): + id = "common-armbian" + name = "Armbian Common" + description = "Base flavor for Armbian-based distros" + + def setup(self, cfg: Config, flavor_dir: Path) -> None: + super().setup(cfg, flavor_dir) + + this_flavor_dir = self.specific_flavor_dir("common-armbian") + self.add_static_dir("mkosi.sandbox", this_flavor_dir) + + def has_iso(self) -> bool: + return False diff --git a/captain/flavors/common_armbian/mkosi.sandbox/etc/apt/sources.list.d/armbian-next.list b/captain/flavors/common_armbian/mkosi.sandbox/etc/apt/sources.list.d/armbian-next.list new file mode 100644 index 0000000..0b1c08d --- /dev/null +++ b/captain/flavors/common_armbian/mkosi.sandbox/etc/apt/sources.list.d/armbian-next.list @@ -0,0 +1,2 @@ +deb [signed-by=/usr/share/keyrings/armbian-next.gpg] https://apt-test.next.armbian.com armbian main +deb [signed-by=/usr/share/keyrings/armbian-next.gpg] https://apt-test.next.armbian.com armbian-trixie main diff --git a/captain/flavors/common_armbian/mkosi.sandbox/usr/share/keyrings/armbian-next.gpg b/captain/flavors/common_armbian/mkosi.sandbox/usr/share/keyrings/armbian-next.gpg new file mode 100644 index 0000000..1e1b9ee Binary files /dev/null and 
b/captain/flavors/common_armbian/mkosi.sandbox/usr/share/keyrings/armbian-next.gpg differ diff --git a/captain/flavors/common_debian/__init__.py b/captain/flavors/common_debian/__init__.py new file mode 100644 index 0000000..051db43 --- /dev/null +++ b/captain/flavors/common_debian/__init__.py @@ -0,0 +1,43 @@ +import logging +from abc import abstractmethod +from dataclasses import dataclass +from pathlib import Path + +from captain.config import Config +from captain.flavor import BaseFlavor + +log: logging.Logger = logging.getLogger(__name__) + + +@dataclass +class DebianCommonFlavor(BaseFlavor): + id = "common-debian" + name = "Debian Common" + description = "Base flavor for Debian-based distros" + + def setup(self, cfg: Config, flavor_dir: Path) -> None: + super().setup(cfg, flavor_dir) + + this_flavor_dir = self.specific_flavor_dir("common-debian") + + # Templates + self.template_map["mkosi.conf"] = [this_flavor_dir / "mkosi.conf.j2"] + + self.template_map["mkosi.postinst"] = [ + this_flavor_dir / "bash.header.sh", + this_flavor_dir / "mkosi.postinst.sh.j2", + ] + + self.template_map["mkosi.finalize"] = [ + this_flavor_dir / "bash.header.sh", + this_flavor_dir / "mkosi.finalize.sh.j2", + ] + + # Static files + self.add_static_dir("mkosi.extra", this_flavor_dir) + self.add_static_dir("mkosi.sandbox", this_flavor_dir) + self.add_static_dir("mkosi.skeleton", this_flavor_dir) + + @abstractmethod + def kernel_packages(self) -> set[str]: + pass diff --git a/captain/flavors/common_debian/bash.header.sh b/captain/flavors/common_debian/bash.header.sh new file mode 100644 index 0000000..afde1fa --- /dev/null +++ b/captain/flavors/common_debian/bash.header.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -euo pipefail + +# logger utility, output ANSI-colored messages to stderr; first argument is level (debug/info/warn/error), all other arguments are the message. 
+declare -A log_colors=(["debug"]="0;36" ["info"]="0;32" ["notice"]="1;32" ["warn"]="1;33" ["warning"]="1;33" ["error"]="1;31") +declare -A log_emoji=(["debug"]="🐛" ["info"]="🌿" ["notice"]="🌱" ["warn"]="🚸" ["warning"]="🚸" ["error"]="🚨") +function log() { + declare level="${1}" + shift + [[ "${level}" == "debug" && "${DEBUG}" != "yes" ]] && return # Skip debugs unless DEBUG=yes is set in the environment + # Normal output + declare color="\033[${log_colors[${level}]}m" + declare emoji="${log_emoji[${level}]}" + declare ansi_reset="\033[0m" + level=$(printf "%-5s" "${level}") # pad to 5 characters before printing + echo -e "${emoji} ${ansi_reset}[${color}${level}${ansi_reset}] ${color}${*}${ansi_reset}" >&2 +} + diff --git a/mkosi.conf b/captain/flavors/common_debian/mkosi.conf.j2 similarity index 82% rename from mkosi.conf rename to captain/flavors/common_debian/mkosi.conf.j2 index 10a5702..ce128aa 100644 --- a/mkosi.conf +++ b/captain/flavors/common_debian/mkosi.conf.j2 @@ -3,15 +3,15 @@ Distribution=debian Release=trixie [Output] +ManifestFormat=json Format=cpio CompressOutput=zstd CompressLevel=19 OutputDirectory=mkosi.output [Build] -ToolsTree=yes Incremental=yes -CacheDirectory=mkosi.cache +CacheDirectory=mkosi.cache.for.incremental [Content] Bootable=no @@ -26,14 +26,9 @@ WithDocs=no # rootfs → tmpfs via switch_root before exec'ing systemd. This makes # pivot_root(2) work for container runtimes (runc). -# Pre-built kernel modules and tools are injected via separate --extra-tree= -# flags on the CLI: -# --extra-tree=mkosi.output/tools/{arch}/ (tools) -# --extra-tree=mkosi.output/kernel/{ver}/{arch}/modules (kernel modules) -# This keeps mkosi.conf architecture-neutral so both amd64 and arm64 builds -# can coexist under mkosi.output/. - Packages= + {{ ' '.join(flavor.kernel_packages()) }} + tiny-initramfs # a tiny initrd builder to account for the linux-image dependency # systemd and core systemd systemd-sysv # Provides poweroff, shutdown, reboot, halt, etc. 
diff --git a/mkosi.extra/etc/cni/net.d/10-bridge.conflist b/captain/flavors/common_debian/mkosi.extra/etc/cni/net.d/10-bridge.conflist similarity index 100% rename from mkosi.extra/etc/cni/net.d/10-bridge.conflist rename to captain/flavors/common_debian/mkosi.extra/etc/cni/net.d/10-bridge.conflist diff --git a/mkosi.extra/etc/containerd/config.toml b/captain/flavors/common_debian/mkosi.extra/etc/containerd/config.toml similarity index 100% rename from mkosi.extra/etc/containerd/config.toml rename to captain/flavors/common_debian/mkosi.extra/etc/containerd/config.toml diff --git a/mkosi.extra/etc/locale.conf b/captain/flavors/common_debian/mkosi.extra/etc/locale.conf similarity index 100% rename from mkosi.extra/etc/locale.conf rename to captain/flavors/common_debian/mkosi.extra/etc/locale.conf diff --git a/mkosi.extra/etc/machine-id b/captain/flavors/common_debian/mkosi.extra/etc/machine-id similarity index 100% rename from mkosi.extra/etc/machine-id rename to captain/flavors/common_debian/mkosi.extra/etc/machine-id diff --git a/mkosi.extra/etc/motd b/captain/flavors/common_debian/mkosi.extra/etc/motd similarity index 100% rename from mkosi.extra/etc/motd rename to captain/flavors/common_debian/mkosi.extra/etc/motd diff --git a/mkosi.extra/etc/nerdctl/nerdctl.toml b/captain/flavors/common_debian/mkosi.extra/etc/nerdctl/nerdctl.toml similarity index 100% rename from mkosi.extra/etc/nerdctl/nerdctl.toml rename to captain/flavors/common_debian/mkosi.extra/etc/nerdctl/nerdctl.toml diff --git a/mkosi.extra/etc/os-release b/captain/flavors/common_debian/mkosi.extra/etc/os-release similarity index 100% rename from mkosi.extra/etc/os-release rename to captain/flavors/common_debian/mkosi.extra/etc/os-release diff --git a/mkosi.extra/etc/rsyslog.conf b/captain/flavors/common_debian/mkosi.extra/etc/rsyslog.conf similarity index 100% rename from mkosi.extra/etc/rsyslog.conf rename to captain/flavors/common_debian/mkosi.extra/etc/rsyslog.conf diff --git 
a/mkosi.extra/etc/sysctl.d/99-ip-forward.conf b/captain/flavors/common_debian/mkosi.extra/etc/sysctl.d/99-ip-forward.conf similarity index 100% rename from mkosi.extra/etc/sysctl.d/99-ip-forward.conf rename to captain/flavors/common_debian/mkosi.extra/etc/sysctl.d/99-ip-forward.conf diff --git a/mkosi.extra/etc/sysctl.d/99-quiet-audit.conf b/captain/flavors/common_debian/mkosi.extra/etc/sysctl.d/99-quiet-audit.conf similarity index 100% rename from mkosi.extra/etc/sysctl.d/99-quiet-audit.conf rename to captain/flavors/common_debian/mkosi.extra/etc/sysctl.d/99-quiet-audit.conf diff --git a/mkosi.extra/etc/systemd/network/80-dhcp.network b/captain/flavors/common_debian/mkosi.extra/etc/systemd/network/80-dhcp.network similarity index 100% rename from mkosi.extra/etc/systemd/network/80-dhcp.network rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/network/80-dhcp.network diff --git a/mkosi.extra/etc/systemd/resolved.conf.d/no-stub.conf b/captain/flavors/common_debian/mkosi.extra/etc/systemd/resolved.conf.d/no-stub.conf similarity index 100% rename from mkosi.extra/etc/systemd/resolved.conf.d/no-stub.conf rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/resolved.conf.d/no-stub.conf diff --git a/mkosi.extra/etc/systemd/system/captainos-banner.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/captainos-banner.service similarity index 100% rename from mkosi.extra/etc/systemd/system/captainos-banner.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/captainos-banner.service diff --git a/mkosi.extra/etc/systemd/system/captainos-static-network.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/captainos-static-network.service similarity index 100% rename from mkosi.extra/etc/systemd/system/captainos-static-network.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/captainos-static-network.service diff --git 
a/mkosi.extra/etc/systemd/system/containerd.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/containerd.service similarity index 100% rename from mkosi.extra/etc/systemd/system/containerd.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/containerd.service diff --git a/mkosi.extra/etc/systemd/system/default.target b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/default.target similarity index 100% rename from mkosi.extra/etc/systemd/system/default.target rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/default.target diff --git a/mkosi.extra/etc/systemd/system/initrd-cleanup.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd-cleanup.service similarity index 100% rename from mkosi.extra/etc/systemd/system/initrd-cleanup.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd-cleanup.service diff --git a/mkosi.extra/etc/systemd/system/initrd-parse-etc.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd-parse-etc.service similarity index 100% rename from mkosi.extra/etc/systemd/system/initrd-parse-etc.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd-parse-etc.service diff --git a/mkosi.extra/etc/systemd/system/initrd-switch-root.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd-switch-root.service similarity index 100% rename from mkosi.extra/etc/systemd/system/initrd-switch-root.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd-switch-root.service diff --git a/mkosi.extra/etc/systemd/system/initrd-switch-root.target b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd-switch-root.target similarity index 100% rename from mkosi.extra/etc/systemd/system/initrd-switch-root.target rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd-switch-root.target diff --git 
a/mkosi.extra/etc/systemd/system/initrd-udevadm-cleanup-db.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd-udevadm-cleanup-db.service similarity index 100% rename from mkosi.extra/etc/systemd/system/initrd-udevadm-cleanup-db.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd-udevadm-cleanup-db.service diff --git a/mkosi.extra/etc/systemd/system/initrd.target b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd.target similarity index 100% rename from mkosi.extra/etc/systemd/system/initrd.target rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/initrd.target diff --git a/mkosi.extra/etc/systemd/system/rsyslog-hostname-reload.path b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/rsyslog-hostname-reload.path similarity index 100% rename from mkosi.extra/etc/systemd/system/rsyslog-hostname-reload.path rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/rsyslog-hostname-reload.path diff --git a/mkosi.extra/etc/systemd/system/rsyslog-hostname-reload.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/rsyslog-hostname-reload.service similarity index 100% rename from mkosi.extra/etc/systemd/system/rsyslog-hostname-reload.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/rsyslog-hostname-reload.service diff --git a/mkosi.extra/etc/systemd/system/rsyslog.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/rsyslog.service similarity index 100% rename from mkosi.extra/etc/systemd/system/rsyslog.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/rsyslog.service diff --git a/mkosi.extra/etc/systemd/system/serial-getty@.service.d/autologin.conf b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/serial-getty@.service.d/autologin.conf similarity index 100% rename from mkosi.extra/etc/systemd/system/serial-getty@.service.d/autologin.conf rename 
to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/serial-getty@.service.d/autologin.conf diff --git a/mkosi.extra/etc/systemd/system/systemd-firstboot.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/systemd-firstboot.service similarity index 100% rename from mkosi.extra/etc/systemd/system/systemd-firstboot.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/systemd-firstboot.service diff --git a/mkosi.extra/etc/systemd/system/tink-agent-setup.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/tink-agent-setup.service similarity index 100% rename from mkosi.extra/etc/systemd/system/tink-agent-setup.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/tink-agent-setup.service diff --git a/mkosi.extra/etc/systemd/system/tink-agent.service b/captain/flavors/common_debian/mkosi.extra/etc/systemd/system/tink-agent.service similarity index 100% rename from mkosi.extra/etc/systemd/system/tink-agent.service rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/system/tink-agent.service diff --git a/mkosi.extra/etc/systemd/timesyncd.conf b/captain/flavors/common_debian/mkosi.extra/etc/systemd/timesyncd.conf similarity index 100% rename from mkosi.extra/etc/systemd/timesyncd.conf rename to captain/flavors/common_debian/mkosi.extra/etc/systemd/timesyncd.conf diff --git a/mkosi.extra/etc/timezone b/captain/flavors/common_debian/mkosi.extra/etc/timezone similarity index 100% rename from mkosi.extra/etc/timezone rename to captain/flavors/common_debian/mkosi.extra/etc/timezone diff --git a/mkosi.extra/init b/captain/flavors/common_debian/mkosi.extra/init similarity index 100% rename from mkosi.extra/init rename to captain/flavors/common_debian/mkosi.extra/init diff --git a/mkosi.extra/root/.bashrc b/captain/flavors/common_debian/mkosi.extra/root/.bashrc similarity index 100% rename from mkosi.extra/root/.bashrc rename to 
captain/flavors/common_debian/mkosi.extra/root/.bashrc diff --git a/mkosi.extra/root/.profile b/captain/flavors/common_debian/mkosi.extra/root/.profile similarity index 100% rename from mkosi.extra/root/.profile rename to captain/flavors/common_debian/mkosi.extra/root/.profile diff --git a/mkosi.extra/usr/local/bin/captainos-banner b/captain/flavors/common_debian/mkosi.extra/usr/local/bin/captainos-banner similarity index 100% rename from mkosi.extra/usr/local/bin/captainos-banner rename to captain/flavors/common_debian/mkosi.extra/usr/local/bin/captainos-banner diff --git a/mkosi.extra/usr/local/bin/captainos-static-network b/captain/flavors/common_debian/mkosi.extra/usr/local/bin/captainos-static-network similarity index 100% rename from mkosi.extra/usr/local/bin/captainos-static-network rename to captain/flavors/common_debian/mkosi.extra/usr/local/bin/captainos-static-network diff --git a/mkosi.extra/usr/local/bin/rsyslog-start b/captain/flavors/common_debian/mkosi.extra/usr/local/bin/rsyslog-start similarity index 100% rename from mkosi.extra/usr/local/bin/rsyslog-start rename to captain/flavors/common_debian/mkosi.extra/usr/local/bin/rsyslog-start diff --git a/mkosi.extra/usr/local/bin/tink-agent-setup b/captain/flavors/common_debian/mkosi.extra/usr/local/bin/tink-agent-setup similarity index 100% rename from mkosi.extra/usr/local/bin/tink-agent-setup rename to captain/flavors/common_debian/mkosi.extra/usr/local/bin/tink-agent-setup diff --git a/mkosi.extra/usr/local/bin/tink-agent-start b/captain/flavors/common_debian/mkosi.extra/usr/local/bin/tink-agent-start similarity index 100% rename from mkosi.extra/usr/local/bin/tink-agent-start rename to captain/flavors/common_debian/mkosi.extra/usr/local/bin/tink-agent-start diff --git a/mkosi.finalize b/captain/flavors/common_debian/mkosi.finalize.sh.j2 similarity index 55% rename from mkosi.finalize rename to captain/flavors/common_debian/mkosi.finalize.sh.j2 index 1057bcd..c0c67d4 100755 --- a/mkosi.finalize +++ 
b/captain/flavors/common_debian/mkosi.finalize.sh.j2 @@ -3,7 +3,10 @@ # The root filesystem is writable here. set -euo pipefail -echo "==> CaptainOS finalize: unlocking root account..." +log info "Showing full contents of environment variables for debugging:" +env + +log info "CaptainOS finalize: unlocking root account..." # Unlock root with empty password # Replace the locked root entry (! or * in password field) with empty hash @@ -13,38 +16,38 @@ if [[ -f "$BUILDROOT/etc/shadow" ]]; then fi # Ensure /etc/initrd-release does not exist -rm -f "$BUILDROOT/etc/initrd-release" -echo " /etc/initrd-release removed" +log info "Ensure no /etc/initrd-release exists..." +rm -fv "$BUILDROOT/etc/initrd-release" # Remove systemd-firstboot completely -rm -f "$BUILDROOT/usr/lib/systemd/system/systemd-firstboot.service" -rm -f "$BUILDROOT/usr/lib/systemd/system/sysinit.target.wants/systemd-firstboot.service" -rm -f "$BUILDROOT/usr/bin/systemd-firstboot" -echo " systemd-firstboot removed" +log info "Completely remove systemd-firstboot..." +rm -fv "$BUILDROOT/usr/lib/systemd/system/systemd-firstboot.service" +rm -fv "$BUILDROOT/usr/lib/systemd/system/sysinit.target.wants/systemd-firstboot.service" +rm -fv "$BUILDROOT/usr/bin/systemd-firstboot" # Point /etc/resolv.conf at the non-stub resolv.conf written by # systemd-resolved. This file contains upstream (DHCP-provided) # nameservers instead of 127.0.0.53. tink-agent bind-mounts the host's # /etc/resolv.conf into action containers so they need real, routable # nameservers (the stub address is unreachable from container namespaces). -ln -sf /run/systemd/resolve/resolv.conf "$BUILDROOT/etc/resolv.conf" -echo " /etc/resolv.conf -> /run/systemd/resolve/resolv.conf" +log info "Handle resolv.conf..." +ln -sfv /run/systemd/resolve/resolv.conf "$BUILDROOT/etc/resolv.conf" # Ensure /init (switch_root script) is executable +log info "Ensure /init is executable..." 
if [[ -f "$BUILDROOT/init" ]]; then - chmod +x "$BUILDROOT/init" - echo " /init made executable" + chmod -v +x "$BUILDROOT/init" fi # Ensure tool binaries are executable. # GitHub Actions artifact upload/download and some archive tools strip # the execute bit. Re-apply +x to every tool directory so containerd, # runc, nerdctl, and CNI plugins can actually run. +log info "Ensure tool binaries are executable..." for dir in usr/local/bin opt/cni/bin; do target="$BUILDROOT/$dir" if [[ -d "$target" ]]; then - find "$target" -type f -exec chmod +x {} + - echo " +x restored on $dir/*" + find "$target" -type f -exec chmod -v +x {} + fi done @@ -55,10 +58,12 @@ done # the initramfs stays small without requiring a kernel recompile. # --------------------------------------------------------------------------- MODDIR="$BUILDROOT/usr/lib/modules" + if [[ -d "$MODDIR" ]]; then - echo "==> Trimming unnecessary kernel modules..." + log info "Trimming unnecessary kernel modules..." BEFORE=$(du -sb "$MODDIR" | awk '{print $1}') + # @TODO: this has to be templated, for flavors like fat or full we should keep most modules. # Module directory patterns to remove (relative to kernel/ inside the # modules tree). Each entry is passed to 'find -path'. 
EXCLUDE_PATTERNS=( @@ -78,16 +83,23 @@ if [[ -d "$MODDIR" ]]; then "*/kernel/virt" # Niche NIC driver "*/kernel/drivers/net/ethernet/netronome" + # Sound stuff + "*/kernel/sound" + # Media stuff + "*/kernel/drivers/media" + # Wireless net drivers + "*/kernel/drivers/net/wireless" ) for pattern in "${EXCLUDE_PATTERNS[@]}"; do - find "$MODDIR" -path "$pattern" -type d -exec rm -rf {} + 2>/dev/null || true + log info " Removing modules matching pattern: '${pattern}'" + find "$MODDIR" -path "$pattern" -type d -exec rm -rf {} + || true done AFTER=$(du -sb "$MODDIR" | awk '{print $1}') SAVED=$(( (BEFORE - AFTER) / 1024 / 1024 )) [[ $SAVED -lt 0 ]] && SAVED=0 - echo " Removed ${SAVED}MB of unnecessary kernel modules" + log info " Removed ${SAVED}MB of unnecessary kernel modules" # Regenerate module dependency files after removal for all kernel versions for kdir in "$MODDIR"/*; do @@ -97,4 +109,27 @@ if [[ -d "$MODDIR" ]]; then done fi -echo "==> CaptainOS finalize complete." +log info "Listing 20 biggest remaining kernel modules du: ${MODDIR}" +du -h -x -d 5 "$MODDIR" | sort -h | tail -n20 || true + +log info "Removing /boot from the image to save space" +rm -rf "${BUILDROOT:?}"/boot + +DTB_SRC_DIR=$(echo -n "${BUILDROOT}"/usr/lib/linux-image-*) +if [[ -d "$DTB_SRC_DIR" ]]; then + log info "Exporting kernel DTBs; DTB_DIR: ${DTB_SRC_DIR}" + mkdir -pv "${BUILDROOT}/root/dtb" + # Move every .dtb* file in the DTB_SRC_DIR to "${BUILDROOT}/root/dtb" -- maintain the directory structure + find "${DTB_SRC_DIR}" -type f -name "*.dtb*" -exec sh -c 'for f; do mkdir -p "${0}/$(dirname "${f#'"${DTB_SRC_DIR}"'/}")" && mv "$f" "${0}/${f#'"${DTB_SRC_DIR}"'/}" ; done' "${BUILDROOT}/root/dtb" {} + || true + + # Now simply output the dtb directory to mkosi's OUTPUTDIR + log info "Copying DTBs to mkosi output directory: ${OUTPUTDIR}/dtb" + mv "${BUILDROOT}/root/dtb" "${OUTPUTDIR}/dtb" +else + log info "No DTB source directory found at ${DTB_SRC_DIR}; skipping DTB export" +fi + +log info 
"Final contents of root filesystem (up to depth 4, top 30):" +du -h -x -d 4 "$BUILDROOT" | sort -h | tail -n30 || true + +log info "CaptainOS finalize complete." diff --git a/captain/flavors/common_debian/mkosi.postinst.sh.j2 b/captain/flavors/common_debian/mkosi.postinst.sh.j2 new file mode 100755 index 0000000..e090e19 --- /dev/null +++ b/captain/flavors/common_debian/mkosi.postinst.sh.j2 @@ -0,0 +1,77 @@ +#!/bin/bash +# mkosi.postinst — Post-installation script for the CaptainOS initrd. +# Runs inside the image chroot after packages are installed. +# NOTE: The filesystem is mostly read-only here. Use mkosi.extra/ for +# creating directories and dropping config files. +set -euo pipefail + +log info "CaptainOS post-install: dump env at $(hostname)..." +env + +log info "CaptainOS post-install: show mounts at $(hostname)..." +mount || true + +# Ensure CA certificate bundle is generated (dpkg triggers may not +# fire reliably in mkosi's chroot). +log info "Generating fresh CA certificate bundle..." +update-ca-certificates --fresh 2>/dev/null || true + +# @TODO: this is the wrong place to do any systemd enablement/disablement/masking: mkosi will +# after this still apply systemd "presets" which will change the game completely. +# This is better done in a finalize.chroot script which would run after that. +# alternatively: adopt/introduce systemd presets, but that's a can of worms. + +log info "Listing all systemd units..." +systemctl list-unit-files --no-pager || true + +log info "CaptainOS post-install: configuring services..." +# Enable core services +declare -a units_to_enable=( + systemd-networkd systemd-resolved systemd-timesyncd containerd captainos-banner + systemd-time-wait-sync # wait for time sync + captainos-static-network tink-agent-setup tink-agent rsyslog rsyslog-hostname-reload.path +) +declare unit +for unit in "${units_to_enable[@]}"; do + log info "Enabling systemd unit ${unit}..." 
+ if systemctl enable "${unit}"; then + log info " ${unit} enabled successfully." + else + log warning " Failed to enable ${unit}, but continuing anyway..." + fi +done + +# Root password is set via mkosi.conf RootPassword= setting + +# Disable unnecessary systemd units to speed up boot +declare -a units_to_disable=( + apt-daily.timer + apt-daily-upgrade.timer + e2scrub_all.timer + e2scrub_reap.service + fstrim.timer + logrotate.timer + man-db.timer + remote-fs.target + systemd-firstboot.service +) +for unit in "${units_to_disable[@]}"; do + log info "Disabling and masking systemd unit ${unit}..." + if systemctl disable "${unit}"; then + log info " ${unit} disabled successfully." + else + log warning " Failed to disable ${unit}, but continuing anyway..." + fi + + if systemctl mask "${unit}"; then + log info " ${unit} masked successfully." + else + log warning " Failed to mask ${unit}, but continuing anyway..." + fi +done + +# Set default target to multi-user (no graphical) +log info "Setting default systemd target to multi-user.target (no graphical)..." +systemctl set-default multi-user.target || true + +log info "CaptainOS post-install complete." 
diff --git a/captain/flavors/common_debian/mkosi.sandbox/etc/apt/apt.conf.d/01nopty b/captain/flavors/common_debian/mkosi.sandbox/etc/apt/apt.conf.d/01nopty new file mode 100644 index 0000000..551c0a6 --- /dev/null +++ b/captain/flavors/common_debian/mkosi.sandbox/etc/apt/apt.conf.d/01nopty @@ -0,0 +1,4 @@ +Dpkg::Use-Pty "0"; +APT::Quiet "2"; +Dpkg::Progress-Fancy "false"; +APT::Color "true"; diff --git a/captain/flavors/common_debian/mkosi.skeleton/etc/apt/apt.conf.d/01nopty b/captain/flavors/common_debian/mkosi.skeleton/etc/apt/apt.conf.d/01nopty new file mode 100644 index 0000000..551c0a6 --- /dev/null +++ b/captain/flavors/common_debian/mkosi.skeleton/etc/apt/apt.conf.d/01nopty @@ -0,0 +1,4 @@ +Dpkg::Use-Pty "0"; +APT::Quiet "2"; +Dpkg::Progress-Fancy "false"; +APT::Color "true"; diff --git a/captain/flavors/trixie_full/__init__.py b/captain/flavors/trixie_full/__init__.py new file mode 100644 index 0000000..9b81c31 --- /dev/null +++ b/captain/flavors/trixie_full/__init__.py @@ -0,0 +1,22 @@ +import logging +from dataclasses import dataclass + +from captain.flavor import BaseFlavor +from captain.flavors.common_acpi import TrixieACPIFlavor + +log: logging.Logger = logging.getLogger(__name__) + + +def create_flavor() -> BaseFlavor: + return TrixieFullFlavor() + + +@dataclass +class TrixieFullFlavor(TrixieACPIFlavor): + id = "trixie-full" + name = "Trixie Full" + description = "Debian Trixie based with linux-image-generic standard Debian kernel" + supported_architectures = frozenset(["amd64", "arm64"]) + + def kernel_packages(self) -> set[str]: + return {"linux-image-generic"} diff --git a/captain/flavors/trixie_meson64/__init__.py b/captain/flavors/trixie_meson64/__init__.py new file mode 100644 index 0000000..f3dd09e --- /dev/null +++ b/captain/flavors/trixie_meson64/__init__.py @@ -0,0 +1,22 @@ +import logging +from dataclasses import dataclass + +from captain.flavor import BaseFlavor +from captain.flavors.common_armbian import ArmbianCommonFlavor + +log: 
logging.Logger = logging.getLogger(__name__) + + +def create_flavor() -> BaseFlavor: + return TrixieMeson64Flavor() + + +@dataclass +class TrixieMeson64Flavor(ArmbianCommonFlavor): + id = "trixie-meson64" + name = "Trixie for Meson (Amlogic) 64-bit ARM machines" + description = "Debian Trixie based with Armbian's meson64-edge kernel" + supported_architectures = frozenset(["arm64"]) # does NOT support amd64 + + def kernel_packages(self) -> set[str]: + return {"linux-image-edge-meson64"} diff --git a/captain/flavors/trixie_rockchip64/__init__.py b/captain/flavors/trixie_rockchip64/__init__.py new file mode 100644 index 0000000..c49b5ab --- /dev/null +++ b/captain/flavors/trixie_rockchip64/__init__.py @@ -0,0 +1,22 @@ +import logging +from dataclasses import dataclass + +from captain.flavor import BaseFlavor +from captain.flavors.common_armbian import ArmbianCommonFlavor + +log: logging.Logger = logging.getLogger(__name__) + + +def create_flavor() -> BaseFlavor: + return TrixieRockchip64Flavor() + + +@dataclass +class TrixieRockchip64Flavor(ArmbianCommonFlavor): + id = "trixie-rockchip64" + name = "Trixie for Rockchip 64-bit ARM machines" + description = "Debian Trixie based with Armbian's rockchip64-edge kernel" + supported_architectures = frozenset(["arm64"]) # does NOT support amd64 + + def kernel_packages(self) -> set[str]: + return {"linux-image-edge-rockchip64"} diff --git a/captain/iso.py b/captain/iso.py index 8275b32..fb57777 100644 --- a/captain/iso.py +++ b/captain/iso.py @@ -2,15 +2,15 @@ from __future__ import annotations +import logging import shutil import textwrap from pathlib import Path from captain.config import Config -from captain.log import for_stage from captain.util import ensure_dir, run -_log = for_stage("iso") +log = logging.getLogger(__name__) # GRUB platform directory name per architecture. 
_GRUB_PLATFORM = { @@ -41,21 +41,19 @@ def _grub_cfg(arch: str) -> str: def _find_vmlinuz(cfg: Config) -> Path: """Locate the vmlinuz kernel image.""" - vmlinuz_dir = cfg.kernel_output - candidates = sorted(vmlinuz_dir.glob("vmlinuz-*")) if vmlinuz_dir.is_dir() else [] - if not candidates: - _log.err(f"No vmlinuz found in {vmlinuz_dir}") - _log.err("Build the kernel first: ./build.py kernel") + vmlinuz_files = sorted(cfg.initramfs_output.glob("*.vmlinuz*")) + if not vmlinuz_files: + log.error("No vmlinuz found in %s", cfg.initramfs_output) raise SystemExit(1) - return candidates[0] + return vmlinuz_files[0] def _find_initramfs(cfg: Config) -> Path: """Locate the initramfs CPIO image.""" cpio_files = sorted(cfg.initramfs_output.glob("*.cpio*")) if not cpio_files: - _log.err(f"No initramfs CPIO found in {cfg.initramfs_output}") - _log.err("Build the initramfs first: ./build.py initramfs") + log.error("No initramfs CPIO found in %s", cfg.initramfs_output) + log.error("Build the initramfs first: ./build.py initramfs") raise SystemExit(1) return cpio_files[0] @@ -80,10 +78,9 @@ def build(cfg: Config) -> None: grub_platform = _GRUB_PLATFORM.get(cfg.arch) if grub_platform is None: - _log.err(f"Unsupported architecture for ISO build: {cfg.arch}") + log.error("Unsupported architecture for ISO build: %s", cfg.arch) raise SystemExit(1) - # Prepare staging directory staging = cfg.iso_staging if staging.exists(): shutil.rmtree(staging) @@ -91,23 +88,20 @@ def build(cfg: Config) -> None: boot_dir = ensure_dir(staging / "boot") grub_dir = ensure_dir(boot_dir / "grub") - _log.log(f"Staging ISO filesystem at {staging}") + log.info("Staging ISO filesystem at %s", staging) - # Copy kernel and initramfs shutil.copy2(vmlinuz, boot_dir / "vmlinuz") shutil.copy2(initramfs, boot_dir / "initramfs") - # Write GRUB configuration (grub_dir / "grub.cfg").write_text(_grub_cfg(cfg.arch)) - # Build the ISO iso_dir = ensure_dir(cfg.iso_output) - iso_path = iso_dir / 
f"captainos-{cfg.kernel_version}-{cfg.arch_info.output_arch}.iso" + iso_path = iso_dir / f"captainos-{cfg.flavor_id}-{cfg.arch_info.output_arch}.iso" - _log.log(f"Building ISO with grub-mkrescue ({grub_platform})...") + log.info("Building ISO with grub-mkrescue (%s)...", grub_platform) grub_mkrescue = shutil.which("grub-mkrescue") if grub_mkrescue is None: - _log.err("grub-mkrescue not found. Install grub-common or use ISO_MODE=docker.") + log.error("grub-mkrescue not found. Install grub-common or use ISO_MODE=docker.") raise SystemExit(1) run( @@ -121,4 +115,4 @@ def build(cfg: Config) -> None: ) size_mb = iso_path.stat().st_size / (1024 * 1024) - _log.log(f"ISO created: {iso_path} ({size_mb:.1f}M)") + log.info("ISO created: %s (%.1fM)", iso_path, size_mb) diff --git a/captain/kernel.py b/captain/kernel.py deleted file mode 100644 index 9aceba3..0000000 --- a/captain/kernel.py +++ /dev/null @@ -1,311 +0,0 @@ -"""Kernel download, configuration, compilation, and installation. - -Heavy lifting (make, strip) is still done via subprocess — only the -orchestration is in Python. Called directly by ``cli._build_kernel_stage`` -in both native and Docker modes (inside the container ``build.py kernel`` -re-enters via the CLI with all modes forced to native). 
-""" - -from __future__ import annotations - -import os -import re -import shutil -import tarfile -import urllib.error -import urllib.request -from pathlib import Path - -from captain.config import Config -from captain.log import for_stage -from captain.util import ensure_dir, run, safe_extractall - -_log = for_stage("kernel") - -_DOWNLOAD_TIMEOUT = 60 # seconds - - -def _urlretrieve_with_timeout( - url: str, - filename: Path | str, - *, - reporthook: object = None, - timeout: int = _DOWNLOAD_TIMEOUT, -) -> None: - """Like urllib.request.urlretrieve but with a socket timeout.""" - req = urllib.request.Request(url) - with urllib.request.urlopen(req, timeout=timeout) as resp: - headers = resp.info() - total = int(headers.get("Content-Length", -1)) - block_size = 8192 - block_num = 0 - with open(filename, "wb") as out: - while True: - buf = resp.read(block_size) - if not buf: - break - out.write(buf) - block_num += 1 - if reporthook is not None: - reporthook(block_num, block_size, total) # type: ignore[operator] - - -def _progress_hook(block_num: int, block_size: int, total_size: int) -> None: - """Simple download progress indicator.""" - downloaded = block_num * block_size - if total_size > 0: - pct = min(100, downloaded * 100 // total_size) - mb = downloaded / (1024 * 1024) - total_mb = total_size / (1024 * 1024) - print(f"\r {mb:.1f}/{total_mb:.1f} MB ({pct}%)", end="", flush=True) - else: - mb = downloaded / (1024 * 1024) - print(f"\r {mb:.1f} MB", end="", flush=True) - - -def download_kernel(version: str, dest_dir: Path) -> Path: - """Download and extract a kernel tarball. 
Returns the source directory.""" - src_dir = dest_dir / f"linux-{version}" - if src_dir.is_dir(): - _log.log(f"Using cached kernel source at {src_dir}") - return src_dir - - major = version.split(".")[0] - url = f"https://cdn.kernel.org/pub/linux/kernel/v{major}.x/linux-{version}.tar.xz" - tarball = dest_dir / f"linux-{version}.tar.xz" - - _log.log(f"Downloading kernel {version}...") - _log.log(f" URL: {url}") - ensure_dir(dest_dir) - try: - _urlretrieve_with_timeout(url, tarball, reporthook=_progress_hook) - except urllib.error.HTTPError as exc: - print() # newline after progress - _log.err(f"Download failed: {exc} — {url}") - raise SystemExit(1) from None - except urllib.error.URLError as exc: - print() # newline after progress - _log.err(f"Download failed: {exc.reason} — {url}") - raise SystemExit(1) from None - print() # newline after progress - - _log.log("Extracting kernel source...") - with tarfile.open(tarball, "r:xz") as tf: - safe_extractall(tf, path=dest_dir) - tarball.unlink() - - return src_dir - - -def _kernel_branch(version: str) -> str: - """Derive the stable branch prefix from a full kernel version. - - ``"6.18.16"`` → ``"6.18.y"`` - """ - parts = version.split(".") - if len(parts) < 2: - _log.err(f"Invalid kernel version format: {version}") - raise SystemExit(1) - return f"{parts[0]}.{parts[1]}.y" - - -def _find_defconfig(cfg: Config) -> Path: - """Locate the defconfig for the current kernel version and architecture. - - When ``cfg.kernel_config`` is set, that path is used directly. - Otherwise returns ``kernel.configs/{major}.{minor}.y.{arch}``. - Exits with a helpful error if no matching config file is found. 
- """ - if cfg.kernel_config: - explicit = Path(cfg.kernel_config) - if not explicit.is_absolute(): - explicit = cfg.project_dir / explicit - if explicit.is_file(): - return explicit - _log.err(f"Kernel config not found: {explicit}") - raise SystemExit(1) - - ai = cfg.arch_info - branch = _kernel_branch(cfg.kernel_version) - defconfig = cfg.project_dir / "kernel.configs" / f"{branch}.{ai.arch}" - if defconfig.is_file(): - return defconfig - - # List available branches for a helpful error message. - configs_dir = cfg.project_dir / "kernel.configs" - available = sorted( - { - p.name.rsplit(".", 1)[0] - for p in configs_dir.glob(f"*.{ai.arch}") - if not p.name.startswith(".") - } - ) - avail_str = ", ".join(available) if available else "(none)" - _log.err( - f"No kernel config found for {branch} on {ai.arch}\n" - f" Expected: {defconfig}\n" - f" Available branches for {ai.arch}: {avail_str}" - ) - raise SystemExit(1) - - -def configure_kernel(cfg: Config, src_dir: Path) -> None: - """Apply defconfig and run olddefconfig.""" - ai = cfg.arch_info - defconfig = _find_defconfig(cfg) - - make_env = {"ARCH": ai.kernel_arch} - if ai.cross_compile: - make_env["CROSS_COMPILE"] = ai.cross_compile - - _log.log(f"Using defconfig: {defconfig}") - shutil.copy2(defconfig, src_dir / ".config") - run(["make", "olddefconfig"], env=make_env, cwd=src_dir) - # Save the resolved config for debugging - branch = _kernel_branch(cfg.kernel_version) - resolved = cfg.project_dir / "kernel.configs" / f".config.resolved.{branch}.{ai.arch}" - shutil.copy2(src_dir / ".config", resolved) - _log.log(f"Resolved config saved to kernel.configs/.config.resolved.{branch}.{ai.arch}") - - # Increase COMMAND_LINE_SIZE on x86_64 (Tinkerbell needs large cmdlines) - if ai.kernel_arch == "x86_64": - _log.log("Increasing COMMAND_LINE_SIZE to 4096 (x86_64)...") - setup_h = src_dir / "arch" / "x86" / "include" / "asm" / "setup.h" - text = setup_h.read_text() - new_text = re.sub( - r"#define 
COMMAND_LINE_SIZE\s+2048", - "#define COMMAND_LINE_SIZE 4096", - text, - ) - if new_text == text: - _log.warn("COMMAND_LINE_SIZE patch did not match — the kernel default may have changed") - setup_h.write_text(new_text) - - -def build_kernel(cfg: Config, src_dir: Path) -> str: - """Compile the kernel image and modules. Returns the built kernel version string.""" - ai = cfg.arch_info - nproc = os.cpu_count() or 1 - - make_env = {"ARCH": ai.kernel_arch} - if ai.cross_compile: - make_env["CROSS_COMPILE"] = ai.cross_compile - - _log.log(f"Building kernel with {nproc} jobs...") - run( - ["make", f"-j{nproc}", ai.image_target, "modules"], - env=make_env, - cwd=src_dir, - ) - - # Determine actual kernel version from build - result = run( - ["make", "-s", "kernelrelease"], - env={"ARCH": ai.kernel_arch}, - capture=True, - cwd=src_dir, - ) - built_kver = result.stdout.strip() - _log.log(f"Built kernel version: {built_kver}") - return built_kver - - -def install_kernel(cfg: Config, src_dir: Path, built_kver: str) -> None: - """Install modules and vmlinuz into mkosi.output/kernel/{version}/{arch}/.""" - ai = cfg.arch_info - modules_root = cfg.modules_output - - make_env = {"ARCH": ai.kernel_arch} - if ai.cross_compile: - make_env["CROSS_COMPILE"] = ai.cross_compile - - # Install modules into the modules subtree. - # make modules_install writes to {INSTALL_MOD_PATH}/lib/modules/{kver}/. - _log.log("Installing modules...") - run( - ["make", f"INSTALL_MOD_PATH={modules_root}", "modules_install"], - env=make_env, - cwd=src_dir, - ) - - # Strip debug symbols from modules - _log.log("Stripping debug symbols from modules...") - strip_cmd = f"{ai.strip_prefix}strip" - for ko in modules_root.rglob("*.ko"): - run([strip_cmd, "--strip-unneeded", str(ko)], check=False) - - # Compress modules with zstd (the defconfig sets CONFIG_MODULE_COMPRESS_ZSTD - # and CONFIG_MODULE_DECOMPRESS so the kernel can load .ko.zst at runtime). 
- # We compress explicitly here because the build container's modules_install - # may not always invoke zstd, and stripping must happen before compression. - _log.log("Compressing kernel modules with zstd...") - for ko in modules_root.rglob("*.ko"): - run(["zstd", "--rm", "-q", "-19", str(ko)], check=True) - - # Clean up build/source symlinks - mod_base = modules_root / "lib" / "modules" / built_kver - (mod_base / "build").unlink(missing_ok=True) - (mod_base / "source").unlink(missing_ok=True) - - # Move modules from /lib/modules to /usr/lib/modules (merged-usr) - usr_moddir = ensure_dir(modules_root / "usr" / "lib" / "modules" / built_kver) - if mod_base.is_dir(): - for item in mod_base.iterdir(): - dest = usr_moddir / item.name - if dest.exists(): - if dest.is_dir(): - shutil.rmtree(dest) - else: - dest.unlink() - shutil.move(str(item), str(dest)) - # Remove /lib tree - shutil.rmtree(modules_root / "lib", ignore_errors=True) - - # Regenerate module dependency metadata for the compressed .ko.zst files. - _log.log("Running depmod for compressed modules...") - run( - ["depmod", "-a", "-b", str(modules_root / "usr"), built_kver], - check=True, - ) - - # Place vmlinuz alongside modules under kernel_output. iPXE loads - # the kernel image separately — it must NOT end up in the initramfs. - kernel_image = src_dir / ai.kernel_image_path - vmlinuz_dir = ensure_dir(cfg.kernel_output) - - # Remove stale vmlinuz images from prior builds so artifact collection - # never picks an outdated kernel. 
- for old in vmlinuz_dir.glob("vmlinuz-*"): - old.unlink(missing_ok=True) - - shutil.copy2(kernel_image, vmlinuz_dir / f"vmlinuz-{built_kver}") - - _log.log("Kernel build complete:") - vmlinuz = vmlinuz_dir / f"vmlinuz-{built_kver}" - vmlinuz_size = vmlinuz.stat().st_size / (1024 * 1024) - _log.log(f" Image: {vmlinuz} ({vmlinuz_size:.1f}M)") - _log.log(f" Modules: {usr_moddir}/") - _log.log(f" Version: {built_kver}") - _log.log(f" Output: {cfg.kernel_output}") - - -def build(cfg: Config) -> None: - """Full kernel build pipeline — download, configure, build, install.""" - # Clean previous kernel output to ensure idempotency. - # Only the kernel directory is wiped — tools are left intact. - if cfg.kernel_output.exists(): - shutil.rmtree(cfg.kernel_output) - ensure_dir(cfg.kernel_output) - - build_dir = Path("/var/tmp/kernel-build") - - # Obtain kernel source - if cfg.kernel_src and Path(cfg.kernel_src).is_dir(): - _log.log(f"Using provided kernel source at {cfg.kernel_src}") - src_dir = Path(cfg.kernel_src) - else: - src_dir = download_kernel(cfg.kernel_version, build_dir) - - configure_kernel(cfg, src_dir) - built_kver = build_kernel(cfg, src_dir) - install_kernel(cfg, src_dir, built_kver) diff --git a/captain/log.py b/captain/log.py deleted file mode 100644 index c0439b5..0000000 --- a/captain/log.py +++ /dev/null @@ -1,48 +0,0 @@ -"""Colored logging helpers matching the original build.sh output style. - -Use :func:`for_stage` to create a stage-scoped logger whose prefix -includes the stage name (e.g. ``[captainos-kernel]``). The module-level -:func:`log`, :func:`warn`, and :func:`err` convenience functions use a -plain ``[captainos]`` prefix for cross-cutting messages. 
-""" - -from __future__ import annotations - -import sys - -RED = "\033[0;31m" -GREEN = "\033[0;32m" -YELLOW = "\033[1;33m" -NC = "\033[0m" - - -class StageLogger: - """Logger that tags output with an optional stage name.""" - - __slots__ = ("_prefix",) - - def __init__(self, stage: str = "") -> None: - tag = f"captainos-{stage}" if stage else "captainos" - self._prefix = f"[{tag}]" - - def log(self, *args: object) -> None: - print(f"{GREEN}{self._prefix}{NC}", *args, flush=True) - - def warn(self, *args: object) -> None: - print(f"{YELLOW}{self._prefix}{NC}", *args, flush=True) - - def err(self, *args: object) -> None: - print(f"{RED}{self._prefix}{NC}", *args, file=sys.stderr, flush=True) - - -def for_stage(stage: str) -> StageLogger: - """Return a :class:`StageLogger` whose prefix includes *stage*.""" - return StageLogger(stage) - - -# Module-level convenience functions (un-staged [captainos] prefix). -_default = StageLogger() - -log = _default.log -warn = _default.warn -err = _default.err diff --git a/captain/oci/_build.py b/captain/oci/_build.py index 657f451..ac846d6 100644 --- a/captain/oci/_build.py +++ b/captain/oci/_build.py @@ -3,15 +3,16 @@ from __future__ import annotations import contextlib -import shutil +import logging import tarfile from datetime import datetime from pathlib import Path from captain import artifacts, buildah -from captain.log import StageLogger from captain.util import get_arch_info +log = logging.getLogger(__name__) + def _deterministic_tar(file_path: Path, output_dir: Path) -> Path: """Create a tar containing a single file with deterministic metadata. @@ -38,36 +39,30 @@ def _collect_arch_artifacts( project_dir: Path, out: Path, arch: str, - kernel_version: str, - logger: StageLogger, + flavor_id: str, + has_iso: bool, ) -> list[Path]: """Collect and return the artifact files for a single architecture. Returns [vmlinuz, initramfs, iso, checksums] paths in *out*. 
""" - # Collect kernel - vmlinuz_dir = project_dir / "mkosi.output" / "kernel" / kernel_version / arch - vmlinuz_files = sorted(vmlinuz_dir.glob("vmlinuz-*")) if vmlinuz_dir.is_dir() else [] oarch = get_arch_info(arch).output_arch - vmlinuz_dst = out / f"vmlinuz-{kernel_version}-{oarch}" - if vmlinuz_files: - shutil.copy2(vmlinuz_files[0], vmlinuz_dst) - logger.log(f"kernel: {vmlinuz_dst}") - else: - logger.warn(f"No kernel image found for {arch}") arch_files = [ - out / f"vmlinuz-{kernel_version}-{oarch}", - out / f"initramfs-{kernel_version}-{oarch}", - out / f"captainos-{kernel_version}-{oarch}.iso", + out / f"vmlinuz-{flavor_id}-{oarch}", + out / f"initramfs-{flavor_id}-{oarch}", ] - checksums_path = out / f"sha256sums-{kernel_version}-{oarch}.txt" - artifacts.collect_checksums(arch_files, checksums_path, logger=logger) + + if has_iso: + arch_files += [out / f"captainos-{flavor_id}-{oarch}.iso"] + + checksums_path = out / f"sha256sums-{flavor_id}-{oarch}.txt" + artifacts.collect_checksums(arch_files, checksums_path) push_files = [*arch_files, checksums_path] for f in push_files: if not f.is_file(): - logger.err(f"Missing artifact: {f}") + log.error("Missing artifact: %s", f) raise SystemExit(1) return push_files @@ -77,7 +72,6 @@ def _build_platform_image( platform: str, sha: str, repository: str, - logger: StageLogger, *, created: str, tag: str, @@ -118,8 +112,8 @@ def _build_platform_image( intermediates: list[str] = [] for i, tar_path in enumerate(layer_tars): is_last = i == len(layer_tars) - 1 - ctr = buildah.from_image(current, platform=platform, logger=logger) - buildah.add(ctr, [tar_path], logger=logger) + ctr = buildah.from_image(current, platform=platform) + buildah.add(ctr, [tar_path]) if is_last: buildah.config( ctr, @@ -127,15 +121,14 @@ def _build_platform_image( arch=arch, annotations=oci_metadata, labels=oci_metadata, - logger=logger, ) prev = current - current = buildah.commit(ctr, timestamp=epoch, logger=logger) + current = buildah.commit(ctr, 
timestamp=epoch) if prev != base: intermediates.append(prev) for img in intermediates: with contextlib.suppress(Exception): - buildah.rmi(img, logger=logger) + buildah.rmi(img) return current diff --git a/captain/oci/_common.py b/captain/oci/_common.py index 4bc4a8d..9bb5afc 100644 --- a/captain/oci/_common.py +++ b/captain/oci/_common.py @@ -5,10 +5,6 @@ import subprocess from pathlib import Path -from captain.log import for_stage - -_default_log = for_stage("release") - _ARCHES = ("amd64", "arm64") diff --git a/captain/oci/_publish.py b/captain/oci/_publish.py index 75bd9fc..eebf520 100644 --- a/captain/oci/_publish.py +++ b/captain/oci/_publish.py @@ -3,23 +3,26 @@ from __future__ import annotations import contextlib +import logging +import tarfile from datetime import datetime, timezone from pathlib import Path from uuid import uuid4 from captain import buildah, skopeo from captain.config import Config -from captain.log import StageLogger -from captain.util import ensure_dir +from captain.flavor import BaseFlavor +from captain.util import ensure_dir, get_arch_info from ._build import _build_platform_image, _collect_arch_artifacts, _deterministic_tar -from ._common import _ARCHES, _default_log, _image_ref +from ._common import _ARCHES, _image_ref + +log = logging.getLogger(__name__) def _create_push_cleanup( image_ids: list[str], dest_ref: str, - logger: StageLogger, ) -> None: """Create a manifest list from *image_ids*, push it to *dest_ref*, and clean up. 
@@ -30,17 +33,17 @@ def _create_push_cleanup( temp_name = f"captain-local-{uuid4().hex[:12]}" manifest_id: str | None = None try: - manifest_id = buildah.manifest_create(temp_name, logger=logger) + manifest_id = buildah.manifest_create(temp_name) for image_id in image_ids: - buildah.manifest_add(manifest_id, image_id, logger=logger) - buildah.manifest_push(manifest_id, dest_ref, logger=logger) + buildah.manifest_add(manifest_id, image_id) + buildah.manifest_push(manifest_id, dest_ref) finally: if manifest_id is not None: with contextlib.suppress(Exception): - buildah.rmi(manifest_id, logger=logger) + buildah.rmi(manifest_id) for image_id in image_ids: with contextlib.suppress(Exception): - buildah.rmi(image_id, logger=logger) + buildah.rmi(image_id) def _publish_single_arch( @@ -52,7 +55,6 @@ def _publish_single_arch( repository: str, artifact_name: str, created: str, - logger: StageLogger, ) -> None: """Build a per-arch multi-arch index and push it. @@ -66,14 +68,13 @@ def _publish_single_arch( f"linux/{platform_arch}", sha, repository, - logger, created=created, tag=tag, artifact_name=artifact_name, ) image_ids.append(image_id) - _create_push_cleanup(image_ids, ref, logger) + _create_push_cleanup(image_ids, ref) def _publish_combined( @@ -86,7 +87,6 @@ def _publish_combined( sha: str, created: str, force: bool = False, - logger: StageLogger, ) -> bool: """Build and push the combined multi-arch image. @@ -104,19 +104,20 @@ def _publish_combined( combined_ref = _image_ref(registry, repository, artifact_name, tag) # Skip if the combined image already exists. - if not force and skopeo.image_exists(combined_ref, logger=logger): - logger.log(f"{combined_ref} already exists — skipping (use --force to overwrite)") + if not force and skopeo.image_exists(combined_ref): + log.info("%s already exists — skipping (use --force to overwrite)", combined_ref) return False # Ensure per-arch images exist in the registry. 
for arch in _ARCHES: per_arch_tag = f"{tag}-{arch}" per_arch_ref = _image_ref(registry, repository, artifact_name, per_arch_tag) - if skopeo.image_exists(per_arch_ref, logger=logger): - logger.log(f"Found {per_arch_ref} in registry — will reuse layers for combined image") + if skopeo.image_exists(per_arch_ref): + log.info("Found %s in registry — will reuse layers for combined image", per_arch_ref) else: - logger.log( - f"{per_arch_ref} not found in registry — building and pushing before combined image" + log.info( + "%s not found in registry — building and pushing before combined image", + per_arch_ref, ) _publish_single_arch( layer_tars=arch_layer_tars[arch], @@ -126,7 +127,6 @@ def _publish_combined( repository=repository, artifact_name=artifact_name, created=created, - logger=logger, ) # Build the combined image using per-arch registry images as bases. @@ -140,7 +140,6 @@ def _publish_combined( f"linux/{arch}", sha, repository, - logger, created=created, tag=tag, artifact_name=artifact_name, @@ -148,12 +147,13 @@ def _publish_combined( ) image_ids.append(image_id) - _create_push_cleanup(image_ids, combined_ref, logger) + _create_push_cleanup(image_ids, combined_ref) return True def publish( cfg: Config, + flavor: BaseFlavor, *, target: str, registry: str, @@ -162,7 +162,6 @@ def publish( tag: str, sha: str, force: bool = False, - logger: StageLogger | None = None, ) -> None: """Collect artifacts and publish a multi-arch OCI index. @@ -177,15 +176,14 @@ def publish( (unless *force* is ``True``). For per-arch targets this prevents overwriting images that the combined image depends on. """ - _log = logger or _default_log arches = list(_ARCHES) if target == "combined" else [target] tag_suffix = "" if target == "combined" else f"-{target}" full_tag = f"{tag}{tag_suffix}" final_ref = _image_ref(registry, repository, artifact_name, full_tag) # For per-arch targets, skip if the image already exists. 
- if target != "combined" and not force and skopeo.image_exists(final_ref, logger=_log): - _log.log(f"{final_ref} already exists — skipping (use --force to overwrite)") + if target != "combined" and not force and skopeo.image_exists(final_ref): + log.info("%s already exists — skipping (use --force to overwrite)", final_ref) return out = ensure_dir(cfg.output_dir) @@ -195,18 +193,28 @@ def publish( arch_files: dict[str, list[Path]] = {} for arch in arches: arch_files[arch] = _collect_arch_artifacts( - cfg.project_dir, - out, - arch, - cfg.kernel_version, - _log, + cfg.project_dir, out, arch, cfg.flavor_id, has_iso=flavor.has_iso() ) # Create deterministic layer tars (shared across manifest pushes). arch_layer_tars: dict[str, list[Path]] = {} for arch, files in arch_files.items(): + log.info("Creating layer tars for %s... files: %s", arch, files) arch_layer_tars[arch] = [_deterministic_tar(f, out) for f in files] + # A single layer for all DTBs, if any; those are highly compressible together. 
+ dtb_dir_in = out / f"dtb-{cfg.flavor_id}-{get_arch_info(arch).output_arch}" + if not dtb_dir_in.is_dir(): + log.warning("No dtbs directory found for %s: %s", arch, dtb_dir_in) + else: + log.info(f"Found DTB directory for {arch}: {dtb_dir_in}") + all_dtb_files: list[Path] = sorted(dtb_dir_in.glob("**/*.dtb*")) + dtb_tar_path = out / f"dtbs-{cfg.flavor_id}-{arch}.tar" + with tarfile.open(dtb_tar_path, "w") as tar: + for f in all_dtb_files: + tar.add(f, arcname=f.relative_to(out)) + arch_layer_tars[arch].append(dtb_tar_path) + pushed = True try: if target == "combined": @@ -219,7 +227,6 @@ def publish( sha=sha, created=created, force=force, - logger=_log, ) else: _publish_single_arch( @@ -230,7 +237,6 @@ def publish( repository=repository, artifact_name=artifact_name, created=created, - logger=_log, ) finally: for tars in arch_layer_tars.values(): @@ -245,12 +251,12 @@ def publish( for arch in arches: artifact_names.extend(f.name for f in arch_files.get(arch, [])) platforms = [f"linux/{a}" for a in _ARCHES] - _log.log("") - _log.log("Publish complete") - _log.log(f" Image: {final_ref}") - _log.log(f" Target: {target}") - _log.log(f" Platforms: {', '.join(platforms)}") - _log.log(f" Layers: {len(artifact_names)}") - _log.log(" Artifacts:") + log.info("") + log.info("Publish complete") + log.info(" Image: %s", final_ref) + log.info(" Target: %s", target) + log.info(" Platforms: %s", ", ".join(platforms)) + log.info(" Layers: %d", len(artifact_names)) + log.info(" Artifacts:") for name in artifact_names: - _log.log(f" - {name}") + log.info(" - %s", name) diff --git a/captain/oci/_pull.py b/captain/oci/_pull.py index 75860e0..13165f6 100644 --- a/captain/oci/_pull.py +++ b/captain/oci/_pull.py @@ -2,12 +2,14 @@ from __future__ import annotations +import logging from pathlib import Path from captain import skopeo -from captain.log import StageLogger -from ._common import _ARCHES, _default_log, _image_ref +from ._common import _ARCHES, _image_ref + +log = 
logging.getLogger(__name__) def pull( @@ -18,7 +20,6 @@ def pull( tag: str, target: str, output_dir: Path, - logger: StageLogger | None = None, ) -> None: """Pull and extract OCI artifacts. @@ -26,20 +27,19 @@ def pull( suffix is ``-{target}`` for single architectures, or bare ``{tag}`` for ``"combined"``. """ - _log = logger or _default_log tag_suffix = "" if target == "combined" else f"-{target}" ref = _image_ref(registry, repository, artifact_name, f"{tag}{tag_suffix}") - skopeo.export_image(ref, output_dir, logger=_log) + skopeo.export_image(ref, output_dir) # Recap extracted = sorted(f.name for f in Path(output_dir).iterdir() if f.is_file()) - _log.log("") - _log.log("Pull complete") - _log.log(f" Image: {ref}") - _log.log(f" Target: {target}") - _log.log(" Artifacts:") + log.info("") + log.info("Pull complete") + log.info(" Image: %s", ref) + log.info(" Target: %s", target) + log.info(" Artifacts:") for name in extracted: - _log.log(f" - {name}") + log.info(" - %s", name) def tag_image( @@ -49,14 +49,12 @@ def tag_image( artifact_name: str, src_tag: str, new_tag: str, - logger: StageLogger | None = None, ) -> None: """Tag an existing OCI artifact image with a new version.""" - _log = logger or _default_log src_ref = _image_ref(registry, repository, artifact_name, src_tag) dest_ref = _image_ref(registry, repository, artifact_name, new_tag) - skopeo.copy(src_ref, dest_ref, logger=_log) - _log.log(f"Tagged {src_ref} → {new_tag}") + skopeo.copy(src_ref, dest_ref) + log.info("Tagged %s → %s", src_ref, new_tag) def tag_all( @@ -67,10 +65,8 @@ def tag_all( src_tag: str, new_tag: str, arches: list[str] | None = None, - logger: StageLogger | None = None, ) -> None: """Tag all artifact images (per-arch + combined) with a new version.""" - _log = logger or _default_log arches = arches or list(_ARCHES) for a in arches: tag_image( @@ -79,7 +75,6 @@ def tag_all( artifact_name=artifact_name, src_tag=f"{src_tag}-{a}", new_tag=f"{new_tag}-{a}", - logger=_log, ) # Tag the 
combined image (no arch suffix). tag_image( @@ -88,14 +83,13 @@ def tag_all( artifact_name=artifact_name, src_tag=src_tag, new_tag=new_tag, - logger=_log, ) # Recap image = f"{registry}/{repository}/{artifact_name}" - _log.log("") - _log.log("Tag complete") - _log.log(f" Image: {image}") + log.info("") + log.info("Tag complete") + log.info(" Image: %s", image) for a in arches: - _log.log(f" {src_tag}-{a} → {new_tag}-{a}") - _log.log(f" {src_tag} → {new_tag}") + log.info(" %s-%s → %s-%s", src_tag, a, new_tag, a) + log.info(" %s → %s", src_tag, new_tag) diff --git a/captain/qemu.py b/captain/qemu.py index 942cf93..ae0da4a 100644 --- a/captain/qemu.py +++ b/captain/qemu.py @@ -3,13 +3,13 @@ from __future__ import annotations import argparse +import logging import sys from captain.config import Config -from captain.log import for_stage from captain.util import run -_log = for_stage("qemu") +log = logging.getLogger(__name__) # Tinkerbell kernel cmdline parameters. # Maps the argparse dest name → kernel cmdline key. @@ -40,9 +40,9 @@ def _tink_cmdline(args: argparse.Namespace) -> str: # Kernel cmdline is space-delimited; whitespace in values would # split them into multiple arguments and silently change meaning. if any(ch.isspace() for ch in value): - _log.err( - f"--{attr.replace('_', '-')} must not contain whitespace; " - "cannot safely add it to the kernel cmdline." + log.error( + "--%s must not contain whitespace; cannot safely add it to the kernel cmdline.", + attr.replace("_", "-"), ) sys.exit(1) parts.append(f"{cmdline_key}={value}") @@ -51,7 +51,7 @@ def _tink_cmdline(args: argparse.Namespace) -> str: ipam = getattr(args, "ipam", "") or "" if ipam: if any(ch.isspace() for ch in ipam): - _log.err("--ipam must not contain whitespace.") + log.error("--ipam must not contain whitespace.") sys.exit(1) parts.append(f"ipam={ipam}") @@ -65,8 +65,8 @@ def run_qemu(cfg: Config, args: argparse.Namespace | None = None) -> None: :mod:`configargparse`. 
When provided, Tinkerbell kernel cmdline parameters are drawn from it instead of the environment. """ - kernel = cfg.output_dir / f"vmlinuz-{cfg.kernel_version}-{cfg.arch_info.output_arch}" - initrd = cfg.output_dir / f"initramfs-{cfg.kernel_version}-{cfg.arch_info.output_arch}" + kernel = cfg.output_dir / f"vmlinuz-{cfg.flavor_id}-{cfg.arch_info.output_arch}" + initrd = cfg.output_dir / f"initramfs-{cfg.flavor_id}-{cfg.arch_info.output_arch}" missing: list[str] = [] if not kernel.is_file(): @@ -74,27 +74,26 @@ def run_qemu(cfg: Config, args: argparse.Namespace | None = None) -> None: if not initrd.is_file(): missing.append(str(initrd)) if missing: - _log.err("Build artifacts not found:") + log.error("Build artifacts not found:") for m in missing: - _log.err(f" {m}") - _log.err(f"Run './build.py --kernel-version {cfg.kernel_version}' first.") + log.error(" %s", m) sys.exit(1) tink = _tink_cmdline(args) if args is not None else "" if args is not None and not any( getattr(args, v, None) for v in ("tink_worker_image", "tink_docker_registry") ): - _log.warn( + log.warning( "Neither --tink-worker-image nor --tink-docker-registry is set. " "tink-agent services will not start." 
) - _log.log("Booting CaptainOS in QEMU (Ctrl-A X to exit)...") + log.info("Booting CaptainOS in QEMU (Ctrl-A X to exit)...") qemu_cmd = cfg.arch_info.qemu_binary append = f"console=ttyS0 audit=0 {tink} {cfg.qemu_append}".strip() - _log.log(f"Kernel cmdline: {append}") + log.info("Kernel cmdline: %s", append) run( [ qemu_cmd, diff --git a/captain/skopeo.py b/captain/skopeo.py index be535de..a3b5712 100644 --- a/captain/skopeo.py +++ b/captain/skopeo.py @@ -8,23 +8,18 @@ from __future__ import annotations import json +import logging import tarfile from pathlib import Path -from captain.log import StageLogger, for_stage from captain.util import run, safe_extractall -_default_log = for_stage("skopeo") +log = logging.getLogger(__name__) -def image_exists( - image_ref: str, - *, - logger: StageLogger | None = None, -) -> bool: +def image_exists(image_ref: str) -> bool: """Return ``True`` if *image_ref* exists in the remote registry.""" - _log = logger or _default_log - _log.log(f"Checking registry for {image_ref}") + log.info("Checking registry for %s", image_ref) result = run( ["skopeo", "inspect", f"docker://{image_ref}"], capture=True, @@ -33,14 +28,9 @@ def image_exists( return result.returncode == 0 -def inspect_digest( - image_ref: str, - *, - logger: StageLogger | None = None, -) -> str: +def inspect_digest(image_ref: str) -> str: """Return the manifest digest (``sha256:…``) of *image_ref*.""" - _log = logger or _default_log - _log.log(f"skopeo inspect digest {image_ref}") + log.info("skopeo inspect digest %s", image_ref) result = run( [ "skopeo", @@ -54,12 +44,7 @@ def inspect_digest( return result.stdout.strip() -def copy( - src: str, - dest: str, - *, - logger: StageLogger | None = None, -) -> None: +def copy(src: str, dest: str) -> None: """Copy an image from *src* to *dest*. *src* and *dest* are plain image references (e.g. @@ -67,8 +52,7 @@ def copy( added automatically. 
Typically used for retagging: the source and destination differ only in the tag component. """ - _log = logger or _default_log - _log.log(f"skopeo copy {src} → {dest}") + log.info("skopeo copy %s → %s", src, dest) run(["skopeo", "copy", "--all", f"docker://{src}", f"docker://{dest}"]) @@ -77,7 +61,6 @@ def copy_to_dir( output_dir: Path, *, platform: str | None = None, - logger: StageLogger | None = None, ) -> Path: """Download *image_ref* to a local directory. @@ -86,7 +69,6 @@ def copy_to_dir( Returns *output_dir*. """ - _log = logger or _default_log output_dir.mkdir(parents=True, exist_ok=True) cmd: list[str] = ["skopeo", "copy"] if platform: @@ -94,7 +76,7 @@ def copy_to_dir( if len(parts) == 2: cmd += ["--override-os", parts[0], "--override-arch", parts[1]] cmd += [f"docker://{image_ref}", f"dir:{output_dir}"] - _log.log(f"skopeo copy {image_ref} → dir:{output_dir}") + log.info("skopeo copy %s → dir:%s", image_ref, output_dir) run(cmd) return output_dir @@ -104,7 +86,6 @@ def export_image( output_dir: Path, *, platform: str | None = None, - logger: StageLogger | None = None, ) -> None: """Download and extract all layers from *image_ref* into *output_dir*. @@ -114,12 +95,11 @@ def export_image( """ import tempfile - _log = logger or _default_log output_dir.mkdir(parents=True, exist_ok=True) with tempfile.TemporaryDirectory(prefix="skopeo-export-") as tmp: tmp_dir = Path(tmp) - copy_to_dir(image_ref, tmp_dir, platform=platform, logger=_log) + copy_to_dir(image_ref, tmp_dir, platform=platform) # Parse manifest to find layer blob digests. 
manifest_path = tmp_dir / "manifest.json" @@ -137,6 +117,6 @@ def export_image( if not blob_file.exists(): raise FileNotFoundError(f"Layer blob not found: {digest_str}") - _log.log(f"Extracting layer {digest_str[:20]}…") + log.info("Extracting layer %s…", digest_str[:20]) with tarfile.open(blob_file, "r:*") as tf: safe_extractall(tf, output_dir) diff --git a/captain/tools.py b/captain/tools.py index 315f04f..d4f6fac 100644 --- a/captain/tools.py +++ b/captain/tools.py @@ -8,6 +8,7 @@ from __future__ import annotations import io +import logging import os import stat import tarfile @@ -16,10 +17,9 @@ from pathlib import Path from captain.config import Config -from captain.log import for_stage from captain.util import ensure_dir, safe_extractall -_log = for_stage("tools") +log = logging.getLogger(__name__) @dataclass(slots=True) @@ -91,7 +91,7 @@ def _check_binary(dest_dir: Path, tool: ToolSpec) -> str | None: def _download_tarball(url: str, dest_dir: Path, members: list[str]) -> None: """Download a gzipped tarball and extract specific members.""" - _log.log(f" Downloading {url}") + log.info(" Downloading %s", url) with urllib.request.urlopen(url, timeout=60) as resp: data = resp.read() @@ -114,7 +114,7 @@ def _download_tarball(url: str, dest_dir: Path, members: list[str]) -> None: def _download_binary(url: str, dest: Path) -> None: """Download a single binary file.""" - _log.log(f" Downloading {url}") + log.info(" Downloading %s", url) with urllib.request.urlopen(url, timeout=60) as resp: dest.parent.mkdir(parents=True, exist_ok=True) with open(dest, "wb") as f: @@ -127,11 +127,11 @@ def download_tool(tool: ToolSpec, arch: str, output_base: Path, force: bool) -> dest_dir = ensure_dir(output_base / tool.dest) if not force and _check_binary(dest_dir, tool): - _log.log(f"{tool.name} already present (set FORCE_TOOLS=1 to re-download)") + log.info("%s already present (set FORCE_TOOLS=1 to re-download)", tool.name) return url = 
tool.url_template.format(version=tool.version, arch=arch) - _log.log(f"Installing {tool.name} {tool.version} ({arch})...") + log.info("Installing %s %s (%s)...", tool.name, tool.version, arch) if tool.members is not None: # Tarball with selective extraction @@ -145,16 +145,16 @@ def download_tool(tool: ToolSpec, arch: str, output_base: Path, force: bool) -> p = dest_dir / name if p.exists(): p.unlink() - _log.log(f" Removed leftover: {p.name}") + log.info(" Removed leftover: %s", p.name) # Report installed files if tool.members: for m in tool.members: p = dest_dir / m if p.exists(): - _log.log(f" {tool.name}: {p}") + log.info(" %s: %s", tool.name, p) elif tool.binary_name: - _log.log(f" {tool.name}: {dest_dir / tool.binary_name}") + log.info(" %s: %s", tool.name, dest_dir / tool.binary_name) def download_all(cfg: Config) -> None: @@ -165,9 +165,5 @@ def download_all(cfg: Config) -> None: for tool in TOOLS: download_tool(tool, arch, output_base, cfg.force_tools) - _log.log("Tool download complete.") - - # NOTE: UPX compression is not used. The initramfs is a zstd-compressed - # CPIO archive, and zstd compresses raw ELF binaries better than UPX-packed - # ones (UPX output looks like random data to zstd, defeating its compression). 
- _log.log("All tools ready.") + log.info("Tool download complete.") + log.info("All tools ready.") diff --git a/captain/util.py b/captain/util.py index 38c3c16..5ed8aea 100644 --- a/captain/util.py +++ b/captain/util.py @@ -2,14 +2,22 @@ from __future__ import annotations +import logging import os +import platform import subprocess import sys import tarfile from dataclasses import dataclass from pathlib import Path -from captain.log import err +from rich.panel import Panel +from rich.rule import Rule +from rich.syntax import Syntax + +import captain + +log = logging.getLogger(__name__) @dataclass(slots=True) @@ -18,14 +26,9 @@ class ArchInfo: arch: str # canonical name: amd64 | arm64 output_arch: str # user-facing name in artifact filenames: x86_64 | aarch64 - kernel_arch: str # kernel ARCH value - cross_compile: str # CROSS_COMPILE prefix (empty for native) - image_target: str # kernel image make target - kernel_image_path: str # relative path to built kernel image dl_arch: str # architecture name in download URLs mkosi_arch: str # mkosi --architecture value qemu_binary: str # QEMU system emulator binary - strip_prefix: str # prefix for strip command def get_arch_info(arch: str) -> ArchInfo: @@ -35,33 +38,33 @@ def get_arch_info(arch: str) -> ArchInfo: return ArchInfo( arch="amd64", output_arch="x86_64", - kernel_arch="x86_64", - cross_compile="", - image_target="bzImage", - kernel_image_path="arch/x86/boot/bzImage", dl_arch="amd64", mkosi_arch="x86-64", qemu_binary="qemu-system-x86_64", - strip_prefix="", ) case "arm64" | "aarch64": return ArchInfo( arch="arm64", output_arch="aarch64", - kernel_arch="arm64", - cross_compile="aarch64-linux-gnu-", - image_target="Image", - kernel_image_path="arch/arm64/boot/Image", dl_arch="arm64", mkosi_arch="arm64", qemu_binary="qemu-system-aarch64", - strip_prefix="aarch64-linux-gnu-", ) case _: - err(f"Unsupported architecture: {arch}") + log.error("Unsupported architecture: %s", arch) sys.exit(1) +def 
detect_current_machine_arch() -> str: + machine = platform.machine().lower() + if machine in ("aarch64", "arm64"): + return "arm64" + elif machine in ("x86_64", "amd64"): + return "amd64" + else: + raise RuntimeError(f"Unsupported architecture: {machine}") + + def run( cmd: list[str], *, @@ -74,7 +77,21 @@ def run( run_env: dict[str, str] | None = None if env is not None: run_env = {**os.environ, **env} - return subprocess.run( + + # If not capturing, and debugging, emit a Rich separator line, for visual clarity. + if not capture and log.isEnabledFor(logging.DEBUG): + syntax = Syntax( + " ".join(cmd), "bash", theme="monokai", word_wrap=True, background_color="default" + ) + panel = Panel( + syntax, + title="Executing shell command", + width=captain.console.width, + ) + captain.console.print(panel) + captain.console.print(Rule(f"⮕ Starting subprocess: {cmd} ⮕", style="green")) + + proc = subprocess.run( cmd, check=check, capture_output=capture, @@ -83,6 +100,14 @@ def run( cwd=cwd, ) + if capture: + return proc + + if log.isEnabledFor(logging.DEBUG): + captain.console.print(Rule(f"⮕ Finished subprocess: {cmd} ⮕", style="green")) + + return proc + def ensure_dir(path: Path) -> Path: """Create a directory (and parents) if it doesn't exist, return the path.""" @@ -137,17 +162,6 @@ def _missing(cmds: list[str]) -> list[str]: return [cmd for cmd in cmds if _shutil.which(cmd) is None] -def check_kernel_dependencies(arch: str) -> list[str]: - """Check host tools required for a native kernel build. - - Returns a list of missing command names (empty if all found). - """ - required = ["make", "gcc", "flex", "bison", "bc", "rsync", "strip", "zstd", "depmod"] - if arch in ("arm64", "aarch64"): - required += ["aarch64-linux-gnu-gcc", "aarch64-linux-gnu-strip"] - return _missing(required) - - def check_mkosi_dependencies() -> list[str]: """Check host tools required for a native mkosi image build. 
@@ -178,4 +192,4 @@ def check_dependencies(arch: str) -> list[str]: Returns a list of missing command names (empty if all found). """ - return check_kernel_dependencies(arch) + check_mkosi_dependencies() + return check_mkosi_dependencies() diff --git a/docs/kernel-build.md b/docs/kernel-build.md index 0130ef3..0f5fb21 100644 --- a/docs/kernel-build.md +++ b/docs/kernel-build.md @@ -1,178 +1,5 @@ # Kernel Build Process -CaptainOS builds a custom Linux kernel from upstream source with -project-specific defconfigs. The build is orchestrated by the `captain.kernel` -module and driven through the CLI (`./build.py kernel`). +CaptainOS uses standard or custom Debian-packaged kernels. -## Quick Start - -```bash -# Build the kernel (defaults to Docker mode, amd64, kernel 6.18.16) -./build.py kernel - -# Build for arm64 -./build.py kernel --arch=arm64 - -# Use a local kernel source tree instead of downloading -./build.py kernel --kernel-src=/path/to/linux - -# Force a rebuild even if outputs already exist -./build.py kernel --force-kernel - -# Build natively (no Docker) -./build.py kernel --kernel-mode=native -``` - -Every flag also has an environment variable form (e.g. `ARCH`, `KERNEL_VERSION`, -`KERNEL_MODE`, `KERNEL_SRC`, `FORCE_KERNEL`). - -## Execution Modes - -The `--kernel-mode` flag controls how the kernel is built: - -| Mode | Description | -|----------|------------------------------------------------------------------------| -| `docker` | (default) Builds inside the `captainos-builder` Docker container. | -| `native` | Builds directly on the host; requires kernel build tools to be installed. | -| `skip` | Skips the kernel stage entirely. | - -In Docker mode, the builder container is based on Debian Trixie and includes -all kernel build dependencies (gcc, make, flex, bison, bc, libelf, libssl, -dwarves/pahole, etc.) plus cross-compilation toolchains for arm64. Inside -the container, the mode is forced to `native` so Docker is never invoked -recursively. 
- -## Build Pipeline - -The `kernel.build()` function runs four stages sequentially: - -### 1. Download - -`kernel.download_kernel()` fetches the upstream tarball from -`cdn.kernel.org`: - -``` -https://cdn.kernel.org/pub/linux/kernel/v{major}.x/linux-{version}.tar.xz -``` - -The tarball is extracted into `/var/tmp/kernel-build/linux-{version}/`. -If the extracted source directory already exists, it is reused. - -When `--kernel-src` is set, this step is skipped and the local source tree -is used as-is. - -### 2. Configure - -`kernel.configure_kernel()` applies the project defconfig for the target -architecture: - -1. Copies `kernel.configs/{major}.{minor}.y.{arch}` into the source tree as `.config`. -2. Runs `make olddefconfig` to resolve any new symbols against defaults. -3. Saves the fully resolved config to `kernel.configs/.config.resolved.{branch}.{arch}` for - debugging. - -If no defconfig file exists for the target kernel version and arch, the build -exits with an error listing the available kernel branches. - -For `x86_64` builds, the `COMMAND_LINE_SIZE` is patched from 2048 to 4096 -in `arch/x86/include/asm/setup.h` because Tinkerbell passes large kernel -command lines. - -Cross-compilation is set up automatically via the `ARCH` and -`CROSS_COMPILE` environment variables (e.g. `CROSS_COMPILE=aarch64-linux-gnu-` -for arm64). - -### 3. Compile - -`kernel.build_kernel()` runs the parallel make: - -```bash -make -j$(nproc) {image_target} modules -``` - -The image target is architecture-dependent: - -| Architecture | `ARCH` | Image target | Output path | -|-------------|-----------|-------------|-------------------------------| -| amd64 | `x86_64` | `bzImage` | `arch/x86/boot/bzImage` | -| arm64 | `arm64` | `Image` | `arch/arm64/boot/Image` | - -After compilation, `make -s kernelrelease` is invoked to determine the -exact built kernel version string (e.g. `6.18.16-captainos`). - -### 4. 
Install - -`kernel.install_kernel()` places the built artifacts into the output tree: - -1. **Module installation** — `make INSTALL_MOD_PATH=... modules_install` - installs modules to `mkosi.output/kernel/{version}/{arch}/modules/`. -2. **Strip** — Debug symbols are stripped from every `.ko` file with - `strip --strip-unneeded`. -3. **Compress** — Modules are compressed with `zstd --rm -q -19` producing - `.ko.zst` files. The defconfig enables `CONFIG_MODULE_COMPRESS_ZSTD` - and `CONFIG_MODULE_DECOMPRESS` so the kernel loads compressed modules at - runtime. -4. **Clean up** — The `build` and `source` symlinks are removed from the - modules directory. -5. **Merged-usr layout** — Modules are relocated from `lib/modules/` to - `usr/lib/modules/` to follow the merged-usr filesystem convention. -6. **depmod** — `depmod -a` regenerates module dependency metadata for - the compressed `.ko.zst` files. -7. **Kernel image** — The kernel image (`vmlinuz-{version}`) is copied to - `mkosi.output/kernel/{kernel_version}/{arch}/`. It is kept separate from - the extra-tree so it is **not** included in the initramfs CPIO — iPXE loads - the kernel independently. - -## Output Layout - -After a successful build, the kernel stage produces: - -``` -mkosi.output/ -├── tools/{arch}/ # tools only (containerd, runc, etc.) -│ ├── usr/local/bin/ -│ └── opt/cni/bin/ -└── kernel/{kernel_version}/{arch}/ - ├── vmlinuz-{version} # kernel image (bzImage or Image) - └── modules/ # passed as --extra-tree to mkosi - └── usr/lib/modules/{version}/ - ├── kernel/... # compressed .ko.zst module files - ├── modules.dep - ├── modules.dep.bin - └── ... -``` - -The tools and modules subtrees are both passed to mkosi via -separate `--extra-tree=` flags and merged into the initramfs. The vmlinuz -image is collected into `out/` by `artifacts.collect_kernel()` as -`out/vmlinuz-{kernel_version}-{arch}`. 
- -## Idempotency - -The CLI checks for existing outputs before starting: - -- If both `mkosi.output/kernel/{kernel_version}/{arch}/modules/usr/lib/modules/` and - `mkosi.output/kernel/{kernel_version}/{arch}/vmlinuz-*` exist, the build is skipped. -- Use `--force-kernel` (or `FORCE_KERNEL=1`) to force a rebuild. -- If modules exist but the vmlinuz is missing, the kernel is rebuilt - automatically. - -## Defconfigs - -Architecture-specific defconfigs live in the `kernel.configs/` directory, -named by stable branch: `{major}.{minor}.y.{arch}`. This allows -multiple kernel versions to coexist — each stable branch (e.g. 6.18.y, -6.19.y) has its own config per architecture. - -- `kernel.configs/6.18.y.amd64` — x86_64 config adapted for kernel 6.18. -- `kernel.configs/6.18.y.arm64` — arm64 config adapted for kernel 6.18. -- `kernel.configs/6.19.y.amd64` — x86_64 config adapted for kernel 6.19. -- `kernel.configs/6.19.y.arm64` — arm64 config adapted for kernel 6.19. - -The default kernel version is defined as `DEFAULT_KERNEL_VERSION` in -`captain/config.py` and can be overridden via `--kernel-version` or -the `KERNEL_VERSION` environment variable. - -Both configs include support for bare-metal provisioning, container -runtimes (cgroups v2, namespaces, overlayfs), and broad hardware/network -driver coverage. The local version suffix is set to `-captainos`. +The default flavor, `trixie-full`, uses `linux-image-generic` default Debian kernel. diff --git a/kernel.configs/6.18.y.amd64 b/kernel.configs/6.18.y.amd64 deleted file mode 100644 index c6d243c..0000000 --- a/kernel.configs/6.18.y.amd64 +++ /dev/null @@ -1,1032 +0,0 @@ -# CaptainOS kernel defconfig for x86_64 -# Comprehensive config for bare-metal provisioning with container/network support. -# Derived from HookOS generic-6.6.y-x86_64 proven config, adapted for kernel 6.18. 
-# -# Generate a full .config from this: -# cp 6.18.y.amd64 .config && make olddefconfig - -# --- Architecture --- -CONFIG_64BIT=y -CONFIG_X86_64=y -CONFIG_SMP=y -CONFIG_NR_CPUS=128 -# CONFIG_X86_EXTENDED_PLATFORM is not set -CONFIG_X86_INTEL_LPSS=y -CONFIG_HYPERVISOR_GUEST=y -CONFIG_PARAVIRT_SPINLOCKS=y -CONFIG_XEN=y -CONFIG_XEN_PVH=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -# CONFIG_X86_MCE is not set -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=y -# CONFIG_X86_5LEVEL is not set -CONFIG_HZ_1000=y -CONFIG_PHYSICAL_ALIGN=0x1000000 -CONFIG_LEGACY_VSYSCALL_NONE=y -# CONFIG_MODIFY_LDT_SYSCALL is not set -CONFIG_IA32_EMULATION=y - -# --- General --- -CONFIG_LOCALVERSION="-captainos" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="captainos" -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_AUDIT=y -CONFIG_NO_HZ_IDLE=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_EXPERT=y -CONFIG_KEXEC=y -CONFIG_KEXEC_FILE=y -CONFIG_SCHED_AUTOGROUP=y -CONFIG_CHECKPOINT_RESTORE=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -CONFIG_BINFMT_MISC=y - -# --- Cgroups --- -CONFIG_CGROUPS=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_RDMA=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -CONFIG_CGROUP_MISC=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_MEMCG=y -CONFIG_BLK_CGROUP=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_SCHED=y -CONFIG_CPUSETS=y - -# --- Namespaces --- -CONFIG_NAMESPACES=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y - -# --- Init / Initramfs --- -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -# CONFIG_RD_BZIP2 is not set -# CONFIG_RD_LZMA is not set -CONFIG_RD_GZIP=y -CONFIG_RD_XZ=y -# CONFIG_RD_LZO is not set -# CONFIG_RD_LZ4 is not set -CONFIG_RD_ZSTD=y - -# --- Memory management 
--- -CONFIG_SLAB_FREELIST_RANDOM=y -# CONFIG_COMPAT_BRK is not set -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_ZONE_DEVICE=y -CONFIG_HUGETLBFS=y - -# --- Power / Suspend --- -# CONFIG_SUSPEND is not set - -# --- ACPI --- -CONFIG_ACPI=y -# CONFIG_ACPI_REV_OVERRIDE_POSSIBLE is not set -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_IPMI=y -CONFIG_ACPI_PROCESSOR_AGGREGATOR=y -CONFIG_ACPI_SBS=y -CONFIG_ACPI_NFIT=y -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y - -# --- CPU frequency --- -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_X86_PCC_CPUFREQ=y -CONFIG_X86_ACPI_CPUFREQ=y -CONFIG_X86_POWERNOW_K8=y -CONFIG_X86_P4_CLOCKMOD=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_INTEL_IDLE=y - -# --- KVM --- -CONFIG_KVM=m -CONFIG_KVM_INTEL=m -CONFIG_KVM_AMD=m - -# --- Networking --- -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=y -CONFIG_UNIX=y -CONFIG_UNIX_DIAG=y -CONFIG_XFRM_USER=m -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_XDP_SOCKETS=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_NET_IPIP=y -CONFIG_NET_IPGRE_DEMUX=y -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_NET_IPVTI=m -CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_UDP_DIAG=y -CONFIG_TCP_MD5SIG=y -CONFIG_SYN_COOKIES=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -CONFIG_IPV6_ILA=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m 
-CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_GRE=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETLABEL=y -CONFIG_NETWORK_SECMARK=y -CONFIG_BRIDGE=y -CONFIG_BRIDGE_VLAN_FILTERING=y -CONFIG_VLAN_8021Q=y -CONFIG_BONDING=m -CONFIG_TUN=y -CONFIG_VETH=y -CONFIG_MACVLAN=y -CONFIG_MACVTAP=y -CONFIG_IPVLAN=y -CONFIG_VXLAN=y -CONFIG_GENEVE=m -CONFIG_NETCONSOLE=y -CONFIG_TAP=y -CONFIG_DUMMY=m -CONFIG_NLMON=y -CONFIG_IP_SCTP=m -CONFIG_L2TP=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_RANDOM=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_OPENVSWITCH=m -CONFIG_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS=m -CONFIG_HYPERV_VSOCKETS=m -CONFIG_NETLINK_DIAG=y -CONFIG_MPLS_ROUTING=m -CONFIG_MPLS_IPTUNNEL=m -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_9P=y -CONFIG_NET_9P_VIRTIO=y - -# --- Traffic control --- -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_CLS_BASIC=y -CONFIG_NET_CLS_ROUTE4=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=y -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=y -CONFIG_NET_CLS_MATCHALL=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_CMP=y -CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y -CONFIG_NET_EMATCH_IPSET=y -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=y -CONFIG_NET_ACT_GACT=y -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=y -CONFIG_NET_ACT_IPT=y -CONFIG_NET_ACT_NAT=y -CONFIG_NET_ACT_PEDIT=y -CONFIG_NET_ACT_SIMP=y -CONFIG_NET_ACT_SKBEDIT=y -CONFIG_NET_ACT_CSUM=y -CONFIG_NET_ACT_BPF=y - -# --- Netfilter --- 
-CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_SNMP=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_SIP=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NF_CT_NETLINK_TIMEOUT=y -CONFIG_NF_CT_NETLINK_HELPER=y -CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=y -CONFIG_NF_TABLES=y -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_NETDEV=y -CONFIG_NFT_CT=y -CONFIG_NFT_CONNLIMIT=y -CONFIG_NFT_LOG=y -CONFIG_NFT_LIMIT=y -CONFIG_NFT_MASQ=y -CONFIG_NFT_REDIR=y -CONFIG_NFT_NAT=y -CONFIG_NFT_TUNNEL=y -CONFIG_NFT_QUEUE=y -CONFIG_NFT_REJECT=y -CONFIG_NFT_COMPAT=y -CONFIG_NFT_HASH=y -CONFIG_NFT_OSF=y -CONFIG_NFT_TPROXY=y -CONFIG_NFT_DUP_NETDEV=y -CONFIG_NFT_FWD_NETDEV=y -CONFIG_NETFILTER_XT_SET=y -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_DSCP=y -CONFIG_NETFILTER_XT_TARGET_HMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y -CONFIG_NETFILTER_XT_MATCH_BPF=y -CONFIG_NETFILTER_XT_MATCH_CGROUP=y -CONFIG_NETFILTER_XT_MATCH_CLUSTER=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y 
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_CPU=y -CONFIG_NETFILTER_XT_MATCH_DCCP=y -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_IPCOMP=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -CONFIG_NETFILTER_XT_MATCH_IPVS=y -CONFIG_NETFILTER_XT_MATCH_L2TP=y -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_NFACCT=y -CONFIG_NETFILTER_XT_MATCH_OSF=y -CONFIG_NETFILTER_XT_MATCH_OWNER=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_RATEEST=y -CONFIG_NETFILTER_XT_MATCH_REALM=y -CONFIG_NETFILTER_XT_MATCH_RECENT=y -CONFIG_NETFILTER_XT_MATCH_SCTP=y -CONFIG_NETFILTER_XT_MATCH_SOCKET=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_IP_SET=y -CONFIG_IP_SET_BITMAP_IP=y -CONFIG_IP_SET_BITMAP_IPMAC=y -CONFIG_IP_SET_BITMAP_PORT=y -CONFIG_IP_SET_HASH_IP=y -CONFIG_IP_SET_HASH_IPPORT=y -CONFIG_IP_SET_HASH_IPPORTIP=y -CONFIG_IP_SET_HASH_IPPORTNET=y -CONFIG_IP_SET_HASH_NET=y -CONFIG_IP_SET_HASH_NETPORT=y -CONFIG_IP_SET_HASH_NETIFACE=y -CONFIG_IP_SET_LIST_SET=y -CONFIG_IP_VS=y -CONFIG_IP_VS_IPV6=y -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y -CONFIG_IP_VS_RR=y -CONFIG_IP_VS_WRR=y -CONFIG_IP_VS_LC=y -CONFIG_IP_VS_WLC=y -CONFIG_IP_VS_FO=y -CONFIG_IP_VS_OVF=y -CONFIG_IP_VS_LBLC=y -CONFIG_IP_VS_LBLCR=y -CONFIG_IP_VS_DH=y 
-CONFIG_IP_VS_SH=y -CONFIG_IP_VS_MH=y -CONFIG_IP_VS_SED=y -CONFIG_IP_VS_NQ=y -CONFIG_IP_VS_FTP=y -CONFIG_NFT_DUP_IPV4=y -CONFIG_NF_TABLES_ARP=y -CONFIG_NF_LOG_ARP=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_TARGET_SYNPROXY=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_TARGET_ECN=y -CONFIG_IP_NF_TARGET_TTL=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y -CONFIG_NFT_DUP_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_AH=y -CONFIG_IP6_NF_MATCH_EUI64=y -CONFIG_IP6_NF_MATCH_FRAG=y -CONFIG_IP6_NF_MATCH_OPTS=y -CONFIG_IP6_NF_MATCH_HL=y -CONFIG_IP6_NF_MATCH_IPV6HEADER=y -CONFIG_IP6_NF_MATCH_MH=y -CONFIG_IP6_NF_MATCH_RPFILTER=y -CONFIG_IP6_NF_MATCH_RT=y -CONFIG_IP6_NF_TARGET_HL=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_TARGET_SYNPROXY=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_IP6_NF_SECURITY=y -CONFIG_IP6_NF_NAT=y -CONFIG_IP6_NF_TARGET_MASQUERADE=y -CONFIG_IP6_NF_TARGET_NPT=y -CONFIG_NF_TABLES_BRIDGE=y -CONFIG_NFT_BRIDGE_REJECT=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_BRIDGE_EBT_T_FILTER=y -CONFIG_BRIDGE_EBT_T_NAT=y -CONFIG_BRIDGE_EBT_802_3=y -CONFIG_BRIDGE_EBT_AMONG=y -CONFIG_BRIDGE_EBT_ARP=y -CONFIG_BRIDGE_EBT_IP=y -CONFIG_BRIDGE_EBT_IP6=y -CONFIG_BRIDGE_EBT_LIMIT=y -CONFIG_BRIDGE_EBT_MARK=y -CONFIG_BRIDGE_EBT_PKTTYPE=y -CONFIG_BRIDGE_EBT_STP=y -CONFIG_BRIDGE_EBT_VLAN=y -CONFIG_BRIDGE_EBT_ARPREPLY=y -CONFIG_BRIDGE_EBT_DNAT=y -CONFIG_BRIDGE_EBT_MARK_T=y -CONFIG_BRIDGE_EBT_REDIRECT=y -CONFIG_BRIDGE_EBT_SNAT=y -CONFIG_BRIDGE_EBT_LOG=y -CONFIG_BRIDGE_EBT_NFLOG=y - -# --- Block devices --- -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_THROTTLING=y 
-CONFIG_BLK_CGROUP_IOLATENCY=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_LDM_PARTITION=y -CONFIG_CMDLINE_PARTITION=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_DRBD=m -CONFIG_BLK_DEV_NBD=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_ATA_OVER_ETH=m -CONFIG_BLK_DEV_NVME=y - -# --- SCSI --- -CONFIG_SCSI=y -# CONFIG_SCSI_PROC_FS is not set -CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y -CONFIG_ISCSI_TCP=m -CONFIG_SCSI_HPSA=y -CONFIG_MEGARAID_SAS=y -CONFIG_SCSI_MPT3SAS=y -CONFIG_SCSI_MPI3MR=m -CONFIG_SCSI_SMARTPQI=m -CONFIG_VMWARE_PVSCSI=y -CONFIG_XEN_SCSI_FRONTEND=y -CONFIG_SCSI_VIRTIO=y - -# --- ATA / SATA --- -CONFIG_ATA=y -# CONFIG_ATA_VERBOSE_ERROR is not set -# CONFIG_SATA_PMP is not set -CONFIG_SATA_AHCI=y -CONFIG_ATA_PIIX=y -CONFIG_SATA_MV=y -CONFIG_SATA_NV=y -CONFIG_SATA_PROMISE=y -CONFIG_SATA_SIL=y -CONFIG_SATA_SIS=y -CONFIG_SATA_SVW=y -CONFIG_SATA_ULI=y -CONFIG_SATA_VIA=y -CONFIG_SATA_VITESSE=y -CONFIG_ATA_GENERIC=y - -# --- Device mapper / MD --- -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_LINEAR=y -CONFIG_MD_RAID0=y -CONFIG_MD_RAID1=y -CONFIG_MD_RAID10=y -CONFIG_MD_RAID456=y -CONFIG_MD_MULTIPATH=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_CRYPT=y -CONFIG_DM_SNAPSHOT=y -CONFIG_DM_THIN_PROVISIONING=y -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m - -# --- Fusion --- -CONFIG_FUSION=y -CONFIG_FUSION_SPI=y - -# --- Network drivers (built-in for reliable boot) --- -CONFIG_NETDEVICES=y -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_AGERE is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_NET_VENDOR_AMD is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_ATHEROS is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CHELSIO is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_DEC is not set 
-# CONFIG_NET_VENDOR_DLINK is not set -# CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_I825XX is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROCHIP is not set -# CONFIG_NET_VENDOR_MYRI is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETERION is not set -# CONFIG_NET_VENDOR_NVIDIA is not set -# CONFIG_NET_VENDOR_OKI is not set -# CONFIG_NET_VENDOR_PACKET_ENGINES is not set -# CONFIG_NET_VENDOR_QLOGIC is not set -# CONFIG_NET_VENDOR_BROCADE is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RDC is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set -CONFIG_E1000=y -CONFIG_E1000E=y -CONFIG_IGB=y -CONFIG_IGBVF=y -CONFIG_IXGBE=y -CONFIG_IXGBEVF=y -CONFIG_I40E=y -CONFIG_I40EVF=y -CONFIG_ICE=y -CONFIG_IGC=y -CONFIG_MLX4_EN=y -CONFIG_MLX5_CORE=y -CONFIG_MLX5_CORE_EN=y -CONFIG_BNXT=y -CONFIG_CNIC=m -CONFIG_TIGON3=y -CONFIG_BNX2X=y -CONFIG_R8169=y -CONFIG_8139CP=y -CONFIG_8139TOO=y -CONFIG_VMXNET3=y -CONFIG_HYPERV_NET=y -CONFIG_ENA_ETHERNET=m -CONFIG_GVE=m -CONFIG_NFP=m -CONFIG_IONIC=y -CONFIG_VIRTIO_NET=y - -# --- USB network --- -CONFIG_USB_CATC=m -CONFIG_USB_KAWETH=m -CONFIG_USB_PEGASUS=m -CONFIG_USB_RTL8150=y -CONFIG_USB_RTL8152=y -CONFIG_USB_LAN78XX=m -CONFIG_USB_USBNET=y -CONFIG_USB_NET_AX8817X=m 
-CONFIG_USB_NET_AX88179_178A=m -CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m -CONFIG_USB_NET_NET1080=m -CONFIG_USB_NET_CDC_SUBSET=m -CONFIG_USB_NET_ZAURUS=m - -# --- WAN --- -CONFIG_WAN=y -CONFIG_HDLC=m -CONFIG_HDLC_CISCO=m - -# --- PPP --- -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m - -# --- USB --- -CONFIG_USB_SUPPORT=y -CONFIG_USB=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_UHCI_HCD=y -CONFIG_USB_XHCI_PCI=y -CONFIG_USB_UAS=y -CONFIG_USB_STORAGE=y - -# --- USB serial (BMC/console access) --- -CONFIG_USB_SERIAL=y -CONFIG_USB_SERIAL_CONSOLE=y -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_CH341=y -CONFIG_USB_SERIAL_CP210X=y -CONFIG_USB_SERIAL_FTDI_SIO=y -CONFIG_USB_SERIAL_PL2303=y -CONFIG_USB_SERIAL_TI=y -CONFIG_USB_SERIAL_OPTION=y - -# --- Input --- -CONFIG_INPUT_FF_MEMLESS=y -CONFIG_INPUT_SPARSEKMAP=y -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_MISC=y -CONFIG_INPUT_PCSPKR=y -CONFIG_INPUT_ATLAS_BTNS=y -CONFIG_INPUT_UINPUT=y -CONFIG_SERIO_PCIPS2=y -CONFIG_SERIO_RAW=y -# CONFIG_LEGACY_PTYS is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TOUCHSCREEN is not set - -# --- MMC --- -CONFIG_MMC=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PCI=y -# CONFIG_MMC_RICOH_MMC is not set -CONFIG_MMC_SDHCI_ACPI=y -CONFIG_MMC_SDHCI_PLTFM=y - -# --- Filesystems --- -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_XFS_FS=y -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -CONFIG_BTRFS_FS=m -CONFIG_BTRFS_FS_POSIX_ACL=y -CONFIG_FS_DAX=y -CONFIG_FS_ENCRYPTION=y -CONFIG_FANOTIFY=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_VFAT_FS=y -CONFIG_MSDOS_FS=y -CONFIG_FAT_DEFAULT_IOCHARSET="utf8" -CONFIG_NTFS_FS=m -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y 
-CONFIG_TMPFS_XATTR=y -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_SYSFS=y -CONFIG_EFIVAR_FS=y -CONFIG_OVERLAY_FS=y -CONFIG_SQUASHFS=y -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_LZ4=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -CONFIG_FUSE_FS=y -CONFIG_CUSE=y -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=y -CONFIG_FSCACHE=y -CONFIG_FSCACHE_STATS=y -CONFIG_CACHEFILES=y -CONFIG_NFS_FS=m -# CONFIG_NFS_V2 is not set -CONFIG_NFS_V4=m -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_NFS_FSCACHE=y -CONFIG_NFSD=m -CONFIG_NFSD_V4=y -CONFIG_CEPH_FS=m -CONFIG_CEPH_FSCACHE=y -CONFIG_CEPH_FS_POSIX_ACL=y -CONFIG_CIFS=y -# CONFIG_CIFS_ALLOW_INSECURE_LEGACY is not set -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_DFS_UPCALL=y -CONFIG_CIFS_FSCACHE=y -CONFIG_9P_FS=y -CONFIG_9P_FSCACHE=y -CONFIG_9P_FS_POSIX_ACL=y -CONFIG_9P_FS_SECURITY=y - -# --- NLS --- -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_UTF8=y - -# --- Virtio (built-in for reliable boot) --- -CONFIG_VIRTIO=y -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_BLK=y -CONFIG_VIRTIO_CONSOLE=y -CONFIG_VIRTIO_BALLOON=y -CONFIG_VIRTIO_INPUT=y -CONFIG_VIRTIO_MMIO=y -CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y -CONFIG_HW_RANDOM_VIRTIO=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_VSOCK=m - -# --- Console / Serial --- -CONFIG_VT=y -CONFIG_VT_CONSOLE=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=32 -# CONFIG_SERIAL_8250_MID is not set -CONFIG_SERIAL_DEV_BUS=y -CONFIG_TTY_PRINTK=y -CONFIG_HW_CONSOLE=y -CONFIG_VGA_CONSOLE=y -CONFIG_DUMMY_CONSOLE=y - -# --- IPMI --- -CONFIG_IPMI_HANDLER=y -CONFIG_IPMI_DMI_DECODE=y -CONFIG_IPMI_DEVICE_INTERFACE=y -CONFIG_IPMI_SI=y -CONFIG_IPMI_POWEROFF=y - -# --- Hardware RNG --- -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=y - -# --- I2C --- -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y - -# --- TPM --- -CONFIG_TCG_TIS_I2C_ATMEL=m -CONFIG_TCG_TIS_I2C_INFINEON=m -CONFIG_TCG_TIS_I2C_NUVOTON=m -CONFIG_TCG_NSC=m -CONFIG_TCG_ATMEL=m -CONFIG_TCG_INFINEON=m 
-CONFIG_TCG_XEN=m -CONFIG_TCG_VTPM_PROXY=m -CONFIG_TCG_TIS_ST33ZP24_I2C=m - -# --- Platform / MFD --- -CONFIG_NVRAM=y -CONFIG_HPET=y -CONFIG_HANGCHECK_TIMER=y -CONFIG_LPC_ICH=y -CONFIG_LPC_SCH=y -CONFIG_MFD_INTEL_LPSS_ACPI=y -CONFIG_MFD_INTEL_LPSS_PCI=y -CONFIG_MFD_SM501=y -CONFIG_MFD_VX855=y -# CONFIG_THERMAL_HWMON is not set - -# --- RTC --- -CONFIG_RTC_CLASS=y - -# --- DMA --- -CONFIG_DMADEVICES=y - -# --- Security --- -CONFIG_SECCOMP=y -CONFIG_SECCOMP_FILTER=y -CONFIG_SECURITY=y -CONFIG_SECURITY_NETWORK=y -CONFIG_SECURITY_NETWORK_XFRM=y -CONFIG_SECURITY_PATH=y -CONFIG_SECURITY_DMESG_RESTRICT=y -CONFIG_KEYS=y -CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_TRUSTED_KEYS=y -CONFIG_KEY_DH_OPERATIONS=y -CONFIG_HARDENED_USERCOPY=y -CONFIG_FORTIFY_SOURCE=y -CONFIG_STATIC_USERMODEHELPER=y -CONFIG_SECURITY_YAMA=y -CONFIG_INTEGRITY_SIGNATURE=y -CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y -CONFIG_IMA=y -CONFIG_IMA_DEFAULT_HASH_SHA256=y -CONFIG_IMA_READ_POLICY=y -CONFIG_IMA_APPRAISE=y -CONFIG_EVM=y -CONFIG_LSM="yama,loadpin,safesetid,integrity" - -# --- Crypto --- -CONFIG_CRYPTO=y -CONFIG_CRYPTO_USER=y -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_XTS=y -CONFIG_CRYPTO_ANUBIS=y -CONFIG_CRYPTO_BLOWFISH=y -CONFIG_CRYPTO_CAMELLIA=y -CONFIG_CRYPTO_DES=y -CONFIG_CRYPTO_FCRYPT=y -CONFIG_CRYPTO_KHAZAD=y -CONFIG_CRYPTO_SEED=y -CONFIG_CRYPTO_TEA=y -CONFIG_CRYPTO_TWOFISH=y -CONFIG_CRYPTO_ARC4=y -CONFIG_CRYPTO_KEYWRAP=y -CONFIG_CRYPTO_LRW=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_CHACHA20POLY1305=y -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_ECHAINIV=y -CONFIG_CRYPTO_MICHAEL_MIC=y -CONFIG_CRYPTO_RMD160=y -CONFIG_CRYPTO_VMAC=y -CONFIG_CRYPTO_WP512=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_CRC32=y -CONFIG_CRYPTO_LZO=y -CONFIG_CRYPTO_842=y -CONFIG_CRYPTO_LZ4=y -CONFIG_CRYPTO_LZ4HC=y -CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_USER_API_RNG=y -CONFIG_CRYPTO_USER_API_AEAD=y - -# --- x86_64 hardware crypto acceleration 
--- -CONFIG_CRYPTO_AES_NI_INTEL=y -CONFIG_CRYPTO_BLOWFISH_X86_64=y -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=y -CONFIG_CRYPTO_CAST5_AVX_X86_64=y -CONFIG_CRYPTO_CAST6_AVX_X86_64=y -CONFIG_CRYPTO_DES3_EDE_X86_64=y -CONFIG_CRYPTO_SERPENT_SSE2_X86_64=y -CONFIG_CRYPTO_SERPENT_AVX2_X86_64=y -CONFIG_CRYPTO_TWOFISH_AVX_X86_64=y -CONFIG_CRYPTO_CHACHA20_X86_64=y -CONFIG_CRYPTO_POLY1305_X86_64=y -CONFIG_CRYPTO_SHA1_SSSE3=y -CONFIG_CRYPTO_SHA256_SSSE3=y -CONFIG_CRYPTO_SHA512_SSSE3=y -CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=y -CONFIG_CRYPTO_CRC32C_INTEL=y -CONFIG_CRYPTO_CRC32_PCLMUL=y -CONFIG_CRYPTO_DEV_PADLOCK=y -CONFIG_CRYPTO_DEV_PADLOCK_AES=y -CONFIG_CRYPTO_DEV_PADLOCK_SHA=y -CONFIG_CRYPTO_DEV_VIRTIO=m -CONFIG_PKCS7_MESSAGE_PARSER=y - -# --- BPF --- -CONFIG_BPF=y -CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT=y -CONFIG_BPF_JIT_ALWAYS_ON=y - -# --- PCI --- -CONFIG_PCI=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCI_STUB=y -CONFIG_PCI_IOV=y -# CONFIG_VGA_ARB is not set -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_SHPC=y -CONFIG_PCI_HYPERV_INTERFACE=m - -# --- IOMMU --- -CONFIG_AMD_IOMMU=y -CONFIG_INTEL_IOMMU=y -CONFIG_IRQ_REMAP=y - -# --- Xen --- -# CONFIG_XEN_BACKEND is not set -CONFIG_XEN_GNTDEV=y -CONFIG_XEN_GRANT_DEV_ALLOC=y -CONFIG_XEN_PVCALLS_FRONTEND=y -CONFIG_XEN_ACPI_PROCESSOR=y -# CONFIG_XEN_SYMS is not set - -# --- Intel platform --- -CONFIG_INTEL_IPS=y - -# --- DEVFREQ --- -CONFIG_PM_DEVFREQ=y -CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y - -# --- Reset --- -CONFIG_RESET_CONTROLLER=y - -# --- DAX --- -CONFIG_DEV_DAX=y -CONFIG_DEV_DAX_PMEM=m - -# --- Misc --- -CONFIG_FW_LOADER=y -CONFIG_PRINTK=y -CONFIG_PRINTK_TIME=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_EARLY_PRINTK=y -CONFIG_PANIC_TIMEOUT=10 -CONFIG_PANIC_ON_OOPS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_UEVENT_HELPER=y -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_CONNECTOR=y -CONFIG_DMI_SYSFS=y -CONFIG_SYSFB_SIMPLEFB=y -CONFIG_RESET_ATTACK_MITIGATION=y -# CONFIG_PNP_DEBUG_MESSAGES is not 
set -CONFIG_HYPERV=y -CONFIG_HYPERV_UTILS=y -CONFIG_HYPERV_BALLOON=y - -# --- Module compression --- -CONFIG_MODULE_COMPRESS_ZSTD=y -CONFIG_MODULE_DECOMPRESS=y - -# --- EFI boot support --- -CONFIG_EFI=y -CONFIG_EFI_STUB=y -CONFIG_EFI_MIXED=y - -# --- Framebuffer/Console (required for EFI boot video output) --- -CONFIG_DRM=y -CONFIG_DRM_I915=y -CONFIG_DRM_SIMPLEDRM=y -CONFIG_FB=y -CONFIG_FB_EFI=y -CONFIG_FRAMEBUFFER_CONSOLE=y - -# --- Debugging / Diagnostics --- -CONFIG_FRAME_WARN=1024 -CONFIG_BUG_ON_DATA_CORRUPTION=y -CONFIG_HARDLOCKUP_DETECTOR=y -CONFIG_WQ_WATCHDOG=y -CONFIG_DEBUG_NOTIFIERS=y -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_RCU_TRACE is not set -CONFIG_IO_STRICT_DEVMEM=y - -# --- Disable unnecessary subsystems --- -# CONFIG_SOUND is not set -# CONFIG_WIRELESS is not set -# CONFIG_WLAN is not set -# CONFIG_BLUETOOTH is not set -# CONFIG_NFC is not set -# CONFIG_INFINIBAND is not set -# CONFIG_MEDIA_SUPPORT is not set diff --git a/kernel.configs/6.18.y.arm64 b/kernel.configs/6.18.y.arm64 deleted file mode 100644 index 795c91e..0000000 --- a/kernel.configs/6.18.y.arm64 +++ /dev/null @@ -1,515 +0,0 @@ -# CaptainOS kernel defconfig for arm64 (aarch64) -# Minimal config for bare-metal provisioning with container/network support. -# Based on 6.12.y config with targeted additions for CaptainOS use case, adapted for kernel 6.18. 
-# -# Generate a full .config from this: -# cp 6.18.y.arm64 .config && make ARCH=arm64 olddefconfig - -# --- Architecture --- -CONFIG_ARM64=y -CONFIG_SMP=y -CONFIG_NR_CPUS=128 -CONFIG_NUMA=y -CONFIG_ARM64_VA_BITS_48=y -CONFIG_SCHED_MC=y -CONFIG_SCHED_SMT=y -CONFIG_HZ_1000=y -CONFIG_XEN=y -CONFIG_COMPAT=y -CONFIG_RANDOMIZE_BASE=y -CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y - -# --- General --- -CONFIG_LOCALVERSION="-captainos" -CONFIG_DEFAULT_HOSTNAME="captainos" -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_AUDIT=y -CONFIG_NO_HZ_IDLE=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_KEXEC=y -CONFIG_KEXEC_FILE=y -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_CGROUPS=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -CONFIG_CGROUP_MISC=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CGROUP_RDMA=y -CONFIG_CPUSETS=y -CONFIG_MEMCG=y -CONFIG_BLK_CGROUP=y -CONFIG_BLK_CGROUP_IOLATENCY=y -CONFIG_BLK_DEV_THROTTLING=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_SCHED=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_NAMESPACES=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_CHECKPOINT_RESTORE=y -# Memory management -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_KSM=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_HUGETLBFS=y - -# --- Init / Initramfs --- -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" - -# --- Networking --- -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y -CONFIG_IPV6=y -CONFIG_BRIDGE=y -CONFIG_VLAN_8021Q=m -CONFIG_BONDING=m -CONFIG_TUN=y -CONFIG_VETH=y -CONFIG_MACVLAN=y -CONFIG_MACVTAP=y -CONFIG_IPVLAN=y -CONFIG_TAP=y 
-CONFIG_DUMMY=m -CONFIG_VXLAN=y -CONFIG_GENEVE=m -CONFIG_NLMON=y -# Traffic control / QoS (needed by CNI plugins) -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_CLS_BASIC=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=y -CONFIG_NET_CLS_MATCHALL=y -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=y -CONFIG_NET_ACT_GACT=y -CONFIG_NET_ACT_MIRRED=y -CONFIG_NET_ACT_BPF=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_U32=y - -# --- Netfilter --- -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_NAT=y -CONFIG_NF_TABLES=y -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_IPV4=y -CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_NAT=y -CONFIG_NFT_MASQ=y -CONFIG_NFT_CT=y -CONFIG_NFT_COMPAT=y -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_NAT=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_NAT=y -CONFIG_BRIDGE_NETFILTER=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_BRIDGE_EBT_T_FILTER=y -CONFIG_BRIDGE_EBT_T_NAT=y -CONFIG_BRIDGE_EBT_ARP=y -CONFIG_BRIDGE_EBT_IP=y -CONFIG_BRIDGE_EBT_IP6=y -CONFIG_BRIDGE_EBT_MARK=y -CONFIG_BRIDGE_EBT_MARK_T=y -CONFIG_BRIDGE_EBT_VLAN=y -# IPVS (container load balancing / kube-proxy) -CONFIG_IP_VS=y -CONFIG_IP_VS_IPV6=y -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_RR=y -CONFIG_IP_VS_WRR=y -CONFIG_IP_VS_SH=y -CONFIG_IP_VS_NFCT=y -# IP sets (used by iptables/nftables for efficient matching) -CONFIG_IP_SET=y -CONFIG_IP_SET_HASH_IP=y -CONFIG_IP_SET_HASH_IPPORT=y -CONFIG_IP_SET_HASH_IPPORTNET=y -CONFIG_IP_SET_HASH_NET=y -CONFIG_IP_SET_HASH_NETPORT=y -CONFIG_IP_SET_BITMAP_IP=y -CONFIG_IP_SET_BITMAP_PORT=y 
-CONFIG_IP_SET_LIST_SET=y -CONFIG_NETFILTER_XT_SET=y - -# --- Block devices --- -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_NBD=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_BLK_DEV_NVME=y -CONFIG_ATA=y -CONFIG_SATA_AHCI=y -CONFIG_SATA_AHCI_PLATFORM=y - -# --- SCSI --- -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_MEGARAID_SAS=y -CONFIG_SCSI_MPT3SAS=y -CONFIG_SCSI_MPI3MR=m -CONFIG_SCSI_HPSA=y -CONFIG_SCSI_HISI_SAS=y -CONFIG_SCSI_HISI_SAS_PCI=y -CONFIG_XEN_SCSI_FRONTEND=y -CONFIG_SCSI_VIRTIO=y - -# --- Device mapper --- -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_CRYPT=y -CONFIG_DM_THIN_PROVISIONING=y -CONFIG_DM_SNAPSHOT=y -CONFIG_DM_MULTIPATH=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m - -# --- Network drivers (built-in for reliable boot) --- -CONFIG_NETDEVICES=y -CONFIG_VIRTIO_NET=y -CONFIG_E1000E=y -CONFIG_IGB=y -CONFIG_IXGBE=y -CONFIG_I40E=y -CONFIG_ICE=y -CONFIG_IGC=y -CONFIG_MLX4_EN=y -CONFIG_MLX5_CORE=y -CONFIG_MLX5_CORE_EN=y -CONFIG_BNXT=y -CONFIG_TIGON3=y -CONFIG_R8169=y -CONFIG_THUNDER_NIC_BGX=m -CONFIG_THUNDER_NIC_PF=m -CONFIG_THUNDER_NIC_VF=m -CONFIG_MACB=m -CONFIG_HNS3=m -CONFIG_NET_XGENE=y -CONFIG_MVNETA=y -CONFIG_MVPP2=y -CONFIG_STMMAC_ETH=m -CONFIG_ENA_ETHERNET=m -CONFIG_GVE=m -CONFIG_VMXNET3=y -CONFIG_AMD_XGBE=y -# Broadcom GENET (Raspberry Pi 4B built-in Ethernet) -CONFIG_BCMGENET=y -# PHY subsystem (required by GENET and other MAC drivers) -CONFIG_PHYLIB=y -CONFIG_MDIO_BUS=y -CONFIG_BROADCOM_PHY=y -CONFIG_BCM7XXX_PHY=y -CONFIG_MDIO_BCM_UNIMAC=y -CONFIG_MARVELL_PHY=y -CONFIG_MARVELL_10G_PHY=y -CONFIG_MICREL_PHY=y -CONFIG_ROCKCHIP_PHY=y -CONFIG_REALTEK_PHY=y -CONFIG_MESON_GXL_PHY=y -CONFIG_AMD_PHY=y - -# --- USB --- -CONFIG_USB_SUPPORT=y -CONFIG_USB=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_XHCI_PCI=y -CONFIG_USB_UAS=y -CONFIG_USB_STORAGE=y -CONFIG_USB_DWC3=y -CONFIG_USB_DWC3_HOST=y -CONFIG_USB_DWC2=y -CONFIG_USB_ISP1760=y - -# --- MMC/SD (Raspberry Pi, Rockchip, 
and other SBCs) --- -CONFIG_MMC=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PCI=y -CONFIG_MMC_SDHCI_ACPI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_OF_ARASAN=y -CONFIG_MMC_SDHCI_OF_DWCMSHC=y -CONFIG_MMC_SDHCI_CADENCE=y -CONFIG_MMC_SDHCI_TEGRA=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_XENON=y -CONFIG_MMC_SDHCI_F_SDH30=y -CONFIG_MMC_ARMMMCI=y -CONFIG_MMC_DW=y -CONFIG_MMC_DW_ROCKCHIP=y -CONFIG_MMC_DW_K3=y -CONFIG_MMC_MESON_GX=y -CONFIG_MMC_SUNXI=m -CONFIG_MMC_SPI=y -CONFIG_MMC_HSQ=y - -# --- Filesystems --- -CONFIG_EXT4_FS=y -CONFIG_XFS_FS=m -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_PROC_FS=y -CONFIG_SYSFS=y -CONFIG_OVERLAY_FS=y -CONFIG_SQUASHFS=y -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_LZ4=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -CONFIG_NFS_FS=m -CONFIG_NFS_V4=m -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_FUSE_FS=y -CONFIG_CUSE=y -CONFIG_EFIVAR_FS=y -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_UDF_FS=y -CONFIG_MSDOS_FS=y -CONFIG_FAT_DEFAULT_IOCHARSET="utf8" -CONFIG_NTFS_FS=m -CONFIG_TMPFS_XATTR=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_UTF8=y - -# --- Virtio (built-in for reliable boot) --- -CONFIG_VIRTIO=y -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_BLK=y -CONFIG_VIRTIO_CONSOLE=y -CONFIG_VIRTIO_BALLOON=y -CONFIG_VIRTIO_INPUT=y -CONFIG_VIRTIO_MMIO=y -CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y -CONFIG_HW_RANDOM_VIRTIO=y - -# --- Console / Serial --- -CONFIG_SERIAL_AMBA_PL011=y -CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_VT=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -CONFIG_DUMMY_CONSOLE=y - -# --- Security --- -CONFIG_SECCOMP=y -CONFIG_SECCOMP_FILTER=y -CONFIG_SECURITY=y -CONFIG_KEYS=y - -# --- Crypto --- -CONFIG_CRYPTO=y -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_XTS=y -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_AES_ARM64_CE=y -CONFIG_CRYPTO_SHA2_ARM64_CE=y - -# --- BPF --- 
-CONFIG_BPF=y -CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT=y -CONFIG_BPF_JIT_ALWAYS_ON=y -CONFIG_XDP_SOCKETS=y - -# --- ARM64 SoC/platform support --- -CONFIG_ARCH_BCM2835=y -CONFIG_ARCH_ROCKCHIP=y -CONFIG_ARCH_MESON=y -CONFIG_ARCH_SUNXI=y -CONFIG_ARCH_TEGRA=y -CONFIG_ARCH_MVEBU=y -CONFIG_ARCH_HISI=y -CONFIG_ARCH_QCOM=y -CONFIG_ARCH_EXYNOS=y -CONFIG_ARCH_VEXPRESS=y -CONFIG_ARCH_XGENE=y -CONFIG_ARCH_THUNDER=y -CONFIG_ARCH_THUNDER2=y -CONFIG_ARCH_SEATTLE=y -CONFIG_ARCH_SYNQUACER=y -CONFIG_ARCH_UNIPHIER=y -# ACPI / UEFI -CONFIG_ACPI=y -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_IPMI=y -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y -CONFIG_IPMI_HANDLER=m -CONFIG_IPMI_DMI_DECODE=y -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -CONFIG_PCI=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI=y -CONFIG_PCI_HOST_GENERIC=y -CONFIG_PCIE_ROCKCHIP_HOST=y -CONFIG_PCIE_ARMADA_8K=y -CONFIG_PCIE_TEGRA194_HOST=y -# Broadcom STB PCIe controller (Raspberry Pi 4B — needed for VL805 USB 3.0) -CONFIG_PCIE_BRCMSTB=y -CONFIG_PCIE_ALTERA=y -CONFIG_PCI_AARDVARK=y -CONFIG_PCI_TEGRA=y -CONFIG_PCI_XGENE=y -CONFIG_PCI_HOST_THUNDER_PEM=y -CONFIG_PCI_HOST_THUNDER_ECAM=y -CONFIG_PCIE_QCOM=y -CONFIG_PCIE_AL=y -CONFIG_PCI_MESON=y -CONFIG_PCI_HISI=y -CONFIG_PCIE_KIRIN=y -CONFIG_PCIE_HISI_STB=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PASID=y -CONFIG_HOTPLUG_PCI_ACPI=y -# IOMMU -CONFIG_ARM_SMMU=y -CONFIG_ARM_SMMU_V3=y -CONFIG_FW_LOADER=y -# Raspberry Pi firmware mailbox (required for GENET MAC address, clocks, etc.) 
-CONFIG_RASPBERRYPI_FIRMWARE=y -CONFIG_RASPBERRYPI_POWER=y -CONFIG_ARM_SCMI_PROTOCOL=y -CONFIG_ARM_SCPI_PROTOCOL=y -CONFIG_OF=y -CONFIG_USE_OF=y -# RTC -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_EFI=y -# DMA -CONFIG_DMADEVICES=y -CONFIG_PL330_DMA=y -CONFIG_PRINTK=y -CONFIG_PRINTK_TIME=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_PANIC_TIMEOUT=10 -CONFIG_PANIC_ON_OOPS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_RD_GZIP=y -CONFIG_RD_XZ=y -CONFIG_RD_ZSTD=y -CONFIG_UEVENT_HELPER=y -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_CONNECTOR=y -CONFIG_DMI_SYSFS=y -CONFIG_EFI=y -CONFIG_EFI_STUB=y - -# --- Framebuffer/Console (required for EFI boot video output) --- -CONFIG_DRM=y -CONFIG_DRM_SIMPLEDRM=y -CONFIG_DRM_VIRTIO_GPU=y -CONFIG_FB=y -CONFIG_FB_EFI=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_SYSFB_SIMPLEFB=y -# Virtualization -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=y -CONFIG_VFIO=y -CONFIG_VFIO_PCI=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_VSOCK=m - -# --- Module compression --- -CONFIG_MODULE_COMPRESS_ZSTD=y -CONFIG_MODULE_DECOMPRESS=y - -# --- I2C (SBC board management, EEPROM, sensors) --- -CONFIG_I2C=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MV64XXX=y -CONFIG_I2C_RK3X=y -CONFIG_I2C_TEGRA=y -CONFIG_I2C_DESIGNWARE_PLATFORM=y -# SPI -CONFIG_SPI=y -CONFIG_SPI_ROCKCHIP=y -CONFIG_SPI_PL022=y -CONFIG_SPI_ORION=y -# GPIO -CONFIG_GPIOLIB=y -# Hardware watchdog -CONFIG_WATCHDOG=y -CONFIG_SOFTLOCKUP_DETECTOR=y -CONFIG_WQ_WATCHDOG=y -# Hardware RNG -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=y -# TPM -CONFIG_TCG_TIS=y -CONFIG_TCG_FTPM_TEE=y -CONFIG_TEE=y -CONFIG_OPTEE=y - -# --- Disable unnecessary subsystems --- -# CONFIG_SOUND is not set -# CONFIG_WIRELESS is not set -# CONFIG_WLAN is not set -# CONFIG_BLUETOOTH is not set -# CONFIG_NFC is not set -# CONFIG_INFINIBAND is not set -# CONFIG_MEDIA_SUPPORT is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TOUCHSCREEN is not set diff --git a/kernel.configs/6.19.y.amd64 b/kernel.configs/6.19.y.amd64 deleted file mode 
100644 index c6d243c..0000000 --- a/kernel.configs/6.19.y.amd64 +++ /dev/null @@ -1,1032 +0,0 @@ -# CaptainOS kernel defconfig for x86_64 -# Comprehensive config for bare-metal provisioning with container/network support. -# Derived from HookOS generic-6.6.y-x86_64 proven config, adapted for kernel 6.18. -# -# Generate a full .config from this: -# cp 6.18.y.amd64 .config && make olddefconfig - -# --- Architecture --- -CONFIG_64BIT=y -CONFIG_X86_64=y -CONFIG_SMP=y -CONFIG_NR_CPUS=128 -# CONFIG_X86_EXTENDED_PLATFORM is not set -CONFIG_X86_INTEL_LPSS=y -CONFIG_HYPERVISOR_GUEST=y -CONFIG_PARAVIRT_SPINLOCKS=y -CONFIG_XEN=y -CONFIG_XEN_PVH=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -# CONFIG_X86_MCE is not set -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=y -# CONFIG_X86_5LEVEL is not set -CONFIG_HZ_1000=y -CONFIG_PHYSICAL_ALIGN=0x1000000 -CONFIG_LEGACY_VSYSCALL_NONE=y -# CONFIG_MODIFY_LDT_SYSCALL is not set -CONFIG_IA32_EMULATION=y - -# --- General --- -CONFIG_LOCALVERSION="-captainos" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="captainos" -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_AUDIT=y -CONFIG_NO_HZ_IDLE=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_EXPERT=y -CONFIG_KEXEC=y -CONFIG_KEXEC_FILE=y -CONFIG_SCHED_AUTOGROUP=y -CONFIG_CHECKPOINT_RESTORE=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -CONFIG_BINFMT_MISC=y - -# --- Cgroups --- -CONFIG_CGROUPS=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_RDMA=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -CONFIG_CGROUP_MISC=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_MEMCG=y -CONFIG_BLK_CGROUP=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_SCHED=y -CONFIG_CPUSETS=y - -# --- Namespaces --- -CONFIG_NAMESPACES=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y 
-CONFIG_UTS_NS=y -CONFIG_IPC_NS=y - -# --- Init / Initramfs --- -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -# CONFIG_RD_BZIP2 is not set -# CONFIG_RD_LZMA is not set -CONFIG_RD_GZIP=y -CONFIG_RD_XZ=y -# CONFIG_RD_LZO is not set -# CONFIG_RD_LZ4 is not set -CONFIG_RD_ZSTD=y - -# --- Memory management --- -CONFIG_SLAB_FREELIST_RANDOM=y -# CONFIG_COMPAT_BRK is not set -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_ZONE_DEVICE=y -CONFIG_HUGETLBFS=y - -# --- Power / Suspend --- -# CONFIG_SUSPEND is not set - -# --- ACPI --- -CONFIG_ACPI=y -# CONFIG_ACPI_REV_OVERRIDE_POSSIBLE is not set -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_IPMI=y -CONFIG_ACPI_PROCESSOR_AGGREGATOR=y -CONFIG_ACPI_SBS=y -CONFIG_ACPI_NFIT=y -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y - -# --- CPU frequency --- -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_X86_PCC_CPUFREQ=y -CONFIG_X86_ACPI_CPUFREQ=y -CONFIG_X86_POWERNOW_K8=y -CONFIG_X86_P4_CLOCKMOD=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_INTEL_IDLE=y - -# --- KVM --- -CONFIG_KVM=m -CONFIG_KVM_INTEL=m -CONFIG_KVM_AMD=m - -# --- Networking --- -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=y -CONFIG_UNIX=y -CONFIG_UNIX_DIAG=y -CONFIG_XFRM_USER=m -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_XDP_SOCKETS=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_NET_IPIP=y -CONFIG_NET_IPGRE_DEMUX=y -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_NET_IPVTI=m -CONFIG_NET_FOU_IP_TUNNELS=y 
-CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_UDP_DIAG=y -CONFIG_TCP_MD5SIG=y -CONFIG_SYN_COOKIES=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -CONFIG_IPV6_ILA=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_GRE=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETLABEL=y -CONFIG_NETWORK_SECMARK=y -CONFIG_BRIDGE=y -CONFIG_BRIDGE_VLAN_FILTERING=y -CONFIG_VLAN_8021Q=y -CONFIG_BONDING=m -CONFIG_TUN=y -CONFIG_VETH=y -CONFIG_MACVLAN=y -CONFIG_MACVTAP=y -CONFIG_IPVLAN=y -CONFIG_VXLAN=y -CONFIG_GENEVE=m -CONFIG_NETCONSOLE=y -CONFIG_TAP=y -CONFIG_DUMMY=m -CONFIG_NLMON=y -CONFIG_IP_SCTP=m -CONFIG_L2TP=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_RANDOM=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_OPENVSWITCH=m -CONFIG_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS=m -CONFIG_HYPERV_VSOCKETS=m -CONFIG_NETLINK_DIAG=y -CONFIG_MPLS_ROUTING=m -CONFIG_MPLS_IPTUNNEL=m -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_9P=y -CONFIG_NET_9P_VIRTIO=y - -# --- Traffic control --- -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_CLS_BASIC=y -CONFIG_NET_CLS_ROUTE4=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=y -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=y -CONFIG_NET_CLS_MATCHALL=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_CMP=y -CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y 
-CONFIG_NET_EMATCH_IPSET=y -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=y -CONFIG_NET_ACT_GACT=y -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=y -CONFIG_NET_ACT_IPT=y -CONFIG_NET_ACT_NAT=y -CONFIG_NET_ACT_PEDIT=y -CONFIG_NET_ACT_SIMP=y -CONFIG_NET_ACT_SKBEDIT=y -CONFIG_NET_ACT_CSUM=y -CONFIG_NET_ACT_BPF=y - -# --- Netfilter --- -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_SNMP=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_SIP=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NF_CT_NETLINK_TIMEOUT=y -CONFIG_NF_CT_NETLINK_HELPER=y -CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=y -CONFIG_NF_TABLES=y -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_NETDEV=y -CONFIG_NFT_CT=y -CONFIG_NFT_CONNLIMIT=y -CONFIG_NFT_LOG=y -CONFIG_NFT_LIMIT=y -CONFIG_NFT_MASQ=y -CONFIG_NFT_REDIR=y -CONFIG_NFT_NAT=y -CONFIG_NFT_TUNNEL=y -CONFIG_NFT_QUEUE=y -CONFIG_NFT_REJECT=y -CONFIG_NFT_COMPAT=y -CONFIG_NFT_HASH=y -CONFIG_NFT_OSF=y -CONFIG_NFT_TPROXY=y -CONFIG_NFT_DUP_NETDEV=y -CONFIG_NFT_FWD_NETDEV=y -CONFIG_NETFILTER_XT_SET=y -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_DSCP=y -CONFIG_NETFILTER_XT_TARGET_HMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y 
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y -CONFIG_NETFILTER_XT_MATCH_BPF=y -CONFIG_NETFILTER_XT_MATCH_CGROUP=y -CONFIG_NETFILTER_XT_MATCH_CLUSTER=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_CPU=y -CONFIG_NETFILTER_XT_MATCH_DCCP=y -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_IPCOMP=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -CONFIG_NETFILTER_XT_MATCH_IPVS=y -CONFIG_NETFILTER_XT_MATCH_L2TP=y -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_NFACCT=y -CONFIG_NETFILTER_XT_MATCH_OSF=y -CONFIG_NETFILTER_XT_MATCH_OWNER=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_RATEEST=y -CONFIG_NETFILTER_XT_MATCH_REALM=y -CONFIG_NETFILTER_XT_MATCH_RECENT=y -CONFIG_NETFILTER_XT_MATCH_SCTP=y -CONFIG_NETFILTER_XT_MATCH_SOCKET=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_IP_SET=y -CONFIG_IP_SET_BITMAP_IP=y -CONFIG_IP_SET_BITMAP_IPMAC=y -CONFIG_IP_SET_BITMAP_PORT=y -CONFIG_IP_SET_HASH_IP=y -CONFIG_IP_SET_HASH_IPPORT=y -CONFIG_IP_SET_HASH_IPPORTIP=y -CONFIG_IP_SET_HASH_IPPORTNET=y -CONFIG_IP_SET_HASH_NET=y -CONFIG_IP_SET_HASH_NETPORT=y -CONFIG_IP_SET_HASH_NETIFACE=y -CONFIG_IP_SET_LIST_SET=y 
-CONFIG_IP_VS=y -CONFIG_IP_VS_IPV6=y -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y -CONFIG_IP_VS_RR=y -CONFIG_IP_VS_WRR=y -CONFIG_IP_VS_LC=y -CONFIG_IP_VS_WLC=y -CONFIG_IP_VS_FO=y -CONFIG_IP_VS_OVF=y -CONFIG_IP_VS_LBLC=y -CONFIG_IP_VS_LBLCR=y -CONFIG_IP_VS_DH=y -CONFIG_IP_VS_SH=y -CONFIG_IP_VS_MH=y -CONFIG_IP_VS_SED=y -CONFIG_IP_VS_NQ=y -CONFIG_IP_VS_FTP=y -CONFIG_NFT_DUP_IPV4=y -CONFIG_NF_TABLES_ARP=y -CONFIG_NF_LOG_ARP=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_TARGET_SYNPROXY=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_TARGET_ECN=y -CONFIG_IP_NF_TARGET_TTL=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y -CONFIG_NFT_DUP_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_AH=y -CONFIG_IP6_NF_MATCH_EUI64=y -CONFIG_IP6_NF_MATCH_FRAG=y -CONFIG_IP6_NF_MATCH_OPTS=y -CONFIG_IP6_NF_MATCH_HL=y -CONFIG_IP6_NF_MATCH_IPV6HEADER=y -CONFIG_IP6_NF_MATCH_MH=y -CONFIG_IP6_NF_MATCH_RPFILTER=y -CONFIG_IP6_NF_MATCH_RT=y -CONFIG_IP6_NF_TARGET_HL=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_TARGET_SYNPROXY=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_IP6_NF_SECURITY=y -CONFIG_IP6_NF_NAT=y -CONFIG_IP6_NF_TARGET_MASQUERADE=y -CONFIG_IP6_NF_TARGET_NPT=y -CONFIG_NF_TABLES_BRIDGE=y -CONFIG_NFT_BRIDGE_REJECT=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_BRIDGE_EBT_T_FILTER=y -CONFIG_BRIDGE_EBT_T_NAT=y -CONFIG_BRIDGE_EBT_802_3=y -CONFIG_BRIDGE_EBT_AMONG=y -CONFIG_BRIDGE_EBT_ARP=y -CONFIG_BRIDGE_EBT_IP=y -CONFIG_BRIDGE_EBT_IP6=y -CONFIG_BRIDGE_EBT_LIMIT=y -CONFIG_BRIDGE_EBT_MARK=y -CONFIG_BRIDGE_EBT_PKTTYPE=y 
-CONFIG_BRIDGE_EBT_STP=y -CONFIG_BRIDGE_EBT_VLAN=y -CONFIG_BRIDGE_EBT_ARPREPLY=y -CONFIG_BRIDGE_EBT_DNAT=y -CONFIG_BRIDGE_EBT_MARK_T=y -CONFIG_BRIDGE_EBT_REDIRECT=y -CONFIG_BRIDGE_EBT_SNAT=y -CONFIG_BRIDGE_EBT_LOG=y -CONFIG_BRIDGE_EBT_NFLOG=y - -# --- Block devices --- -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_THROTTLING=y -CONFIG_BLK_CGROUP_IOLATENCY=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_LDM_PARTITION=y -CONFIG_CMDLINE_PARTITION=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_DRBD=m -CONFIG_BLK_DEV_NBD=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_ATA_OVER_ETH=m -CONFIG_BLK_DEV_NVME=y - -# --- SCSI --- -CONFIG_SCSI=y -# CONFIG_SCSI_PROC_FS is not set -CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y -CONFIG_ISCSI_TCP=m -CONFIG_SCSI_HPSA=y -CONFIG_MEGARAID_SAS=y -CONFIG_SCSI_MPT3SAS=y -CONFIG_SCSI_MPI3MR=m -CONFIG_SCSI_SMARTPQI=m -CONFIG_VMWARE_PVSCSI=y -CONFIG_XEN_SCSI_FRONTEND=y -CONFIG_SCSI_VIRTIO=y - -# --- ATA / SATA --- -CONFIG_ATA=y -# CONFIG_ATA_VERBOSE_ERROR is not set -# CONFIG_SATA_PMP is not set -CONFIG_SATA_AHCI=y -CONFIG_ATA_PIIX=y -CONFIG_SATA_MV=y -CONFIG_SATA_NV=y -CONFIG_SATA_PROMISE=y -CONFIG_SATA_SIL=y -CONFIG_SATA_SIS=y -CONFIG_SATA_SVW=y -CONFIG_SATA_ULI=y -CONFIG_SATA_VIA=y -CONFIG_SATA_VITESSE=y -CONFIG_ATA_GENERIC=y - -# --- Device mapper / MD --- -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_LINEAR=y -CONFIG_MD_RAID0=y -CONFIG_MD_RAID1=y -CONFIG_MD_RAID10=y -CONFIG_MD_RAID456=y -CONFIG_MD_MULTIPATH=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_CRYPT=y -CONFIG_DM_SNAPSHOT=y -CONFIG_DM_THIN_PROVISIONING=y -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m - -# --- Fusion --- -CONFIG_FUSION=y -CONFIG_FUSION_SPI=y - -# --- Network drivers (built-in for reliable boot) --- -CONFIG_NETDEVICES=y -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_AGERE is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# 
CONFIG_NET_VENDOR_AMD is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_ATHEROS is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CHELSIO is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_DEC is not set -# CONFIG_NET_VENDOR_DLINK is not set -# CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_I825XX is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROCHIP is not set -# CONFIG_NET_VENDOR_MYRI is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETERION is not set -# CONFIG_NET_VENDOR_NVIDIA is not set -# CONFIG_NET_VENDOR_OKI is not set -# CONFIG_NET_VENDOR_PACKET_ENGINES is not set -# CONFIG_NET_VENDOR_QLOGIC is not set -# CONFIG_NET_VENDOR_BROCADE is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RDC is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set -CONFIG_E1000=y -CONFIG_E1000E=y -CONFIG_IGB=y -CONFIG_IGBVF=y -CONFIG_IXGBE=y -CONFIG_IXGBEVF=y -CONFIG_I40E=y -CONFIG_I40EVF=y -CONFIG_ICE=y -CONFIG_IGC=y -CONFIG_MLX4_EN=y -CONFIG_MLX5_CORE=y -CONFIG_MLX5_CORE_EN=y -CONFIG_BNXT=y -CONFIG_CNIC=m -CONFIG_TIGON3=y -CONFIG_BNX2X=y -CONFIG_R8169=y -CONFIG_8139CP=y -CONFIG_8139TOO=y 
-CONFIG_VMXNET3=y -CONFIG_HYPERV_NET=y -CONFIG_ENA_ETHERNET=m -CONFIG_GVE=m -CONFIG_NFP=m -CONFIG_IONIC=y -CONFIG_VIRTIO_NET=y - -# --- USB network --- -CONFIG_USB_CATC=m -CONFIG_USB_KAWETH=m -CONFIG_USB_PEGASUS=m -CONFIG_USB_RTL8150=y -CONFIG_USB_RTL8152=y -CONFIG_USB_LAN78XX=m -CONFIG_USB_USBNET=y -CONFIG_USB_NET_AX8817X=m -CONFIG_USB_NET_AX88179_178A=m -CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m -CONFIG_USB_NET_NET1080=m -CONFIG_USB_NET_CDC_SUBSET=m -CONFIG_USB_NET_ZAURUS=m - -# --- WAN --- -CONFIG_WAN=y -CONFIG_HDLC=m -CONFIG_HDLC_CISCO=m - -# --- PPP --- -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m - -# --- USB --- -CONFIG_USB_SUPPORT=y -CONFIG_USB=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_UHCI_HCD=y -CONFIG_USB_XHCI_PCI=y -CONFIG_USB_UAS=y -CONFIG_USB_STORAGE=y - -# --- USB serial (BMC/console access) --- -CONFIG_USB_SERIAL=y -CONFIG_USB_SERIAL_CONSOLE=y -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_CH341=y -CONFIG_USB_SERIAL_CP210X=y -CONFIG_USB_SERIAL_FTDI_SIO=y -CONFIG_USB_SERIAL_PL2303=y -CONFIG_USB_SERIAL_TI=y -CONFIG_USB_SERIAL_OPTION=y - -# --- Input --- -CONFIG_INPUT_FF_MEMLESS=y -CONFIG_INPUT_SPARSEKMAP=y -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_MISC=y -CONFIG_INPUT_PCSPKR=y -CONFIG_INPUT_ATLAS_BTNS=y -CONFIG_INPUT_UINPUT=y -CONFIG_SERIO_PCIPS2=y -CONFIG_SERIO_RAW=y -# CONFIG_LEGACY_PTYS is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TOUCHSCREEN is not set - -# --- MMC --- -CONFIG_MMC=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PCI=y -# CONFIG_MMC_RICOH_MMC is not set -CONFIG_MMC_SDHCI_ACPI=y -CONFIG_MMC_SDHCI_PLTFM=y - -# --- Filesystems --- -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_XFS_FS=y 
-CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -CONFIG_BTRFS_FS=m -CONFIG_BTRFS_FS_POSIX_ACL=y -CONFIG_FS_DAX=y -CONFIG_FS_ENCRYPTION=y -CONFIG_FANOTIFY=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_VFAT_FS=y -CONFIG_MSDOS_FS=y -CONFIG_FAT_DEFAULT_IOCHARSET="utf8" -CONFIG_NTFS_FS=m -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_SYSFS=y -CONFIG_EFIVAR_FS=y -CONFIG_OVERLAY_FS=y -CONFIG_SQUASHFS=y -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_LZ4=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -CONFIG_FUSE_FS=y -CONFIG_CUSE=y -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=y -CONFIG_FSCACHE=y -CONFIG_FSCACHE_STATS=y -CONFIG_CACHEFILES=y -CONFIG_NFS_FS=m -# CONFIG_NFS_V2 is not set -CONFIG_NFS_V4=m -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_NFS_FSCACHE=y -CONFIG_NFSD=m -CONFIG_NFSD_V4=y -CONFIG_CEPH_FS=m -CONFIG_CEPH_FSCACHE=y -CONFIG_CEPH_FS_POSIX_ACL=y -CONFIG_CIFS=y -# CONFIG_CIFS_ALLOW_INSECURE_LEGACY is not set -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_DFS_UPCALL=y -CONFIG_CIFS_FSCACHE=y -CONFIG_9P_FS=y -CONFIG_9P_FSCACHE=y -CONFIG_9P_FS_POSIX_ACL=y -CONFIG_9P_FS_SECURITY=y - -# --- NLS --- -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_UTF8=y - -# --- Virtio (built-in for reliable boot) --- -CONFIG_VIRTIO=y -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_BLK=y -CONFIG_VIRTIO_CONSOLE=y -CONFIG_VIRTIO_BALLOON=y -CONFIG_VIRTIO_INPUT=y -CONFIG_VIRTIO_MMIO=y -CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y -CONFIG_HW_RANDOM_VIRTIO=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_VSOCK=m - -# --- Console / Serial --- -CONFIG_VT=y -CONFIG_VT_CONSOLE=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=32 -# CONFIG_SERIAL_8250_MID is not set -CONFIG_SERIAL_DEV_BUS=y -CONFIG_TTY_PRINTK=y -CONFIG_HW_CONSOLE=y -CONFIG_VGA_CONSOLE=y -CONFIG_DUMMY_CONSOLE=y - -# --- IPMI --- -CONFIG_IPMI_HANDLER=y -CONFIG_IPMI_DMI_DECODE=y -CONFIG_IPMI_DEVICE_INTERFACE=y 
-CONFIG_IPMI_SI=y -CONFIG_IPMI_POWEROFF=y - -# --- Hardware RNG --- -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=y - -# --- I2C --- -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y - -# --- TPM --- -CONFIG_TCG_TIS_I2C_ATMEL=m -CONFIG_TCG_TIS_I2C_INFINEON=m -CONFIG_TCG_TIS_I2C_NUVOTON=m -CONFIG_TCG_NSC=m -CONFIG_TCG_ATMEL=m -CONFIG_TCG_INFINEON=m -CONFIG_TCG_XEN=m -CONFIG_TCG_VTPM_PROXY=m -CONFIG_TCG_TIS_ST33ZP24_I2C=m - -# --- Platform / MFD --- -CONFIG_NVRAM=y -CONFIG_HPET=y -CONFIG_HANGCHECK_TIMER=y -CONFIG_LPC_ICH=y -CONFIG_LPC_SCH=y -CONFIG_MFD_INTEL_LPSS_ACPI=y -CONFIG_MFD_INTEL_LPSS_PCI=y -CONFIG_MFD_SM501=y -CONFIG_MFD_VX855=y -# CONFIG_THERMAL_HWMON is not set - -# --- RTC --- -CONFIG_RTC_CLASS=y - -# --- DMA --- -CONFIG_DMADEVICES=y - -# --- Security --- -CONFIG_SECCOMP=y -CONFIG_SECCOMP_FILTER=y -CONFIG_SECURITY=y -CONFIG_SECURITY_NETWORK=y -CONFIG_SECURITY_NETWORK_XFRM=y -CONFIG_SECURITY_PATH=y -CONFIG_SECURITY_DMESG_RESTRICT=y -CONFIG_KEYS=y -CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_TRUSTED_KEYS=y -CONFIG_KEY_DH_OPERATIONS=y -CONFIG_HARDENED_USERCOPY=y -CONFIG_FORTIFY_SOURCE=y -CONFIG_STATIC_USERMODEHELPER=y -CONFIG_SECURITY_YAMA=y -CONFIG_INTEGRITY_SIGNATURE=y -CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y -CONFIG_IMA=y -CONFIG_IMA_DEFAULT_HASH_SHA256=y -CONFIG_IMA_READ_POLICY=y -CONFIG_IMA_APPRAISE=y -CONFIG_EVM=y -CONFIG_LSM="yama,loadpin,safesetid,integrity" - -# --- Crypto --- -CONFIG_CRYPTO=y -CONFIG_CRYPTO_USER=y -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_XTS=y -CONFIG_CRYPTO_ANUBIS=y -CONFIG_CRYPTO_BLOWFISH=y -CONFIG_CRYPTO_CAMELLIA=y -CONFIG_CRYPTO_DES=y -CONFIG_CRYPTO_FCRYPT=y -CONFIG_CRYPTO_KHAZAD=y -CONFIG_CRYPTO_SEED=y -CONFIG_CRYPTO_TEA=y -CONFIG_CRYPTO_TWOFISH=y -CONFIG_CRYPTO_ARC4=y -CONFIG_CRYPTO_KEYWRAP=y -CONFIG_CRYPTO_LRW=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_CHACHA20POLY1305=y -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_ECHAINIV=y -CONFIG_CRYPTO_MICHAEL_MIC=y -CONFIG_CRYPTO_RMD160=y -CONFIG_CRYPTO_VMAC=y 
-CONFIG_CRYPTO_WP512=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_CRC32=y -CONFIG_CRYPTO_LZO=y -CONFIG_CRYPTO_842=y -CONFIG_CRYPTO_LZ4=y -CONFIG_CRYPTO_LZ4HC=y -CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_USER_API_RNG=y -CONFIG_CRYPTO_USER_API_AEAD=y - -# --- x86_64 hardware crypto acceleration --- -CONFIG_CRYPTO_AES_NI_INTEL=y -CONFIG_CRYPTO_BLOWFISH_X86_64=y -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=y -CONFIG_CRYPTO_CAST5_AVX_X86_64=y -CONFIG_CRYPTO_CAST6_AVX_X86_64=y -CONFIG_CRYPTO_DES3_EDE_X86_64=y -CONFIG_CRYPTO_SERPENT_SSE2_X86_64=y -CONFIG_CRYPTO_SERPENT_AVX2_X86_64=y -CONFIG_CRYPTO_TWOFISH_AVX_X86_64=y -CONFIG_CRYPTO_CHACHA20_X86_64=y -CONFIG_CRYPTO_POLY1305_X86_64=y -CONFIG_CRYPTO_SHA1_SSSE3=y -CONFIG_CRYPTO_SHA256_SSSE3=y -CONFIG_CRYPTO_SHA512_SSSE3=y -CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=y -CONFIG_CRYPTO_CRC32C_INTEL=y -CONFIG_CRYPTO_CRC32_PCLMUL=y -CONFIG_CRYPTO_DEV_PADLOCK=y -CONFIG_CRYPTO_DEV_PADLOCK_AES=y -CONFIG_CRYPTO_DEV_PADLOCK_SHA=y -CONFIG_CRYPTO_DEV_VIRTIO=m -CONFIG_PKCS7_MESSAGE_PARSER=y - -# --- BPF --- -CONFIG_BPF=y -CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT=y -CONFIG_BPF_JIT_ALWAYS_ON=y - -# --- PCI --- -CONFIG_PCI=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCI_STUB=y -CONFIG_PCI_IOV=y -# CONFIG_VGA_ARB is not set -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_SHPC=y -CONFIG_PCI_HYPERV_INTERFACE=m - -# --- IOMMU --- -CONFIG_AMD_IOMMU=y -CONFIG_INTEL_IOMMU=y -CONFIG_IRQ_REMAP=y - -# --- Xen --- -# CONFIG_XEN_BACKEND is not set -CONFIG_XEN_GNTDEV=y -CONFIG_XEN_GRANT_DEV_ALLOC=y -CONFIG_XEN_PVCALLS_FRONTEND=y -CONFIG_XEN_ACPI_PROCESSOR=y -# CONFIG_XEN_SYMS is not set - -# --- Intel platform --- -CONFIG_INTEL_IPS=y - -# --- DEVFREQ --- -CONFIG_PM_DEVFREQ=y -CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y - -# --- Reset --- -CONFIG_RESET_CONTROLLER=y - -# --- DAX --- -CONFIG_DEV_DAX=y -CONFIG_DEV_DAX_PMEM=m - -# --- Misc --- -CONFIG_FW_LOADER=y -CONFIG_PRINTK=y 
-CONFIG_PRINTK_TIME=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_EARLY_PRINTK=y -CONFIG_PANIC_TIMEOUT=10 -CONFIG_PANIC_ON_OOPS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_UEVENT_HELPER=y -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_CONNECTOR=y -CONFIG_DMI_SYSFS=y -CONFIG_SYSFB_SIMPLEFB=y -CONFIG_RESET_ATTACK_MITIGATION=y -# CONFIG_PNP_DEBUG_MESSAGES is not set -CONFIG_HYPERV=y -CONFIG_HYPERV_UTILS=y -CONFIG_HYPERV_BALLOON=y - -# --- Module compression --- -CONFIG_MODULE_COMPRESS_ZSTD=y -CONFIG_MODULE_DECOMPRESS=y - -# --- EFI boot support --- -CONFIG_EFI=y -CONFIG_EFI_STUB=y -CONFIG_EFI_MIXED=y - -# --- Framebuffer/Console (required for EFI boot video output) --- -CONFIG_DRM=y -CONFIG_DRM_I915=y -CONFIG_DRM_SIMPLEDRM=y -CONFIG_FB=y -CONFIG_FB_EFI=y -CONFIG_FRAMEBUFFER_CONSOLE=y - -# --- Debugging / Diagnostics --- -CONFIG_FRAME_WARN=1024 -CONFIG_BUG_ON_DATA_CORRUPTION=y -CONFIG_HARDLOCKUP_DETECTOR=y -CONFIG_WQ_WATCHDOG=y -CONFIG_DEBUG_NOTIFIERS=y -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_RCU_TRACE is not set -CONFIG_IO_STRICT_DEVMEM=y - -# --- Disable unnecessary subsystems --- -# CONFIG_SOUND is not set -# CONFIG_WIRELESS is not set -# CONFIG_WLAN is not set -# CONFIG_BLUETOOTH is not set -# CONFIG_NFC is not set -# CONFIG_INFINIBAND is not set -# CONFIG_MEDIA_SUPPORT is not set diff --git a/kernel.configs/6.19.y.arm64 b/kernel.configs/6.19.y.arm64 deleted file mode 100644 index 795c91e..0000000 --- a/kernel.configs/6.19.y.arm64 +++ /dev/null @@ -1,515 +0,0 @@ -# CaptainOS kernel defconfig for arm64 (aarch64) -# Minimal config for bare-metal provisioning with container/network support. -# Based on 6.12.y config with targeted additions for CaptainOS use case, adapted for kernel 6.18. 
-# -# Generate a full .config from this: -# cp 6.18.y.arm64 .config && make ARCH=arm64 olddefconfig - -# --- Architecture --- -CONFIG_ARM64=y -CONFIG_SMP=y -CONFIG_NR_CPUS=128 -CONFIG_NUMA=y -CONFIG_ARM64_VA_BITS_48=y -CONFIG_SCHED_MC=y -CONFIG_SCHED_SMT=y -CONFIG_HZ_1000=y -CONFIG_XEN=y -CONFIG_COMPAT=y -CONFIG_RANDOMIZE_BASE=y -CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y - -# --- General --- -CONFIG_LOCALVERSION="-captainos" -CONFIG_DEFAULT_HOSTNAME="captainos" -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_AUDIT=y -CONFIG_NO_HZ_IDLE=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_KEXEC=y -CONFIG_KEXEC_FILE=y -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_CGROUPS=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -CONFIG_CGROUP_MISC=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CGROUP_RDMA=y -CONFIG_CPUSETS=y -CONFIG_MEMCG=y -CONFIG_BLK_CGROUP=y -CONFIG_BLK_CGROUP_IOLATENCY=y -CONFIG_BLK_DEV_THROTTLING=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_SCHED=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_NAMESPACES=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_CHECKPOINT_RESTORE=y -# Memory management -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_KSM=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_HUGETLBFS=y - -# --- Init / Initramfs --- -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" - -# --- Networking --- -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IPGRE=m -CONFIG_SYN_COOKIES=y -CONFIG_IPV6=y -CONFIG_BRIDGE=y -CONFIG_VLAN_8021Q=m -CONFIG_BONDING=m -CONFIG_TUN=y -CONFIG_VETH=y -CONFIG_MACVLAN=y -CONFIG_MACVTAP=y -CONFIG_IPVLAN=y -CONFIG_TAP=y 
-CONFIG_DUMMY=m -CONFIG_VXLAN=y -CONFIG_GENEVE=m -CONFIG_NLMON=y -# Traffic control / QoS (needed by CNI plugins) -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_CLS_BASIC=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=y -CONFIG_NET_CLS_MATCHALL=y -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=y -CONFIG_NET_ACT_GACT=y -CONFIG_NET_ACT_MIRRED=y -CONFIG_NET_ACT_BPF=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_U32=y - -# --- Netfilter --- -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_NAT=y -CONFIG_NF_TABLES=y -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_IPV4=y -CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_NAT=y -CONFIG_NFT_MASQ=y -CONFIG_NFT_CT=y -CONFIG_NFT_COMPAT=y -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_NAT=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_NAT=y -CONFIG_BRIDGE_NETFILTER=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_BRIDGE_EBT_T_FILTER=y -CONFIG_BRIDGE_EBT_T_NAT=y -CONFIG_BRIDGE_EBT_ARP=y -CONFIG_BRIDGE_EBT_IP=y -CONFIG_BRIDGE_EBT_IP6=y -CONFIG_BRIDGE_EBT_MARK=y -CONFIG_BRIDGE_EBT_MARK_T=y -CONFIG_BRIDGE_EBT_VLAN=y -# IPVS (container load balancing / kube-proxy) -CONFIG_IP_VS=y -CONFIG_IP_VS_IPV6=y -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_RR=y -CONFIG_IP_VS_WRR=y -CONFIG_IP_VS_SH=y -CONFIG_IP_VS_NFCT=y -# IP sets (used by iptables/nftables for efficient matching) -CONFIG_IP_SET=y -CONFIG_IP_SET_HASH_IP=y -CONFIG_IP_SET_HASH_IPPORT=y -CONFIG_IP_SET_HASH_IPPORTNET=y -CONFIG_IP_SET_HASH_NET=y -CONFIG_IP_SET_HASH_NETPORT=y -CONFIG_IP_SET_BITMAP_IP=y -CONFIG_IP_SET_BITMAP_PORT=y 
-CONFIG_IP_SET_LIST_SET=y -CONFIG_NETFILTER_XT_SET=y - -# --- Block devices --- -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_NBD=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_BLK_DEV_NVME=y -CONFIG_ATA=y -CONFIG_SATA_AHCI=y -CONFIG_SATA_AHCI_PLATFORM=y - -# --- SCSI --- -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_MEGARAID_SAS=y -CONFIG_SCSI_MPT3SAS=y -CONFIG_SCSI_MPI3MR=m -CONFIG_SCSI_HPSA=y -CONFIG_SCSI_HISI_SAS=y -CONFIG_SCSI_HISI_SAS_PCI=y -CONFIG_XEN_SCSI_FRONTEND=y -CONFIG_SCSI_VIRTIO=y - -# --- Device mapper --- -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_CRYPT=y -CONFIG_DM_THIN_PROVISIONING=y -CONFIG_DM_SNAPSHOT=y -CONFIG_DM_MULTIPATH=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m - -# --- Network drivers (built-in for reliable boot) --- -CONFIG_NETDEVICES=y -CONFIG_VIRTIO_NET=y -CONFIG_E1000E=y -CONFIG_IGB=y -CONFIG_IXGBE=y -CONFIG_I40E=y -CONFIG_ICE=y -CONFIG_IGC=y -CONFIG_MLX4_EN=y -CONFIG_MLX5_CORE=y -CONFIG_MLX5_CORE_EN=y -CONFIG_BNXT=y -CONFIG_TIGON3=y -CONFIG_R8169=y -CONFIG_THUNDER_NIC_BGX=m -CONFIG_THUNDER_NIC_PF=m -CONFIG_THUNDER_NIC_VF=m -CONFIG_MACB=m -CONFIG_HNS3=m -CONFIG_NET_XGENE=y -CONFIG_MVNETA=y -CONFIG_MVPP2=y -CONFIG_STMMAC_ETH=m -CONFIG_ENA_ETHERNET=m -CONFIG_GVE=m -CONFIG_VMXNET3=y -CONFIG_AMD_XGBE=y -# Broadcom GENET (Raspberry Pi 4B built-in Ethernet) -CONFIG_BCMGENET=y -# PHY subsystem (required by GENET and other MAC drivers) -CONFIG_PHYLIB=y -CONFIG_MDIO_BUS=y -CONFIG_BROADCOM_PHY=y -CONFIG_BCM7XXX_PHY=y -CONFIG_MDIO_BCM_UNIMAC=y -CONFIG_MARVELL_PHY=y -CONFIG_MARVELL_10G_PHY=y -CONFIG_MICREL_PHY=y -CONFIG_ROCKCHIP_PHY=y -CONFIG_REALTEK_PHY=y -CONFIG_MESON_GXL_PHY=y -CONFIG_AMD_PHY=y - -# --- USB --- -CONFIG_USB_SUPPORT=y -CONFIG_USB=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_XHCI_PCI=y -CONFIG_USB_UAS=y -CONFIG_USB_STORAGE=y -CONFIG_USB_DWC3=y -CONFIG_USB_DWC3_HOST=y -CONFIG_USB_DWC2=y -CONFIG_USB_ISP1760=y - -# --- MMC/SD (Raspberry Pi, Rockchip, 
and other SBCs) --- -CONFIG_MMC=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PCI=y -CONFIG_MMC_SDHCI_ACPI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_OF_ARASAN=y -CONFIG_MMC_SDHCI_OF_DWCMSHC=y -CONFIG_MMC_SDHCI_CADENCE=y -CONFIG_MMC_SDHCI_TEGRA=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_XENON=y -CONFIG_MMC_SDHCI_F_SDH30=y -CONFIG_MMC_ARMMMCI=y -CONFIG_MMC_DW=y -CONFIG_MMC_DW_ROCKCHIP=y -CONFIG_MMC_DW_K3=y -CONFIG_MMC_MESON_GX=y -CONFIG_MMC_SUNXI=m -CONFIG_MMC_SPI=y -CONFIG_MMC_HSQ=y - -# --- Filesystems --- -CONFIG_EXT4_FS=y -CONFIG_XFS_FS=m -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_PROC_FS=y -CONFIG_SYSFS=y -CONFIG_OVERLAY_FS=y -CONFIG_SQUASHFS=y -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_LZ4=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -CONFIG_NFS_FS=m -CONFIG_NFS_V4=m -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_FUSE_FS=y -CONFIG_CUSE=y -CONFIG_EFIVAR_FS=y -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_UDF_FS=y -CONFIG_MSDOS_FS=y -CONFIG_FAT_DEFAULT_IOCHARSET="utf8" -CONFIG_NTFS_FS=m -CONFIG_TMPFS_XATTR=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_UTF8=y - -# --- Virtio (built-in for reliable boot) --- -CONFIG_VIRTIO=y -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_BLK=y -CONFIG_VIRTIO_CONSOLE=y -CONFIG_VIRTIO_BALLOON=y -CONFIG_VIRTIO_INPUT=y -CONFIG_VIRTIO_MMIO=y -CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y -CONFIG_HW_RANDOM_VIRTIO=y - -# --- Console / Serial --- -CONFIG_SERIAL_AMBA_PL011=y -CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_VT=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -CONFIG_DUMMY_CONSOLE=y - -# --- Security --- -CONFIG_SECCOMP=y -CONFIG_SECCOMP_FILTER=y -CONFIG_SECURITY=y -CONFIG_KEYS=y - -# --- Crypto --- -CONFIG_CRYPTO=y -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_XTS=y -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_AES_ARM64_CE=y -CONFIG_CRYPTO_SHA2_ARM64_CE=y - -# --- BPF --- 
-CONFIG_BPF=y -CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT=y -CONFIG_BPF_JIT_ALWAYS_ON=y -CONFIG_XDP_SOCKETS=y - -# --- ARM64 SoC/platform support --- -CONFIG_ARCH_BCM2835=y -CONFIG_ARCH_ROCKCHIP=y -CONFIG_ARCH_MESON=y -CONFIG_ARCH_SUNXI=y -CONFIG_ARCH_TEGRA=y -CONFIG_ARCH_MVEBU=y -CONFIG_ARCH_HISI=y -CONFIG_ARCH_QCOM=y -CONFIG_ARCH_EXYNOS=y -CONFIG_ARCH_VEXPRESS=y -CONFIG_ARCH_XGENE=y -CONFIG_ARCH_THUNDER=y -CONFIG_ARCH_THUNDER2=y -CONFIG_ARCH_SEATTLE=y -CONFIG_ARCH_SYNQUACER=y -CONFIG_ARCH_UNIPHIER=y -# ACPI / UEFI -CONFIG_ACPI=y -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_IPMI=y -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y -CONFIG_IPMI_HANDLER=m -CONFIG_IPMI_DMI_DECODE=y -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -CONFIG_PCI=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI=y -CONFIG_PCI_HOST_GENERIC=y -CONFIG_PCIE_ROCKCHIP_HOST=y -CONFIG_PCIE_ARMADA_8K=y -CONFIG_PCIE_TEGRA194_HOST=y -# Broadcom STB PCIe controller (Raspberry Pi 4B — needed for VL805 USB 3.0) -CONFIG_PCIE_BRCMSTB=y -CONFIG_PCIE_ALTERA=y -CONFIG_PCI_AARDVARK=y -CONFIG_PCI_TEGRA=y -CONFIG_PCI_XGENE=y -CONFIG_PCI_HOST_THUNDER_PEM=y -CONFIG_PCI_HOST_THUNDER_ECAM=y -CONFIG_PCIE_QCOM=y -CONFIG_PCIE_AL=y -CONFIG_PCI_MESON=y -CONFIG_PCI_HISI=y -CONFIG_PCIE_KIRIN=y -CONFIG_PCIE_HISI_STB=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PASID=y -CONFIG_HOTPLUG_PCI_ACPI=y -# IOMMU -CONFIG_ARM_SMMU=y -CONFIG_ARM_SMMU_V3=y -CONFIG_FW_LOADER=y -# Raspberry Pi firmware mailbox (required for GENET MAC address, clocks, etc.) 
-CONFIG_RASPBERRYPI_FIRMWARE=y -CONFIG_RASPBERRYPI_POWER=y -CONFIG_ARM_SCMI_PROTOCOL=y -CONFIG_ARM_SCPI_PROTOCOL=y -CONFIG_OF=y -CONFIG_USE_OF=y -# RTC -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_EFI=y -# DMA -CONFIG_DMADEVICES=y -CONFIG_PL330_DMA=y -CONFIG_PRINTK=y -CONFIG_PRINTK_TIME=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_PANIC_TIMEOUT=10 -CONFIG_PANIC_ON_OOPS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_RD_GZIP=y -CONFIG_RD_XZ=y -CONFIG_RD_ZSTD=y -CONFIG_UEVENT_HELPER=y -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_CONNECTOR=y -CONFIG_DMI_SYSFS=y -CONFIG_EFI=y -CONFIG_EFI_STUB=y - -# --- Framebuffer/Console (required for EFI boot video output) --- -CONFIG_DRM=y -CONFIG_DRM_SIMPLEDRM=y -CONFIG_DRM_VIRTIO_GPU=y -CONFIG_FB=y -CONFIG_FB_EFI=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_SYSFB_SIMPLEFB=y -# Virtualization -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=y -CONFIG_VFIO=y -CONFIG_VFIO_PCI=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_VSOCK=m - -# --- Module compression --- -CONFIG_MODULE_COMPRESS_ZSTD=y -CONFIG_MODULE_DECOMPRESS=y - -# --- I2C (SBC board management, EEPROM, sensors) --- -CONFIG_I2C=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MV64XXX=y -CONFIG_I2C_RK3X=y -CONFIG_I2C_TEGRA=y -CONFIG_I2C_DESIGNWARE_PLATFORM=y -# SPI -CONFIG_SPI=y -CONFIG_SPI_ROCKCHIP=y -CONFIG_SPI_PL022=y -CONFIG_SPI_ORION=y -# GPIO -CONFIG_GPIOLIB=y -# Hardware watchdog -CONFIG_WATCHDOG=y -CONFIG_SOFTLOCKUP_DETECTOR=y -CONFIG_WQ_WATCHDOG=y -# Hardware RNG -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=y -# TPM -CONFIG_TCG_TIS=y -CONFIG_TCG_FTPM_TEE=y -CONFIG_TEE=y -CONFIG_OPTEE=y - -# --- Disable unnecessary subsystems --- -# CONFIG_SOUND is not set -# CONFIG_WIRELESS is not set -# CONFIG_WLAN is not set -# CONFIG_BLUETOOTH is not set -# CONFIG_NFC is not set -# CONFIG_INFINIBAND is not set -# CONFIG_MEDIA_SUPPORT is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TOUCHSCREEN is not set diff --git a/mkosi.postinst b/mkosi.postinst deleted file mode 100755 index 
6ce6f59..0000000 --- a/mkosi.postinst +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# mkosi.postinst — Post-installation script for the CaptainOS initrd. -# Runs inside the image chroot after packages are installed. -# NOTE: The filesystem is mostly read-only here. Use mkosi.extra/ for -# creating directories and dropping config files. -set -euo pipefail - -echo "==> CaptainOS post-install: configuring services..." - -# Ensure CA certificate bundle is generated (dpkg triggers may not -# fire reliably in mkosi's chroot). -update-ca-certificates --fresh 2>/dev/null || true - -# Enable core services -for unit in systemd-networkd systemd-resolved systemd-timesyncd containerd captainos-banner \ - captainos-static-network tink-agent-setup tink-agent rsyslog rsyslog-hostname-reload.path; do - systemctl enable "$unit" 2>/dev/null || true -done - -# Root password is set via mkosi.conf RootPassword= setting - -# Disable unnecessary systemd units to speed up boot -for unit in \ - apt-daily.timer \ - apt-daily-upgrade.timer \ - e2scrub_all.timer \ - e2scrub_reap.service \ - fstrim.timer \ - logrotate.timer \ - man-db.timer \ - remote-fs.target \ - systemd-firstboot.service; do - systemctl disable "$unit" 2>/dev/null || true - systemctl mask "$unit" 2>/dev/null || true -done - -# Set default target to multi-user (no graphical) -systemctl set-default multi-user.target 2>/dev/null || true - -echo "==> CaptainOS post-install complete." 
diff --git a/pyproject.toml b/pyproject.toml index 2ab7dea..3ecbea2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,23 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + [project] +version = "0.0.1" name = "captain" -requires-python = ">=3.10" -dependencies = ["configargparse>=1.7"] +requires-python = ">=3.13" +dependencies = [ + "click>=8.1", + "rich>=14.3.3", + "jinja2>=3.1.6", + "trogon>=0.6.0", +] + +[project.scripts] +captain = "build:main" + +[project.optional-dependencies] +dev = ["ruff>=0.15.9", "pyright>=1.1"] [tool.ruff] target-version = "py310" @@ -22,6 +38,7 @@ select = [ "captain/iso.py" = ["E501"] # embedded grub config has long data lines [tool.pyright] -pythonVersion = "3.10" +pythonVersion = "3.13" typeCheckingMode = "standard" exclude = ["mkosi.tools", "mkosi.output", "__pycache__"] +include = ["captain", "build.py"] diff --git a/requirements-dev.txt b/requirements-dev.txt deleted file mode 100644 index fb5597f..0000000 --- a/requirements-dev.txt +++ /dev/null @@ -1,2 +0,0 @@ -ruff>=0.9 -pyright>=1.1 diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 595ad7c..0000000 --- a/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -configargparse>=1.7