diff --git a/src/cmd-build b/src/cmd-build
index 3ee8d9a5c5..b8836c5d42 100755
--- a/src/cmd-build
+++ b/src/cmd-build
@@ -181,6 +181,84 @@ build_followup_targets() {
     done
 }
 
+# Parse the passed config JSON and extract a mandatory value
+getconfig() {
+    k=$1
+    config=$2
+    jq -re .\""$k"\" < "${config}"
+}
+
+# Return a configuration value, or default if not set
+getconfig_def() {
+    k=$1
+    shift
+    default=$1
+    config=$2
+    jq -re .\""$k"\"//\""${default}"\" < "${config}"
+}
+
+# Here we generate the input JSON we pass to runvm_osbuild for all of our image builds
+generate_runvm_osbuild_config() {
+    # Grab a few values from $image_json
+    deploy_via_container=$(getconfig_def "deploy-via-container" "" "${image_json}")
+    extra_kargs="$(python3 -c 'import sys, json; args = json.load(sys.stdin)["extra-kargs"]; print(" ".join(args))' < "${image_json}")"
+
+    # The OSTree repo is at $tmprepo and the commit is $commit
+    ostree_repo="${tmprepo}"
+    ostree_commit="${commit}"
+
+    # OSTree container ociarchive file path and container_imgref
+    builddir=$(get_build_dir "${buildid}")
+    ostree_container="${builddir}/${ostree_tarfile_path}"
+    # If no container_imgref was set, let's just set it to some professional
+    # looking default. The name of the ociarchive file should suffice.
+    container_imgref_default="ostree-image-signed:oci-archive:/$(basename "${ostree_container}")"
+    container_imgref=$(getconfig_def "container_imgref" "${container_imgref_default}" "${image_json}")
+
+    echo "Estimating disk size..."
+    # The additional 35% here is obviously a hack, but we can't easily completely fill the filesystem,
+    # and doing so has apparently negative performance implications.
+    ostree_size_json="$(/usr/lib/coreos-assembler/estimate-commit-disk-size --repo "$ostree_repo" "$commit" --add-percent 35)"
+    rootfs_size_mb="$(jq '."estimate-mb".final' <<< "${ostree_size_json}")"
+    # The minimum size of a disk image we'll need will be the rootfs_size
+    # estimate plus the size of the non-root partitions. We'll use this
+    # size for the metal images, but for the IaaS/virt image we'll use
+    # the size set in the configs since some of them have minimum sizes that
+    # the platforms require and we want a "default" disk size that has some
+    # free space.
+    nonroot_partition_sizes=513
+    # On s390x there is one more build - Secure Execution case, which has
+    # different image layout. We add the sizes of the se and verity
+    # partitions so that they don't "eat into" the 35% buffer (though note
+    # this is all blown away on first boot anyway). For 's390x.mpp.yaml'
+    # simplicity all s390x images have same size (of secex image).
+    if [[ $basearch == "s390x" ]]; then
+        nonroot_partition_sizes=$((nonroot_partition_sizes + 200 + 128 + 256 + 1))
+    fi
+    metal_image_size_mb="$(( rootfs_size_mb + nonroot_partition_sizes ))"
+    cloud_image_size_mb="$(jq -r ".size*1024" < "${image_json}")"
+    echo "Disk sizes: metal: ${metal_image_size_mb}M (estimated), cloud: ${cloud_image_size_mb}M"
+
+    # Generate the JSON describing the disk we want to build
+    runvm_osbuild_config_json="runvm-osbuild-config.json"
+    yaml2json /dev/stdin "tmp/${runvm_osbuild_config_json}" < commitmeta.json.tmp
 /usr/lib/coreos-assembler/finalize-artifact commitmeta.json{.tmp,}
+# Generate the runvm-osbuild config file that will be used for all
+# later image builds.
+generate_runvm_osbuild_config
+
 # Clean up our temporary data
 saved_build_tmpdir="${workdir}/tmp/last-build-tmp"
 rm -rf "${saved_build_tmpdir}"
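
Note on the helpers above: `getconfig` relies on `jq -e` to turn a missing key (a null result) into a non-zero exit status, while `getconfig_def` uses jq's `//` alternative operator to substitute a default. A minimal, self-contained sketch of the two lookups; the file name and keys below are illustrative only, not the real generated config:

    # Illustrative config with a single key.
    printf '%s\n' '{"deploy-via-container": "true"}' > /tmp/example-config.json

    # Mandatory lookup: -e makes a null/false result exit non-zero.
    jq -re '."deploy-via-container"' < /tmp/example-config.json           # prints "true", exit 0
    jq -re '."container_imgref"' < /tmp/example-config.json               # prints "null", exit 1

    # Lookup with default: // substitutes the fallback when the key is null.
    jq -re '."container_imgref"//"fallback"' < /tmp/example-config.json   # prints "fallback", exit 0
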
diff --git a/src/cmd-buildextend-metal b/src/cmd-buildextend-metal
index d7c373c393..d74ed5109d 100755
--- a/src/cmd-buildextend-metal
+++ b/src/cmd-buildextend-metal
@@ -8,18 +8,18 @@ dn=$(dirname "$0")
 # This script is used for creating both the bare metal and the canonical VM
 # image (qemu). `buildextend-qemu` is a symlink to `buildextend-metal`.
 case "$(basename "$0")" in
-    "cmd-buildextend-metal") image_type=metal;;
-    "cmd-buildextend-metal4k") image_type=metal4k;;
-    "cmd-buildextend-qemu") image_type=qemu;;
-    "cmd-buildextend-qemu-secex") image_type=qemu-secex;;
-    "cmd-buildextend-secex") image_type=qemu-secex;;
+    "cmd-buildextend-metal") platform=metal;;
+    "cmd-buildextend-metal4k") platform=metal4k;;
+    "cmd-buildextend-qemu") platform=qemu;;
+    "cmd-buildextend-qemu-secex") platform=qemu-secex;;
+    "cmd-buildextend-secex") platform=qemu-secex;;
     *) fatal "called as unexpected name $0";;
 esac
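
Note: the case statement above only works because the various buildextend commands are installed as symlinks pointing at this one script. A standalone sketch of the dispatch pattern, with made-up script and link names:

    #!/usr/bin/env bash
    # dispatch.sh: choose behaviour from the name the script was invoked as.
    case "$(basename "$0")" in
        "build-qemu")  platform=qemu;;
        "build-metal") platform=metal;;
        *) echo "called as unexpected name $0" >&2; exit 1;;
    esac
    echo "selected platform: ${platform}"

    # Usage: ln -s dispatch.sh build-qemu && ./build-qemu
    #        -> selected platform: qemu
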

 print_help() {
     cat 1>&2 < "$PWD/tmp/ostree-size.json"
-rootfs_size_mb="$(jq '."estimate-mb".final' "$PWD/tmp/ostree-size.json")"
-# The minimum size of a disk image we'll need will be the rootfs_size
-# estimate plus the size of the non-root partitions. We'll use this
-# size for the metal images, but for the IaaS/virt image we'll use
-# the size set in the configs since some of them have minimum sizes that
-# the platforms require and we want a "default" disk size that has some
-# free space.
-nonroot_partition_sizes=513
-# On s390x there is one more build - Secure Execution case, which has
-# different image layout. We add the sizes of the se and verity
-# partitions so that they don't "eat into" the 35% buffer (though note
-# this is all blown away on first boot anyway). For 's390x.mpp.yaml'
-# simplicity all s390x images have same size (of secex image).
-if [[ $basearch == "s390x" ]]; then
-    nonroot_partition_sizes=$((nonroot_partition_sizes + 200 + 128 + 256 + 1))
-fi
-metal_image_size_mb="$(( rootfs_size_mb + nonroot_partition_sizes ))"
-cloud_image_size_mb="$(jq -r ".size*1024" < "${image_json}")"
-echo "Disk sizes: metal: ${metal_image_size_mb}M (estimated), cloud: ${cloud_image_size_mb}M"
-
-set -x
-extra_kargs="$(python3 -c 'import sys, json; args = json.load(sys.stdin)["extra-kargs"]; print(" ".join(args))' < "${image_json}")"
-
-# Generate the JSON describing the disk we want to build
-image_dynamic_yaml="${tmp_builddir}/image-dynamic.yaml"
-image_dynamic_json="${tmp_builddir}/image-dynamic.json"
-image_for_disk_json="${tmp_builddir}/image-for-disk.json"
-cat >"${image_dynamic_yaml}" << EOF
-container-imgref: "${container_imgref}"
-deploy-via-container: "${deploy_via_container}"
-osname: "${name}"
-ostree-container: "${ostree_container}"
-ostree-ref: "${ref}"
-extra-kargs-string: "${extra_kargs}"
-image-type: "${image_type}"
-ostree-repo: "${ostree_repo}"
-metal-image-size: "${metal_image_size_mb}"
-cloud-image-size: "${cloud_image_size_mb}"
-# Note: this is only used in the secex case; there, the rootfs is
-# not the last partition on the disk so we need to explicitly size it
-rootfs-size: "${rootfs_size_mb}"
-EOF
-yaml2json "${image_dynamic_yaml}" "${image_dynamic_json}"
-cat "${image_json}" "${image_dynamic_json}" | jq -s add > "${image_for_disk_json}"
-platforms_json="${tmp_builddir}/platforms.json"
-yaml2json "${configdir}/platforms.yaml" "${platforms_json}"
-
 # In the jenkins pipelines we build the qemu image first and that operation
 # will do a lot of the same work required for later artifacts (metal, metal4k, etc)
 # so we want the cached output from that run to persist. The later artifacts get
 # built in parallel, so we need to be able to access the cache by multiple processes,
 # so for those we'll set `snapshot=on` so that each will get their own disk image.
 # This is OK because we don't checkpoint (cache) any of those stages.
-[ "${image_type}" == "qemu" ] && snapshot="off" || snapshot="on"
+[ "${platform}" == "qemu" ] && snapshot="off" || snapshot="on"
+outdir=$(mktemp -p "${tmp_builddir}" -d)
 runvm_with_cache_snapshot "$snapshot" -- /usr/lib/coreos-assembler/runvm-osbuild \
-    --config "${image_for_disk_json}" \
+    --config "${builddir}/runvm-osbuild-config.json" \
     --mpp "/usr/lib/coreos-assembler/osbuild-manifests/coreos.osbuild.${basearch}.mpp.yaml" \
-    --filepath "${imgpath}"
+    --outdir "${outdir}" \
+    --platform "${platform}"
+
+mv "${outdir}/${platform}/${platform}" "${imgpath}"
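
Note: the hunk above is the new contract between the buildextend scripts and runvm-osbuild. A hedged sketch of that call in isolation, using qemu as the example platform; it omits the runvm_with_cache_snapshot supermin wrapper, and ${builddir}, ${basearch} and ${imgpath} are assumed to come from the surrounding script:

    outdir=$(mktemp -d)
    /usr/lib/coreos-assembler/runvm-osbuild \
        --config "${builddir}/runvm-osbuild-config.json" \
        --mpp "/usr/lib/coreos-assembler/osbuild-manifests/coreos.osbuild.${basearch}.mpp.yaml" \
        --outdir "${outdir}" \
        --platform qemu
    # The manifests now hardcode each artifact's filename to its platform name,
    # so the exported image lands at <outdir>/<platform>/<platform>.
    mv "${outdir}/qemu/qemu" "${imgpath}"
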
-f "${genprotimgvm}" ]; then fatal "No genprotimgvm provided at ${genprotimgvm}" fi # Basic qemu args: qemu_args=(); blk_size="512" - [[ $image_type == metal4k ]] && blk_size="4096" + [[ $platform == metal4k ]] && blk_size="4096" qemu_args+=("-drive" "if=none,id=target,format=${image_format},file=${imgpath},cache=unsafe" \ "-device" "virtio-blk,serial=target,drive=target,physical_block_size=${blk_size},logical_block_size=${blk_size}") @@ -242,7 +158,9 @@ if [[ "${image_type}" == "qemu-secex" ]]; then genprotimg_dir=$(mktemp -p "${tmp_builddir}" -d) cp "${se_script_dir}/genprotimg-script.sh" "${se_script_dir}/post-script.sh" "${genprotimg_dir}" # Extra kargs with dm-verity hashes - secex_kargs="ignition.firstboot rootfs.roothash=$(<"${PWD}"/rootfs_hash) bootfs.roothash=$(<"${PWD}"/bootfs_hash)" + secex_kargs="ignition.firstboot" + secex_kargs+=" rootfs.roothash=$(<"${outdir}/${platform}/rootfs_hash")" + secex_kargs+=" bootfs.roothash=$(<"${outdir}/${platform}/bootfs_hash")" echo "${secex_kargs}" > "${genprotimg_dir}/parmfile" virt-make-fs --format=raw --type=ext4 "${genprotimg_dir}" "${genprotimg_img}" rm -rf "${genprotimg_dir}" @@ -271,7 +189,7 @@ sha256=$(sha256sum_str < "${imgpath}") cosa meta --workdir "${workdir}" --build "${build}" --dump | python3 -c " import sys, json j = json.load(sys.stdin) -j['images']['${image_type}'] = { +j['images']['${platform}'] = { 'path': '${imgname}', 'sha256': '${sha256}', 'size': $(stat -c '%s' "${imgpath}") @@ -298,7 +216,7 @@ json.dump(j, sys.stdout, indent=4) fi # and now the crucial bits -cosa meta --workdir "${workdir}" --build "${build}" --artifact "${image_type}" --artifact-json "$(readlink -f meta.json.new)" +cosa meta --workdir "${workdir}" --build "${build}" --artifact "${platform}" --artifact-json "$(readlink -f meta.json.new)" /usr/lib/coreos-assembler/finalize-artifact "${imgpath}" "${builddir}/${imgname}" # Quiet for the rest of this so the last thing we see is a success message diff --git a/src/osbuild-manifests/platform.applehv.ipp.yaml b/src/osbuild-manifests/platform.applehv.ipp.yaml index 29f41b059f..dd22a97dab 100644 --- a/src/osbuild-manifests/platform.applehv.ipp.yaml +++ b/src/osbuild-manifests/platform.applehv.ipp.yaml @@ -65,5 +65,4 @@ pipelines: options: paths: - from: input://tree/disk.img - to: - mpp-format-string: 'tree:///{filename}' + to: tree:///applehv diff --git a/src/osbuild-manifests/platform.gcp.ipp.yaml b/src/osbuild-manifests/platform.gcp.ipp.yaml index b82a150540..eae22c3018 100644 --- a/src/osbuild-manifests/platform.gcp.ipp.yaml +++ b/src/osbuild-manifests/platform.gcp.ipp.yaml @@ -85,5 +85,4 @@ pipelines: name:raw-gcp-image-tar: file: disk.tar options: - filename: - mpp-format-string: '{filename}' + filename: gcp diff --git a/src/osbuild-manifests/platform.hyperv.ipp.yaml b/src/osbuild-manifests/platform.hyperv.ipp.yaml index 2d018b7b7a..c21d94ee02 100644 --- a/src/osbuild-manifests/platform.hyperv.ipp.yaml +++ b/src/osbuild-manifests/platform.hyperv.ipp.yaml @@ -64,7 +64,6 @@ pipelines: name:raw-hyperv-image: file: disk.img options: - filename: - mpp-format-string: '{filename}' + filename: hyperv format: type: vhdx diff --git a/src/osbuild-manifests/platform.metal.ipp.yaml b/src/osbuild-manifests/platform.metal.ipp.yaml index 5da47b35cb..0b0fcb59da 100644 --- a/src/osbuild-manifests/platform.metal.ipp.yaml +++ b/src/osbuild-manifests/platform.metal.ipp.yaml @@ -90,8 +90,7 @@ pipelines: options: paths: - from: input://tree/disk.img - to: - mpp-format-string: 'tree:///{filename}' + to: tree:///metal - name: 
   - name: raw-metal4k-image
     build:
       mpp-format-string: '{buildroot}'
@@ -184,5 +183,4 @@ pipelines:
         options:
           paths:
             - from: input://tree/disk.img
-              to:
-                mpp-format-string: 'tree:///{filename}'
+              to: tree:///metal4k
diff --git a/src/osbuild-manifests/platform.qemu-secex.ipp.yaml b/src/osbuild-manifests/platform.qemu-secex.ipp.yaml
index f01a38f13b..eb13279a00 100644
--- a/src/osbuild-manifests/platform.qemu-secex.ipp.yaml
+++ b/src/osbuild-manifests/platform.qemu-secex.ipp.yaml
@@ -288,8 +288,7 @@ pipelines:
              name:raw-qemu-secex-image:
                file: disk.img
        options:
-          filename:
-            mpp-format-string: '{filename}'
+          filename: qemu-secex
          format:
            type: qcow2
            compression: false
diff --git a/src/osbuild-manifests/platform.qemu.ipp.yaml b/src/osbuild-manifests/platform.qemu.ipp.yaml
index 13d0c96b36..c7d8342b1f 100644
--- a/src/osbuild-manifests/platform.qemu.ipp.yaml
+++ b/src/osbuild-manifests/platform.qemu.ipp.yaml
@@ -93,8 +93,7 @@ pipelines:
              name:raw-qemu-image:
                file: disk.img
        options:
-          filename:
-            mpp-format-string: '{filename}'
+          filename: qemu
          format:
            type: qcow2
            compression: false
diff --git a/src/runvm-osbuild b/src/runvm-osbuild
index 4cedd7090f..6aa17213a0 100755
--- a/src/runvm-osbuild
+++ b/src/runvm-osbuild
@@ -10,7 +10,8 @@ Options:
     --config: JSON-formatted image.yaml
     --help: show this help
     --mpp: the path to the OSBuild mpp.yaml file
-    --filepath: where to write the created image file
+    --outdir: where to write the created files
+    --platform: the platform to generate an artifact for
 
 You probably don't want to run this script by hand. This script is
 run as part of 'coreos-assembler build'.
@@ -38,17 +39,14 @@ do
         --config) config="${1}"; shift;;
         --help) usage; exit;;
         --mpp) mppyaml="${1}"; shift;;
-        --filepath) filepath="${1}"; shift;;
+        --outdir) outdir="${1}"; shift;;
+        --platform) platform="${1}"; shift;;
         *) echo "${flag} is not understood."; usage; exit 10;;
     esac;
 done
 
-# Get the base filename of the desired file output path
-filename=$(basename "$filepath")
-
 ostree_container=$(getconfig "ostree-container")
 osname=$(getconfig "osname")
-platform=$(getconfig "image-type")
 deploy_via_container=$(getconfig_def "deploy-via-container" "")
 metal_image_size_mb=$(getconfig "metal-image-size")
 cloud_image_size_mb=$(getconfig "cloud-image-size")
@@ -69,12 +67,20 @@ fi
 # Since it doesn't exist create loop-control
 [ ! -e /dev/loop-control ] && mknod /dev/loop-control c 10 237
 
-# Put the store and the output dir on the cache. At the end we'll mv
-# out the created artifact from the output dir to the place it's supposed
-# to go.
-outdir=cache/osbuild/out
+# Put the store on the cache filesystem since in the case we are
+# running unprivileged in COSA we won't be able to create files
+# with SELinux labels that the host's policy doesn't know about.
 storedir=cache/osbuild/store
 
+# Musical chairs for now with the outdir because osbuild does
+# cp -a and that will fail to preserve ownership when copying
+# from the cache qcow2 (where files are owned by root) to the
+# virtiofs mount. Eventually we can use idmapped virtiofs mount
+# inside the supermin VM and we won't need this.
+# https://gitlab.com/virtio-fs/virtiofsd/-/merge_requests/245
+orig_outdir=$outdir
+outdir=cache/osbuild/out
+
 processed_json=$(mktemp -t osbuild-XXXX.json)
 
 # Run `podman images` here to initialize a few directories inside the
@@ -89,7 +95,6 @@ osbuild-mpp \
     -D arch=\""$(arch)"\" \
     -D ostree_ref=\""${ostree_ref}"\" \
     -D ostree_repo=\""${ostree_repo}"\" \
-    -D filename=\""${filename}"\" \
     -D ociarchive=\""${ostree_container}"\" \
     -D osname=\""${osname}"\" \
     -D container_imgref=\""${container_imgref}"\" \
@@ -111,16 +116,5 @@ osbuild \
     --checkpoint raw-image \
     --export "$platform" "${processed_json}"
 
-
-# Copy it out to the specified location. Use mv here so we remove it
-# from the cache qcow2 so we don't cache it.
-mv "${outdir}/${platform}/${filename}" "${filepath}"
-
-# In case of IBM Secure Execution there are more artifacts
-if [ "${platform}" == 'qemu-secex' ]; then
-    dir=$(dirname "${filepath}")
-    mv "${outdir}/${platform}/bootfs_hash" "${dir}"
-    mv "${outdir}/${platform}/rootfs_hash" "${dir}"
-fi
-
+mv "${outdir}"/* "${orig_outdir}/"
 rm -f "${processed_json}"
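
Note: with the qemu-secex special case gone from runvm-osbuild, everything osbuild exported for the requested platform is handed back under --outdir and the caller picks what it needs (see the cmd-buildextend-metal hunks above). A sketch of the layout this implies for Secure Execution; the directory listing is inferred from those hunks, so treat it as an assumption:

    # After: runvm-osbuild ... --platform qemu-secex --outdir "${outdir}"
    ls "${outdir}/qemu-secex"
    #   qemu-secex    the disk image (the manifest now sets filename: qemu-secex)
    #   rootfs_hash   dm-verity root hash, consumed as rootfs.roothash=
    #   bootfs_hash   dm-verity boot hash, consumed as bootfs.roothash=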