diff --git a/src/cmd-build-with-buildah b/src/cmd-build-with-buildah
index ec6fdbf941..aabf2e9070 100755
--- a/src/cmd-build-with-buildah
+++ b/src/cmd-build-with-buildah
@@ -101,8 +101,6 @@ build_with_buildah() {
     # This is analogous to the chmod we do in cmdlib.sh in the legacy path.
     chmod -R gu-s "${tempdir}/src"
 
-    tmp_oci_archive_path=$(realpath "${tempdir}/out.ociarchive")
-
     initconfig="src/config.json"
     if [ -f "${initconfig}" ]; then
         variant="$(jq --raw-output '."coreos-assembler.config-variant"' "${initconfig}")"
@@ -141,6 +139,7 @@ build_with_buildah() {
     # For the source: check if there's only one remote, if so use it with get-url
     # For revision: rev-parse
     set -- build --security-opt=label=disable --cap-add=all --device /dev/fuse \
+        --pull=newer --layers=true \
         --build-arg-file "$argsfile" -v "$(realpath "${tempdir}/src")":/run/src \
         --build-arg VERSION="${VERSION}" \
         --label org.opencontainers.image.source="${source}" \
@@ -162,40 +161,45 @@ build_with_buildah() {
     fi
 
     if [ -d overrides ]; then
-        if [[ -n $(ls overrides/rpm/*.rpm 2> /dev/null) ]]; then
-            (cd overrides/rpm && rm -rf .repodata && createrepo_c .)
+        if [ -d overrides/rpm ]; then
+            # Clean up any previous repo metadata
+            rm -rf overrides/rpm/repodata
+            if [[ -n $(ls overrides/rpm/*.rpm 2> /dev/null) ]]; then
+                # Generate new repo metadata since there are RPMs
+                (cd overrides/rpm && createrepo_c .)
+            fi
         fi
         set -- "$@" -v "$(realpath overrides)":/run/src/overrides
     fi
 
-    if [ -n "$DIRECT" ]; then
-        # turn on layer caching in the direct case; it wouldn't hurt in the
-        # supermin path, but it'd be a waste of space on the rootfs
-        set -- "$@" --layers=true
-        # output to a tag since it's more convenient for development;
-        # buildah doesn't support doing both at once
-        # shellcheck disable=SC1090
-        osname=$(source "src/config/${argsfile}"; echo "${NAME}")
-        final_ref="containers-storage:localhost/${osname}:${VERSION}"
-    else
-        # In the supermin path ensure the ociarchive gets compressed
-        set -- "$@" --disable-compression=false
-        final_ref="oci-archive:${tmp_oci_archive_path}"
-    fi
+    # We'll also copy to an intermediate ociarchive file before
+    # passing that ociarchive to cosa import
+    tmp_oci_archive="oci-archive:$(realpath "${tempdir}/out.ociarchive")"
 
-    # and finally, add the tag and context dir
-    set -- "$@" -t "${final_ref}" .
+    # Set the output tag to be something unique
+    # shellcheck disable=SC1090
+    osname=$(source "src/config/${argsfile}"; echo "${NAME}")
+    final_ref="containers-storage:localhost/${osname}:${VERSION}"
+    # and add the unique tag and context dir to the command
+    set -- "$@" --tag "${final_ref}" .
 
echo "Running:" buildah "$@" if [ -n "$DIRECT" ]; then - env -C "${tempdir}/src" buildah "$@" + cmd="bash" else - /usr/lib/coreos-assembler/cmd-supermin-run --cache \ - env -C "${tempdir}/src" TMPDIR="$(realpath cache)" buildah "$@" + cmd="/usr/lib/coreos-assembler/cmd-supermin-run --cache" fi + cat < "${tempdir}/build-with-buildah-script.sh" + set -euxo pipefail + env -C ${tempdir}/src TMPDIR=$(realpath cache) buildah $@ + skopeo copy --quiet "${final_ref}" "${tmp_oci_archive}" +EOF + chmod +x "${tempdir}/build-with-buildah-script.sh" + $cmd "${tempdir}/build-with-buildah-script.sh" - /usr/lib/coreos-assembler/cmd-import "${final_ref}" ${SKIP_PRUNE:+--skip-prune} - + # Finally import the ociarchive + /usr/lib/coreos-assembler/cmd-import \ + "${tmp_oci_archive}" ${SKIP_PRUNE:+--skip-prune} rm -rf "${tempdir}" } diff --git a/src/cmd-import b/src/cmd-import index 2d8bc1368c..1bf7aef3a4 100755 --- a/src/cmd-import +++ b/src/cmd-import @@ -47,7 +47,7 @@ def main(): tmp_oci_manifest = generate_oci_manifest(args, tmpd) # import into the tmp/repo to get the ostree-commit but also so it's cached - ostree_commit = import_oci_archive(tmpd, tmp_oci_archive, buildid) + ostree_commit = import_oci_archive(tmpd, 'tmp/repo', tmp_oci_archive, buildid) # artificially recreate generated lockfile tmp_lockfile = generate_lockfile(tmpd, ostree_commit) diff --git a/src/cmdlib.sh b/src/cmdlib.sh index 34e331b685..744de9f0a1 100755 --- a/src/cmdlib.sh +++ b/src/cmdlib.sh @@ -613,7 +613,7 @@ runcompose_tree() { # Run with cache disk. runvm_with_cache() { - local cache_size=${RUNVM_CACHE_SIZE:-45G} + local cache_size=${RUNVM_CACHE_SIZE:-50G} # "cache2" has an explicit label so we can find it in qemu easily if [ ! -f "${workdir}"/cache/cache2.qcow2 ]; then qemu-img create -f qcow2 cache2.qcow2.tmp "$cache_size" @@ -773,7 +773,7 @@ if [ -n "\${cachedev}" ]; then mount -o remount,ro ${workdir}/cache fsfreeze -f ${workdir}/cache fsfreeze -u ${workdir}/cache - umount ${workdir}/cache + umount -R ${workdir}/cache fi umount ${workdir} /sbin/reboot -f diff --git a/src/cosalib/cmdlib.py b/src/cosalib/cmdlib.py index 31e7e11db5..0bb9a4a0c4 100644 --- a/src/cosalib/cmdlib.py +++ b/src/cosalib/cmdlib.py @@ -337,7 +337,7 @@ def import_ostree_commit(workdir, buildpath, buildmeta, extract_json=True, parti if was_oci_imported: # This was initially imported using `cosa import`. Go through that # path again because it's not an encapsulated commit. - import_oci_archive(tmpdir, tarfile, buildmeta['buildid']) + import_oci_archive(tmpdir, repo, tarfile, buildmeta['buildid']) elif os.environ.get('COSA_PRIVILEGED', '') == '1': build_repo = os.path.join(repo, '../../cache/repo-build') # note: this actually is the same as `container unencapsulate` and @@ -362,46 +362,46 @@ def import_ostree_commit(workdir, buildpath, buildmeta, extract_json=True, parti extract_image_json(workdir, commit) -def import_oci_archive(parent_tmpd, ociarchive, ref): +def import_oci_archive(parent_tmpd, repo, ociarchive, ref): ''' - Imports layered/non-encapsulated OCI archive into the tmp/repo. Returns - the OSTree commit that was imported. + Imports layered/non-encapsulated OCI archive into the repo + (usually tmp/repo). Returns the OSTree commit that was imported. 
     '''
-    with tempfile.TemporaryDirectory(dir=parent_tmpd) as tmpd:
-        subprocess.check_call(['ostree', 'init', '--repo', tmpd, '--mode=bare-user'])
+    with tempfile.TemporaryDirectory(dir=parent_tmpd) as tmprepo:
+        subprocess.check_call(['ostree', 'init', '--repo', tmprepo, '--mode=bare-user'])
 
         # Init tmp/repo in case it doesn't exist.
         # If it exists, no problem. It's idempotent
-        subprocess.check_call(['ostree', 'init', '--repo', 'tmp/repo', '--mode=archive'])
+        subprocess.check_call(['ostree', 'init', '--repo', repo, '--mode=archive'])
 
         # import all the blob refs for more efficient import into bare-user repo
-        blob_refs = subprocess.check_output(['ostree', 'refs', '--repo', 'tmp/repo',
+        blob_refs = subprocess.check_output(['ostree', 'refs', '--repo', repo,
                                              '--list', 'ostree/container/blob'],
                                             encoding='utf-8').splitlines()
         if len(blob_refs) > 0:
-            subprocess.check_call(['ostree', 'pull-local', '--repo', tmpd, 'tmp/repo'] + blob_refs)
+            subprocess.check_call(['ostree', 'pull-local', '--repo', tmprepo, repo] + blob_refs)
 
-        subprocess.check_call(['ostree', 'container', 'image', 'pull', tmpd,
+        subprocess.check_call(['ostree', 'container', 'image', 'pull', tmprepo,
                                f'ostree-unverified-image:oci-archive:{ociarchive}'])
 
         # awkwardly work around the fact that there is no --write-ref equivalent
         # XXX: we can make this better once we can rely on --ostree-digestfile
         # https://github.com/bootc-dev/bootc/pull/1421
-        refs = subprocess.check_output(['ostree', 'refs', '--repo', tmpd,
+        refs = subprocess.check_output(['ostree', 'refs', '--repo', tmprepo,
                                         '--list', 'ostree/container/image'],
                                        encoding='utf-8').splitlines()
         assert len(refs) == 1
-        subprocess.check_call(['ostree', 'refs', '--repo', tmpd, refs[0], '--create', ref])
-        subprocess.check_call(['ostree', 'refs', '--repo', 'tmp/repo', ref, '--delete'])
-        subprocess.check_call(['ostree', 'pull-local', '--repo', 'tmp/repo', tmpd, ref])
+        subprocess.check_call(['ostree', 'refs', '--repo', tmprepo, refs[0], '--create', ref])
+        subprocess.check_call(['ostree', 'refs', '--repo', repo, ref, '--delete'])
+        subprocess.check_call(['ostree', 'pull-local', '--repo', repo, tmprepo, ref])
 
         # export back all the blob refs for more efficient imports of next builds
-        blob_refs = subprocess.check_output(['ostree', 'refs', '--repo', tmpd,
+        blob_refs = subprocess.check_output(['ostree', 'refs', '--repo', tmprepo,
                                              '--list', 'ostree/container/blob'],
                                             encoding='utf-8').splitlines()
-        subprocess.check_call(['ostree', 'pull-local', '--repo', 'tmp/repo', tmpd] + blob_refs)
+        subprocess.check_call(['ostree', 'pull-local', '--repo', repo, tmprepo] + blob_refs)
 
-    ostree_commit = subprocess.check_output(['ostree', 'rev-parse', '--repo', 'tmp/repo', ref], encoding='utf-8').strip()
+    ostree_commit = subprocess.check_output(['ostree', 'rev-parse', '--repo', repo, ref], encoding='utf-8').strip()
     return ostree_commit
 
 
diff --git a/src/supermin-init-prelude.sh b/src/supermin-init-prelude.sh
index 0bb131d539..e17f7ff542 100644
--- a/src/supermin-init-prelude.sh
+++ b/src/supermin-init-prelude.sh
@@ -55,6 +55,20 @@ done
 cachedev=$(blkid -lt LABEL=cosa-cache -o device || true)
 if [ -n "${cachedev}" ]; then
     mount "${cachedev}" "${workdir}"/cache
+    # Also set up container storage on the cache. We use a symlink
+    # rather than configuring graphroot in containers/storage.conf
+    # because when osbuild runs it will use the /etc/containers/storage.conf
+    # from the host (if using host as buildroot) and then will run out
+    # of space in "${workdir}"/cache/cache-containers-storage inside
+    # the bwrap environment. Doing it with a symlink means we can
+    # still use the cache from the host, but inside osbuild it will
+    # just get a blank /var/lib/containers to operate on.
+    mkdir -p "${workdir}"/cache/cache-containers-storage
+    rm -rf /var/lib/containers
+    ln -s "${workdir}"/cache/cache-containers-storage /var/lib/containers
+    # Prune all containers and images more than a few days old. Our
+    # inputs here change daily so this should be reasonable.
+    podman system prune --all --force --filter until=72h
 else
     echo "No cosa-cache filesystem found!"
 fi