diff --git a/cmd/list.go b/cmd/list.go
index d3e9acca..a8f9413e 100644
--- a/cmd/list.go
+++ b/cmd/list.go
@@ -3,9 +3,9 @@ package cmd
 import (
 	"os"
 
+	"gitlab.com/bootc-org/podman-bootc/pkg/cache"
 	"gitlab.com/bootc-org/podman-bootc/pkg/config"
 	"gitlab.com/bootc-org/podman-bootc/pkg/user"
-	"gitlab.com/bootc-org/podman-bootc/pkg/utils"
 	"gitlab.com/bootc-org/podman-bootc/pkg/vm"
 
 	"github.com/containers/common/pkg/report"
@@ -79,24 +79,34 @@ func CollectVmList(user user.User, libvirtUri string) (vmList []vm.BootcVMConfig
 	return vmList, nil
 }
 
-func getVMInfo(user user.User, libvirtUri string, imageId string) (*vm.BootcVMConfig, error) {
+func getVMInfo(user user.User, libvirtUri string, fullImageId string) (*vm.BootcVMConfig, error) {
+	cacheDir, err := cache.NewCache(fullImageId, user)
+	if err != nil {
+		return nil, err
+	}
+	err = cacheDir.Lock(cache.Shared)
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err := cacheDir.Unlock(); err != nil {
+			logrus.Warningf("unable to unlock VM %s: %v", fullImageId, err)
+		}
+	}()
+
 	bootcVM, err := vm.NewVM(vm.NewVMParameters{
-		ImageID:    imageId,
+		ImageID:    fullImageId,
 		User:       user,
 		LibvirtUri: libvirtUri,
-		Locking:    utils.Shared,
 	})
 
 	if err != nil {
 		return nil, err
 	}
 
-	// Let's be explicit instead of relying on the defer exec order
 	defer func() {
 		bootcVM.CloseConnection()
-		if err := bootcVM.Unlock(); err != nil {
-			logrus.Warningf("unable to unlock VM %s: %v", imageId, err)
-		}
 	}()
 
 	cfg, err := bootcVM.GetConfig()
diff --git a/cmd/rm.go b/cmd/rm.go
index 90c0ceeb..3fe206d6 100644
--- a/cmd/rm.go
+++ b/cmd/rm.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 
+	"gitlab.com/bootc-org/podman-bootc/pkg/cache"
 	"gitlab.com/bootc-org/podman-bootc/pkg/config"
 	"gitlab.com/bootc-org/podman-bootc/pkg/user"
 	"gitlab.com/bootc-org/podman-bootc/pkg/utils"
@@ -44,6 +45,7 @@ func oneOrAll() cobra.PositionalArgs {
 }
 
 func doRemove(_ *cobra.Command, args []string) error {
+
 	if removeAll {
 		return pruneAll()
 	}
@@ -57,20 +59,33 @@ func prune(id string) error {
 		return err
 	}
 
+	// take an exclusive lock on the cache directory
+	fullImageId, err := utils.FullImageIdFromPartial(id, user)
+	if err != nil {
+		return err
+	}
+	cacheDir, err := cache.NewCache(fullImageId, user)
+	if err != nil {
+		return err
+	}
+	err = cacheDir.Lock(cache.Exclusive)
+	if err != nil {
+		return err
+	}
+
 	bootcVM, err := vm.NewVM(vm.NewVMParameters{
 		ImageID:    id,
 		LibvirtUri: config.LibvirtUri,
 		User:       user,
-		Locking:    utils.Exclusive,
 	})
 
 	if err != nil {
 		return fmt.Errorf("unable to get VM %s: %v", id, err)
 	}
 
-	// Let's be explicit instead of relying on the defer exec order
 	defer func() {
+		// Let's be explicit instead of relying on the defer exec order
 		bootcVM.CloseConnection()
-		if err := bootcVM.Unlock(); err != nil {
+		if err := cacheDir.Unlock(); err != nil {
 			logrus.Warningf("unable to unlock VM %s: %v", id, err)
 		}
 	}()
diff --git a/cmd/run.go b/cmd/run.go
index 6268e843..75c5f05d 100644
--- a/cmd/run.go
+++ b/cmd/run.go
@@ -9,7 +9,9 @@ import (
 	"time"
 
 	"gitlab.com/bootc-org/podman-bootc/pkg/bootc"
+	"gitlab.com/bootc-org/podman-bootc/pkg/cache"
 	"gitlab.com/bootc-org/podman-bootc/pkg/config"
+	"gitlab.com/bootc-org/podman-bootc/pkg/container"
 	"gitlab.com/bootc-org/podman-bootc/pkg/user"
 	"gitlab.com/bootc-org/podman-bootc/pkg/utils"
 	"gitlab.com/bootc-org/podman-bootc/pkg/vm"
@@ -100,41 +102,77 @@ func doRun(flags *cobra.Command, args []string) error {
 		return err
 	}
 
-	// create the disk image
-	idOrName := args[0]
-	bootcDisk := bootc.NewBootcDisk(idOrName, ctx, user)
-	err = bootcDisk.Install(vmConfig.Quiet, diskImageConfigInstance)
-
+	// pull the container image
+	containerImage := container.NewContainerImage(args[0], ctx)
+	err = containerImage.Pull()
 	if err != nil {
-		return fmt.Errorf("unable to install bootc image: %w", err)
+		return fmt.Errorf("unable to pull container image: %w", err)
 	}
 
-	//start the VM
-	println("Booting the VM...")
-	sshPort, err := utils.GetFreeLocalTcpPort()
+	// create the cache directory
+	cacheDir, err := cache.NewCache(containerImage.GetId(), user)
 	if err != nil {
-		return fmt.Errorf("unable to get free port for SSH: %w", err)
+		return fmt.Errorf("unable to create cache: %w", err)
+	}
+	err = cacheDir.Lock(cache.Exclusive)
+	if err != nil {
+		return err
+	}
+	err = cacheDir.Create()
+	if err != nil {
+		return fmt.Errorf("unable to create cache: %w", err)
 	}
 
+	// check if the vm is already running
 	bootcVM, err := vm.NewVM(vm.NewVMParameters{
-		ImageID:    bootcDisk.GetImageId(),
+		ImageID:    containerImage.GetId(),
 		User:       user,
 		LibvirtUri: config.LibvirtUri,
-		Locking:    utils.Shared,
 	})
 
 	if err != nil {
 		return fmt.Errorf("unable to initialize VM: %w", err)
 	}
 
-	// Let's be explicit instead of relying on the defer exec order
 	defer func() {
+		// Let's be explicit instead of relying on the defer exec order
 		bootcVM.CloseConnection()
-		if err := bootcVM.Unlock(); err != nil {
-			logrus.Warningf("unable to unlock VM %s: %v", bootcDisk.GetImageId(), err)
+		if err := cacheDir.Unlock(); err != nil {
+			logrus.Warningf("unable to unlock cache %s: %v", cacheDir.ImageId, err)
 		}
 	}()
 
+	isRunning, err := bootcVM.IsRunning()
+	if err != nil {
+		return fmt.Errorf("unable to check if VM is running: %w", err)
+	}
+	if isRunning {
+		return fmt.Errorf("VM already running, use the ssh command to connect to it")
+	}
+
+	// if any of these parameters are set, we need to rebuild the disk image if one exists
+	bustCache := false
+	if diskImageConfigInstance.DiskSize != "" ||
+		diskImageConfigInstance.RootSizeMax != "" ||
+		diskImageConfigInstance.Filesystem != "" {
+		bustCache = true
+	}
+
+	// create the disk image
+	bootcDisk := bootc.NewBootcDisk(containerImage, ctx, user, cacheDir, bustCache)
+	err = bootcDisk.Install(vmConfig.Quiet, diskImageConfigInstance)
+
+	if err != nil {
+		return fmt.Errorf("unable to install bootc image: %w", err)
+	}
+
+	//start the VM
+	println("Booting the VM...")
+	sshPort, err := utils.GetFreeLocalTcpPort()
+	if err != nil {
+		return fmt.Errorf("unable to get free port for SSH: %w", err)
+	}
+
 	cmd := args[1:]
 	err = bootcVM.Run(vm.RunVMParameters{
 		Cmd:        cmd,
@@ -153,7 +191,21 @@ func doRun(flags *cobra.Command, args []string) error {
 	}
 
 	// write down the config file
-	if err = bootcVM.WriteConfig(*bootcDisk); err != nil {
+	if err = bootcVM.WriteConfig(*bootcDisk, containerImage); err != nil {
+		return err
+	}
+
+	// done modifying the cache, so remove the Exclusive lock
+	err = cacheDir.Unlock()
+	if err != nil {
+		return fmt.Errorf("unable to unlock cache: %w", err)
+	}
+
+	// take a RO lock for the SSH connection
+	// it will be unlocked at the end of this function
+	// by the previous defer()
+	err = cacheDir.Lock(cache.Shared)
+	if err != nil {
 		return err
 	}
 
@@ -198,6 +250,20 @@ func doRun(flags *cobra.Command, args []string) error {
 
 	// Always remove when executing a command
 	if vmConfig.RemoveVm || len(cmd) > 0 {
+		// remove the RO lock
+		err = cacheDir.Unlock()
+		if err != nil {
+			return err
+		}
+
+		// take an exclusive lock to remove the VM
+		// it will be unlocked at the end of this function
+		// by the previous defer()
+		err = cacheDir.Lock(cache.Exclusive)
+		if err != nil {
+			return err
+		}
+
 		err = bootcVM.Delete() //delete the VM, but keep the disk image
 		if err != nil {
 			return fmt.Errorf("unable to remove VM from cache: %w", err)
diff --git a/cmd/ssh.go b/cmd/ssh.go
index a1fa02aa..168ba8aa 100644
--- a/cmd/ssh.go
+++ b/cmd/ssh.go
@@ -1,6 +1,7 @@
 package cmd
 
 import (
+	"gitlab.com/bootc-org/podman-bootc/pkg/cache"
 	"gitlab.com/bootc-org/podman-bootc/pkg/config"
 	"gitlab.com/bootc-org/podman-bootc/pkg/user"
 	"gitlab.com/bootc-org/podman-bootc/pkg/utils"
@@ -32,21 +33,34 @@ func doSsh(_ *cobra.Command, args []string) error {
 
 	id := args[0]
 
+	//take a read only lock on the cache directory
+	fullImageId, err := utils.FullImageIdFromPartial(id, user)
+	if err != nil {
+		return err
+	}
+	cacheDir, err := cache.NewCache(fullImageId, user)
+	if err != nil {
+		return err
+	}
+	err = cacheDir.Lock(cache.Shared)
+	if err != nil {
+		return err
+	}
+
 	vm, err := vm.NewVM(vm.NewVMParameters{
 		ImageID:    id,
 		User:       user,
 		LibvirtUri: config.LibvirtUri,
-		Locking:    utils.Shared,
 	})
 
 	if err != nil {
 		return err
 	}
 
-	// Let's be explicit instead of relying on the defer exec order
 	defer func() {
+		// Let's be explicit instead of relying on the defer exec order
 		vm.CloseConnection()
-		if err := vm.Unlock(); err != nil {
+		if err := cacheDir.Unlock(); err != nil {
 			logrus.Warningf("unable to unlock VM %s: %v", id, err)
 		}
 	}()
diff --git a/cmd/stop.go b/cmd/stop.go
index 7e10f7dd..e3536a86 100644
--- a/cmd/stop.go
+++ b/cmd/stop.go
@@ -1,6 +1,7 @@
 package cmd
 
 import (
+	"gitlab.com/bootc-org/podman-bootc/pkg/cache"
 	"gitlab.com/bootc-org/podman-bootc/pkg/config"
 	"gitlab.com/bootc-org/podman-bootc/pkg/user"
 	"gitlab.com/bootc-org/podman-bootc/pkg/utils"
@@ -23,26 +24,40 @@ func init() {
 }
 
 func doStop(_ *cobra.Command, args []string) (err error) {
+	id := args[0]
+
 	user, err := user.NewUser()
 	if err != nil {
 		return err
 	}
 
-	id := args[0]
+	//take an exclusive lock on the cache directory
+	fullImageId, err := utils.FullImageIdFromPartial(id, user)
+	if err != nil {
+		return err
+	}
+	cacheDir, err := cache.NewCache(fullImageId, user)
+	if err != nil {
+		return err
+	}
+	err = cacheDir.Lock(cache.Exclusive)
+	if err != nil {
+		return err
+	}
+
 	bootcVM, err := vm.NewVM(vm.NewVMParameters{
 		ImageID:    id,
 		LibvirtUri: config.LibvirtUri,
 		User:       user,
-		Locking:    utils.Exclusive,
 	})
 
 	if err != nil {
 		return err
 	}
 
-	// Let's be explicit instead of relying on the defer exec order
 	defer func() {
+		// Let's be explicit instead of relying on the defer exec order
 		bootcVM.CloseConnection()
-		if err := bootcVM.Unlock(); err != nil {
+		if err := cacheDir.Unlock(); err != nil {
 			logrus.Warningf("unable to unlock VM %s: %v", id, err)
 		}
 	}()
diff --git a/pkg/bootc/bootc_disk.go b/pkg/bootc/bootc_disk.go
index 73262de4..47cc39e9 100644
--- a/pkg/bootc/bootc_disk.go
+++ b/pkg/bootc/bootc_disk.go
@@ -13,12 +13,11 @@ import (
 	"syscall"
 	"time"
 
-	"gitlab.com/bootc-org/podman-bootc/pkg/config"
+	"gitlab.com/bootc-org/podman-bootc/pkg/cache"
+	"gitlab.com/bootc-org/podman-bootc/pkg/container"
 	"gitlab.com/bootc-org/podman-bootc/pkg/user"
-	"gitlab.com/bootc-org/podman-bootc/pkg/utils"
 
 	"github.com/containers/podman/v5/pkg/bindings/containers"
-	"github.com/containers/podman/v5/pkg/bindings/images"
 	"github.com/containers/podman/v5/pkg/domain/entities/types"
 	"github.com/containers/podman/v5/pkg/specgen"
 	"github.com/docker/go-units"
@@ -62,16 +61,14 @@ type diskFromContainerMeta struct {
 }
 
 type BootcDisk struct {
-	ImageNameOrId           string
+	ContainerImage          container.ContainerImage
+	Cache                   cache.Cache
 	User                    user.User
 	Ctx                     context.Context
-	ImageId                 string
-	imageData               *types.ImageInspectReport
-	RepoTag                 string
 	CreatedAt               time.Time
-	Directory               string
 	file                    *os.File
 	bootcInstallContainerId string
+	bustCache               bool
 }
 
 // create singleton for easy cleanup
@@ -80,40 +77,37 @@ var (
 	instanceOnce sync.Once
 )
 
-func NewBootcDisk(imageNameOrId string, ctx context.Context, user user.User) *BootcDisk {
+// NewBootcDisk creates a new BootcDisk instance
+//
+// Parameters:
+// - containerImage: the container image to create the disk image from
+// - ctx: context for the podman machine connection
+// - user: the user who is running the command, determines where the disk image is stored
+// - cache: the cache to use for storing the disk image
+// - bustCache: whether to force a new disk image to be created
+func NewBootcDisk(containerImage container.ContainerImage, ctx context.Context, user user.User, cache cache.Cache, bustCache bool) *BootcDisk {
 	instanceOnce.Do(func() {
 		instance = &BootcDisk{
-			ImageNameOrId: imageNameOrId,
-			Ctx:           ctx,
-			User:          user,
+			ContainerImage: containerImage,
+			Ctx:            ctx,
+			User:           user,
+			Cache:          cache,
+			bustCache:      bustCache,
 		}
 	})
 	return instance
 }
 
-func (p *BootcDisk) GetDirectory() string {
-	return p.Directory
-}
-
-func (p *BootcDisk) GetImageId() string {
-	return p.ImageId
-}
-
 // GetSize returns the virtual size of the disk in bytes;
 // this may be larger than the actual disk usage
 func (p *BootcDisk) GetSize() (int64, error) {
-	st, err := os.Stat(filepath.Join(p.Directory, config.DiskImage))
+	st, err := os.Stat(p.Cache.GetDiskPath())
 	if err != nil {
 		return 0, err
 	}
 	return st.Size(), nil
 }
 
-// GetRepoTag returns the repository of the container image
-func (p *BootcDisk) GetRepoTag() string {
-	return p.RepoTag
-}
-
 // GetCreatedAt returns the creation time of the disk image
 func (p *BootcDisk) GetCreatedAt() time.Time {
 	return p.CreatedAt
@@ -122,32 +116,6 @@ func (p *BootcDisk) GetCreatedAt() time.Time {
 func (p *BootcDisk) Install(quiet bool, config DiskImageConfig) (err error) {
 	p.CreatedAt = time.Now()
 
-	err = p.pullImage()
-	if err != nil {
-		return
-	}
-
-	// Create VM cache dir; one per oci bootc image
-	p.Directory = filepath.Join(p.User.CacheDir(), p.ImageId)
-	lock := utils.NewCacheLock(p.User.RunDir(), p.Directory)
-	locked, err := lock.TryLock(utils.Exclusive)
-	if err != nil {
-		return fmt.Errorf("error locking the VM cache path: %w", err)
-	}
-	if !locked {
-		return fmt.Errorf("unable to lock the VM cache path")
-	}
-
-	defer func() {
-		if err := lock.Unlock(); err != nil {
-			logrus.Errorf("unable to unlock VM %s: %v", p.ImageId, err)
-		}
-	}()
-
-	if err := os.MkdirAll(p.Directory, os.ModePerm); err != nil {
-		return fmt.Errorf("error while making bootc disk directory: %w", err)
-	}
-
 	err = p.getOrInstallImageToDisk(quiet, config)
 	if err != nil {
 		return
@@ -173,7 +141,7 @@ func (p *BootcDisk) Cleanup() (err error) {
 
 // getOrInstallImageToDisk checks if the disk is present and if not, installs the image to a new disk
 func (p *BootcDisk) getOrInstallImageToDisk(quiet bool, diskConfig DiskImageConfig) error {
-	diskPath := filepath.Join(p.Directory, config.DiskImage)
+	diskPath := p.Cache.GetDiskPath()
 	f, err := os.Open(diskPath)
 	if err != nil {
 		if !errors.Is(err, os.ErrNotExist) {
@@ -182,13 +150,25 @@ func (p *BootcDisk) getOrInstallImageToDisk(quiet bool, diskConfig DiskImageConf
 		logrus.Debugf("No existing disk image found")
 		return p.bootcInstallImageToDisk(quiet, diskConfig)
 	}
+	if p.bustCache {
+		logrus.Debug("Found existing disk image but cache busting is enabled, removing and recreating")
+		err = os.Remove(diskPath)
+		if err != nil {
+			return err
+		}
+		return p.bootcInstallImageToDisk(quiet, diskConfig)
+	}
+
 	logrus.Debug("Found existing disk image, comparing digest")
 	defer f.Close()
 	buf := make([]byte, 4096)
 	len, err := unix.Fgetxattr(int(f.Fd()), imageMetaXattr, buf)
 	if err != nil {
 		// If there's no xattr, just remove it
-		os.Remove(diskPath)
+		err = os.Remove(diskPath)
+		if err != nil {
+			return err
+		}
 		logrus.Debugf("No %s xattr found", imageMetaXattr)
 		return p.bootcInstallImageToDisk(quiet, diskConfig)
 	}
@@ -199,8 +179,8 @@ func (p *BootcDisk) getOrInstallImageToDisk(quiet bool, diskConfig DiskImageConf
 		return p.bootcInstallImageToDisk(quiet, diskConfig)
 	}
 
-	logrus.Debugf("previous disk digest: %s current digest: %s", serializedMeta.ImageDigest, p.ImageId)
-	if serializedMeta.ImageDigest == p.ImageId {
+	logrus.Debugf("previous disk digest: %s current digest: %s", serializedMeta.ImageDigest, p.ContainerImage.GetId())
+	if serializedMeta.ImageDigest == p.ContainerImage.GetId() {
 		return nil
 	}
 
@@ -217,12 +197,12 @@ func align(size int64, align int64) int64 {
 
 // bootcInstallImageToDisk creates a disk image from a bootc container
 func (p *BootcDisk) bootcInstallImageToDisk(quiet bool, diskConfig DiskImageConfig) (err error) {
-	fmt.Printf("Executing `bootc install to-disk` from container image %s to create disk image\n", p.RepoTag)
-	p.file, err = os.CreateTemp(p.Directory, "podman-bootc-tempdisk")
+	fmt.Printf("Executing `bootc install to-disk` from container image %s to create disk image\n", p.ContainerImage.GetRepoTag())
+	p.file, err = os.CreateTemp(p.Cache.GetDirectory(), "podman-bootc-tempdisk")
 	if err != nil {
 		return err
 	}
-	size := p.imageData.Size * containerSizeToDiskSizeMultiplier
+	size := p.ContainerImage.GetSize() * containerSizeToDiskSizeMultiplier
 	if size < diskSizeMinimum {
 		size = diskSizeMinimum
 	}
@@ -237,7 +217,7 @@ func (p *BootcDisk) bootcInstallImageToDisk(quiet bool, diskConfig DiskImageConf
 	}
 	// Round up to 4k; loopback wants at least 512b alignment
 	size = align(size, 4096)
-	humanContainerSize := units.HumanSize(float64(p.imageData.Size))
+	humanContainerSize := units.HumanSize(float64(p.ContainerImage.GetSize()))
 	humanSize := units.HumanSize(float64(size))
 	logrus.Infof("container size: %s, disk size: %s", humanContainerSize, humanSize)
 
@@ -257,7 +237,7 @@ func (p *BootcDisk) bootcInstallImageToDisk(quiet bool, diskConfig DiskImageConf
 		return fmt.Errorf("failed to create disk image: %w", err)
 	}
 	serializedMeta := diskFromContainerMeta{
-		ImageDigest: p.ImageId,
+		ImageDigest: p.ContainerImage.GetId(),
 	}
 	buf, err := json.Marshal(serializedMeta)
 	if err != nil {
@@ -266,8 +246,8 @@ func (p *BootcDisk) bootcInstallImageToDisk(quiet bool, diskConfig DiskImageConf
 	if err := unix.Fsetxattr(int(p.file.Fd()), imageMetaXattr, buf, 0); err != nil {
 		return fmt.Errorf("failed to set xattr: %w", err)
 	}
 
-	diskPath := filepath.Join(p.Directory, config.DiskImage)
+	diskPath := p.Cache.GetDiskPath()
 	if err := os.Rename(p.file.Name(), diskPath); err != nil {
 		return fmt.Errorf("failed to rename to %s: %w", diskPath, err)
 	}
@@ -276,39 +256,10 @@ func (p *BootcDisk) bootcInstallImageToDisk(quiet bool, diskConfig DiskImageConf
 	return nil
 }
 
-// pullImage fetches the container image if not present
-func (p *BootcDisk) pullImage() (err error) {
-	pullPolicy := "missing"
-	ids, err := images.Pull(p.Ctx, p.ImageNameOrId, &images.PullOptions{Policy: &pullPolicy})
-	if err != nil {
-		return fmt.Errorf("failed to pull image: %w", err)
-	}
-
-	if len(ids) == 0 {
-		return fmt.Errorf("no ids returned from image pull")
-	}
-
-	if len(ids) > 1 {
-		return fmt.Errorf("multiple ids returned from image pull")
-	}
-
-	image, err := images.GetImage(p.Ctx, p.ImageNameOrId, &images.GetOptions{})
-	if err != nil {
-		return fmt.Errorf("failed to get image: %w", err)
-	}
-	p.imageData = image
-
-	imageId := ids[0]
-	p.ImageId = imageId
-	p.RepoTag = image.RepoTags[0]
-
-	return
-}
-
 // runInstallContainer runs the bootc installer in a container to create a disk image
 func (p *BootcDisk) runInstallContainer(quiet bool, config DiskImageConfig) (err error) {
 	// Create a temporary external shell script with the contents of our losetup wrapper
-	losetupTemp, err := os.CreateTemp(p.Directory, "losetup-wrapper")
+	losetupTemp, err := os.CreateTemp(p.Cache.GetDirectory(), "losetup-wrapper")
 	if err != nil {
 		return fmt.Errorf("temp losetup wrapper: %w", err)
 	}
@@ -393,7 +344,7 @@ func (p *BootcDisk) createInstallContainer(config DiskImageConfig, tempLosetup s
 			Terminal:   &trueDat,
 		},
 		ContainerStorageConfig: specgen.ContainerStorageConfig{
-			Image: p.ImageNameOrId,
+			Image: p.ContainerImage.ImageNameOrId,
 			Mounts: []specs.Mount{
 				{
 					Source:      "/var/lib/containers",
@@ -406,7 +357,7 @@ func (p *BootcDisk) createInstallContainer(config DiskImageConfig, tempLosetup s
 					Type:        "bind",
 				},
 				{
-					Source:      p.Directory,
+					Source:      p.Cache.GetDirectory(),
 					Destination: "/output",
 					Type:        "bind",
 				},
diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go
new file mode 100644
index 00000000..d3426dd1
--- /dev/null
+++ b/pkg/cache/cache.go
@@ -0,0 +1,75 @@
+package cache
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"gitlab.com/bootc-org/podman-bootc/pkg/user"
+)
+
+// NewCache creates a new cache object
+// Parameters:
+// - id: the full image ID
+// - user: the user who is running the podman-bootc command
+func NewCache(id string, user user.User) (cache Cache, err error) {
+	return Cache{
+		ImageId:   id,
+		User:      user,
+		Directory: filepath.Join(user.CacheDir(), id),
+	}, nil
+}
+
+type Cache struct {
+	User      user.User
+	ImageId   string
+	Directory string
+	lock      CacheLock
+}
+
+// Exists checks if the cache directory exists;
+// returns false if any error occurs while checking
+func (p *Cache) Exists() bool {
+	_, err := os.Stat(p.Directory)
+	return err == nil
+}
+
+// Create VM cache dir; one per oci bootc image
+func (p *Cache) Create() (err error) {
+	if err := os.MkdirAll(p.Directory, os.ModePerm); err != nil {
+		return fmt.Errorf("error while making bootc disk directory: %w", err)
+	}
+
+	return
+}
+
+func (p *Cache) GetDirectory() string {
+	if !p.Exists() {
+		panic("cache does not exist")
+	}
+	return p.Directory
+}
+
+func (p *Cache) GetDiskPath() string {
+	if !p.Exists() {
+		panic("cache does not exist")
+	}
+	return filepath.Join(p.GetDirectory(), "disk.raw")
+}
+
+func (p *Cache) Lock(mode AccessMode) error {
+	p.lock = NewCacheLock(p.User.RunDir(), p.Directory)
+	locked, err := p.lock.TryLock(mode)
+	if err != nil {
+		return fmt.Errorf("error locking the cache path: %w", err)
+	}
+	if !locked {
+		return fmt.Errorf("unable to lock the cache path")
+	}
+
+	return nil
+}
+
+func (p *Cache) Unlock() error {
+	return p.lock.Unlock()
+}
diff --git a/pkg/utils/locks.go b/pkg/cache/lock.go
similarity index 98%
rename from pkg/utils/locks.go
rename to pkg/cache/lock.go
index e5c22657..a52e0ffb 100644
--- a/pkg/utils/locks.go
+++ b/pkg/cache/lock.go
@@ -1,4 +1,4 @@
-package utils
+package cache
 
 import (
 	"path/filepath"
diff --git a/pkg/container/image.go b/pkg/container/image.go
new file mode 100644
index 00000000..5060802e
--- /dev/null
+++ b/pkg/container/image.go
@@ -0,0 +1,76 @@
+package container
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/podman/v5/pkg/bindings/images"
+)
+
+func NewContainerImage(imageNameOrId string, ctx context.Context) ContainerImage {
+	return ContainerImage{
+		ImageNameOrId: imageNameOrId,
+		Ctx:           ctx,
+	}
+}
+
+type ContainerImage struct {
+	ImageNameOrId string
+	Ctx           context.Context
+	Id            string
+	RepoTag       string
+	Size          int64
+	Pulled        bool
+}
+
+// Pull fetches the container image if not present
+func (p *ContainerImage) Pull() (err error) {
+	pullPolicy := "missing"
+	ids, err := images.Pull(p.Ctx, p.ImageNameOrId, &images.PullOptions{Policy: &pullPolicy})
+	if err != nil {
+		return fmt.Errorf("failed to pull image: %w", err)
+	}
+
+	if len(ids) == 0 {
+		return fmt.Errorf("no ids returned from image pull")
+	}
+
+	if len(ids) > 1 {
+		return fmt.Errorf("multiple ids returned from image pull")
+	}
+
+	image, err := images.GetImage(p.Ctx, p.ImageNameOrId, &images.GetOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to get image: %w", err)
+	}
+	p.Size = image.Size
+	p.Id = ids[0]
+	p.RepoTag = image.RepoTags[0]
+	p.Pulled = true
+
+	return
+}
+
+func (p *ContainerImage) GetId() string {
+	if !p.Pulled {
+		panic("image not pulled")
+	}
+
+	return p.Id
+}
+
+func (p *ContainerImage) GetRepoTag() string {
+	if !p.Pulled {
+		panic("image not pulled")
+	}
+
+	return p.RepoTag
+}
+
+func (p *ContainerImage) GetSize() int64 {
+	if !p.Pulled {
+		panic("image not pulled")
+	}
+
+	return p.Size
+}
diff --git a/pkg/utils/file.go b/pkg/utils/file.go
index d76cf0fd..f558b18f 100644
--- a/pkg/utils/file.go
+++ b/pkg/utils/file.go
@@ -3,8 +3,13 @@ package utils
 import (
 	"bytes"
 	"errors"
+	"fmt"
 	"os"
 	"strconv"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+	"gitlab.com/bootc-org/podman-bootc/pkg/user"
 )
 
 func ReadPidFile(pidFile string) (int, error) {
@@ -35,3 +40,29 @@ func FileExists(path string) (bool, error) {
 	}
 	return exists, err
 }
+
+// FullImageIdFromPartial returns the full image ID given a partial image ID and the user.
+// If the partial image ID is already a full image ID, it is returned as is.
+func FullImageIdFromPartial(partialId string, user user.User) (fullImageId string, err error) {
+	if len(partialId) == 64 {
+		logrus.Debugf("Partial image ID '%s' is already a full image ID", partialId)
+		return partialId, nil
+	}
+
+	files, err := os.ReadDir(user.CacheDir())
+	if err != nil {
+		return "", err
+	}
+
+	for _, f := range files {
+		if f.IsDir() && len(f.Name()) == 64 && strings.HasPrefix(f.Name(), partialId) {
+			fullImageId = f.Name()
+		}
+	}
+
+	if fullImageId == "" {
+		return "", fmt.Errorf("local installation '%s' does not exist", partialId)
+	}
+
+	return fullImageId, nil
+}
diff --git a/pkg/vm/vm.go b/pkg/vm/vm.go
index c96eb5b1..5e84fe7a 100644
--- a/pkg/vm/vm.go
+++ b/pkg/vm/vm.go
@@ -9,11 +9,11 @@ import (
 	"os/exec"
 	"path/filepath"
 	"strconv"
-	"strings"
 	"time"
 
 	"gitlab.com/bootc-org/podman-bootc/pkg/bootc"
 	"gitlab.com/bootc-org/podman-bootc/pkg/config"
+	"gitlab.com/bootc-org/podman-bootc/pkg/container"
 	"gitlab.com/bootc-org/podman-bootc/pkg/user"
 	"gitlab.com/bootc-org/podman-bootc/pkg/utils"
 
@@ -26,20 +26,9 @@ var ErrVMInUse = errors.New("VM already in use")
 
 // GetVMCachePath returns the path to the VM cache directory
 func GetVMCachePath(imageId string, user user.User) (longID string, path string, err error) {
-	files, err := os.ReadDir(user.CacheDir())
+	fullImageId, err := utils.FullImageIdFromPartial(imageId, user)
 	if err != nil {
-		return "", "", err
-	}
-
-	fullImageId := ""
-	for _, f := range files {
-		if f.IsDir() && len(f.Name()) == 64 && strings.HasPrefix(f.Name(), imageId) {
-			fullImageId = f.Name()
-		}
-	}
-
-	if fullImageId == "" {
-		return "", "", fmt.Errorf("local installation '%s' does not exists", imageId)
+		return "", "", fmt.Errorf("error getting full image ID: %w", err)
 	}
 
 	return fullImageId, filepath.Join(user.CacheDir(), fullImageId), nil
@@ -49,7 +38,6 @@ type NewVMParameters struct {
 	ImageID    string
 	User       user.User //user who is running the podman bootc command
 	LibvirtUri string    //linux only
-	Locking    utils.AccessMode
 }
 
 type RunVMParameters struct {
@@ -68,7 +56,7 @@ type BootcVM interface {
 	Run(RunVMParameters) error
 	Delete() error
 	IsRunning() (bool, error)
-	WriteConfig(bootc.BootcDisk) error
+	WriteConfig(bootc.BootcDisk, container.ContainerImage) error
 	WaitForSSHToBeReady() error
 	RunSSH([]string) error
 	DeleteFromCache() error
@@ -76,7 +64,6 @@ type BootcVM interface {
 	GetConfig() (*BootcVMConfig, error)
 	CloseConnection()
 	PrintConsole() error
-	Unlock() error
 }
 
 type BootcVMCommon struct {
@@ -95,7 +82,6 @@ type BootcVMCommon struct {
 	hasCloudInit  bool
 	cloudInitDir  string
 	cloudInitArgs string
-	cacheDirLock  utils.CacheLock
 }
 
 type BootcVMConfig struct {
@@ -109,7 +95,7 @@ type BootcVMConfig struct {
 }
 
 // writeConfig writes the configuration for the VM to the disk
-func (v *BootcVMCommon) WriteConfig(bootcDisk bootc.BootcDisk) error {
+func (v *BootcVMCommon) WriteConfig(bootcDisk bootc.BootcDisk, containerImage container.ContainerImage) error {
 	size, err := bootcDisk.GetSize()
 	if err != nil {
 		return fmt.Errorf("get disk size: %w", err)
@@ -118,7 +104,7 @@ func (v *BootcVMCommon) WriteConfig(bootcDisk bootc.BootcDisk) error {
 		Id:          v.imageID[0:12],
 		SshPort:     v.sshPort,
 		SshIdentity: v.sshIdentity,
-		RepoTag:     bootcDisk.GetRepoTag(),
+		RepoTag:     containerImage.GetRepoTag(),
 		Created:     bootcDisk.GetCreatedAt().Format(time.RFC3339),
 		DiskSize:    strconv.FormatInt(size, 10),
 	}
@@ -280,31 +266,3 @@ func (b *BootcVMCommon) tmpFileInjectSshKeyEnc() (string, error) {
 	tmpFileCmdEnc := base64.StdEncoding.EncodeToString([]byte(tmpFileCmd))
 	return tmpFileCmdEnc, nil
 }
-
-func lockVM(params NewVMParameters, cacheDir string) (utils.CacheLock, error) {
-	lock := utils.NewCacheLock(params.User.RunDir(), cacheDir)
-	locked, err := lock.TryLock(params.Locking)
-	if err != nil {
-		return lock, fmt.Errorf("unable to lock the VM cache path: %w", err)
-	}
-
-	if !locked {
-		return lock, ErrVMInUse
-	}
-
-	cacheDirExists, err := utils.FileExists(cacheDir)
-	if err != nil {
-		if err := lock.Unlock(); err != nil {
-			logrus.Debugf("unlock failed: %v", err)
-		}
-		return lock, fmt.Errorf("unable to check cache path: %w", err)
-	}
-	if !cacheDirExists {
-		if err := lock.Unlock(); err != nil {
-			logrus.Debugf("unlock failed: %v", err)
-		}
-		return lock, fmt.Errorf("'%s' does not exists", params.ImageID)
-	}
-
-	return lock, nil
-}
diff --git a/pkg/vm/vm_darwin.go b/pkg/vm/vm_darwin.go
index b381333c..b7ca7e56 100644
--- a/pkg/vm/vm_darwin.go
+++ b/pkg/vm/vm_darwin.go
@@ -30,11 +30,6 @@ func NewVM(params NewVMParameters) (vm *BootcVMMac, err error) {
 		return nil, fmt.Errorf("unable to get VM cache path: %w", err)
 	}
 
-	lock, err := lockVM(params, cacheDir)
-	if err != nil {
-		return nil, err
-	}
-
 	vm = &BootcVMMac{
 		socketFile: filepath.Join(params.User.CacheDir(), longId[:12]+"-console.sock"),
 		BootcVMCommon: BootcVMCommon{
@@ -43,7 +38,6 @@ func NewVM(params NewVMParameters) (vm *BootcVMMac, err error) {
 			diskImagePath: filepath.Join(cacheDir, config.DiskImage),
 			pidFile:       filepath.Join(cacheDir, config.RunPidFile),
 			user:          params.User,
-			cacheDirLock:  lock,
 		},
 	}
 
@@ -244,7 +238,3 @@ func getQemuInstallPath() (string, error) {
 
 	return "", errors.New("QEMU binary not found")
 }
-
-func (v *BootcVMMac) Unlock() error {
-	return v.cacheDirLock.Unlock()
-}
diff --git a/pkg/vm/vm_linux.go b/pkg/vm/vm_linux.go
index 5e8f10c0..56326d26 100644
--- a/pkg/vm/vm_linux.go
+++ b/pkg/vm/vm_linux.go
@@ -44,11 +44,6 @@ func NewVM(params NewVMParameters) (vm *BootcVMLinux, err error) {
 		return nil, fmt.Errorf("unable to get VM cache path: %w", err)
 	}
 
-	lock, err := lockVM(params, cacheDir)
-	if err != nil {
-		return nil, err
-	}
-
 	vm = &BootcVMLinux{
 		libvirtUri: params.LibvirtUri,
 		BootcVMCommon: BootcVMCommon{
@@ -57,15 +52,11 @@ func NewVM(params NewVMParameters) (vm *BootcVMLinux, err error) {
 			cacheDir:      cacheDir,
 			diskImagePath: filepath.Join(cacheDir, config.DiskImage),
 			user:          params.User,
-			cacheDirLock:  lock,
 		},
 	}
 
 	err = vm.loadExistingDomain()
 	if err != nil {
-		if err := vm.Unlock(); err != nil {
-			logrus.Debugf("unlock failed: %v", err)
-		}
 		return vm, fmt.Errorf("unable to load existing libvirt domain: %w", err)
 	}
 
@@ -380,7 +371,3 @@ func (v *BootcVMLinux) IsRunning() (exists bool, err error) {
 		return false, nil
 	}
 }
-
-func (v *BootcVMLinux) Unlock() error {
-	return v.cacheDirLock.Unlock()
-}
diff --git a/pkg/vm/vm_test.go b/pkg/vm/vm_test.go
index a00d52b7..4b128470 100644
--- a/pkg/vm/vm_test.go
+++ b/pkg/vm/vm_test.go
@@ -12,8 +12,9 @@ import (
 
 	"gitlab.com/bootc-org/podman-bootc/cmd"
 	"gitlab.com/bootc-org/podman-bootc/pkg/bootc"
+	"gitlab.com/bootc-org/podman-bootc/pkg/cache"
+	"gitlab.com/bootc-org/podman-bootc/pkg/container"
 	"gitlab.com/bootc-org/podman-bootc/pkg/user"
-	"gitlab.com/bootc-org/podman-bootc/pkg/utils"
 	"gitlab.com/bootc-org/podman-bootc/pkg/vm"
 
 	. "github.com/onsi/ginkgo/v2"
@@ -85,7 +86,6 @@ func createTestVM(imageId string) (bootcVM *vm.BootcVMLinux) {
 		ImageID:    imageId,
 		User:       testUser,
 		LibvirtUri: testLibvirtUri,
-		Locking:    utils.Shared,
 	})
 	Expect(err).To(Not(HaveOccurred()))
 
@@ -108,20 +108,34 @@ func runTestVM(bootcVM vm.BootcVM) {
 	now := time.Now()
 	now = now.Add(-time.Duration(1 * time.Minute))
 
-	bootcDisk := bootc.BootcDisk{
+
+	containerImage := container.ContainerImage{
 		ImageNameOrId: testImageID,
-		User:          testUser,
 		Ctx:           context.Background(),
-		ImageId:       testImageID,
+		Id:            testImageID,
 		RepoTag:       testRepoTag,
-		CreatedAt:     now,
-		Directory:     filepath.Join(testUser.CacheDir(), testImageID),
+		Size:          0,
+		Pulled:        true,
+	}
+
+	cache := cache.Cache{
+		User:      testUser,
+		ImageId:   testImageID,
+		Directory: filepath.Join(testUser.CacheDir(), testImageID),
+	}
+
+	bootcDisk := bootc.BootcDisk{
+		Cache:          cache,
+		ContainerImage: containerImage,
+		User:           testUser,
+		Ctx:            context.Background(),
+		CreatedAt:      now,
 	}
 
 	err = os.WriteFile(filepath.Join(testUser.CacheDir(), testImageID, "disk.raw"), []byte(""), 0700)
 	Expect(err).To(Not(HaveOccurred()))
 
-	err = bootcVM.WriteConfig(bootcDisk)
+	err = bootcVM.WriteConfig(bootcDisk, containerImage)
 	Expect(err).To(Not(HaveOccurred()))
 }
@@ -156,9 +170,6 @@ var _ = Describe("VM", func() {
 	Context("does not exist", func() {
 		It("should create and start the VM after calling Run", func() {
 			bootcVM := createTestVM(testImageID)
-			defer func() {
-				_ = bootcVM.Unlock()
-			}()
 			runTestVM(bootcVM)
 
 			exists, err := bootcVM.Exists()
@@ -172,10 +183,6 @@ var _ = Describe("VM", func() {
 
 		It("should return false when calling Exists before Run", func() {
 			bootcVM := createTestVM(testImageID)
-			defer func() {
-				_ = bootcVM.Unlock()
-			}()
-
 			exists, err := bootcVM.Exists()
 			Expect(err).To(Not(HaveOccurred()))
 			Expect(exists).To(BeFalse())
@@ -192,10 +199,6 @@ var _ = Describe("VM", func() {
 		It("should remove the VM from the hypervisor after calling Delete", func() {
 			//create vm and start it
 			bootcVM := createTestVM(testImageID)
-			defer func() {
-				_ = bootcVM.Unlock()
-			}()
-
 			runTestVM(bootcVM)
 
 			//assert that the VM exists
@@ -215,10 +218,6 @@ var _ = Describe("VM", func() {
 
 		It("should list the VM", func() {
 			bootcVM := createTestVM(testImageID)
-			defer func() {
-				_ = bootcVM.Unlock()
-			}()
-
 			runTestVM(bootcVM)
 			vmList, err := cmd.CollectVmList(testUser, testLibvirtUri)
 			Expect(err).To(Not(HaveOccurred()))
@@ -239,26 +238,14 @@ var _ = Describe("VM", func() {
 	Context("multiple running", func() {
 		It("should list all VMs", func() {
 			bootcVM := createTestVM(testImageID)
-			defer func() {
-				_ = bootcVM.Unlock()
-			}()
-
 			runTestVM(bootcVM)
 
 			id2 := "1234564b145ed339eeef86046aea3ee221a2a5a16f588aff4f43a42e5ca9f844"
 			bootcVM2 := createTestVM(id2)
-			defer func() {
-				_ = bootcVM2.Unlock()
-			}()
-
 			runTestVM(bootcVM2)
 
 			id3 := "2345674b145ed339eeef86046aea3ee221a2a5a16f588aff4f43a42e5ca9f844"
 			bootcVM3 := createTestVM(id3)
-			defer func() {
-				_ = bootcVM3.Unlock()
-			}()
-
 			runTestVM(bootcVM3)
 
 			vmList, err := cmd.CollectVmList(testUser, testLibvirtUri)
diff --git a/podman-bootc.go b/podman-bootc.go
index 20a40e69..cdd403e0 100644
--- a/podman-bootc.go
+++ b/podman-bootc.go
@@ -9,6 +9,8 @@ import (
 
 	"gitlab.com/bootc-org/podman-bootc/cmd"
 	"gitlab.com/bootc-org/podman-bootc/pkg/bootc"
+	"gitlab.com/bootc-org/podman-bootc/pkg/cache"
+	"gitlab.com/bootc-org/podman-bootc/pkg/container"
 	"gitlab.com/bootc-org/podman-bootc/pkg/user"
 	"gitlab.com/bootc-org/podman-bootc/pkg/utils"
 
@@ -57,7 +59,7 @@ func cleanup() {
 	}
 
 	//delete the disk image
-	err = bootc.NewBootcDisk("", ctx, user).Cleanup()
+	err = bootc.NewBootcDisk(container.ContainerImage{}, ctx, user, cache.Cache{}, false).Cleanup()
 	if err != nil {
 		logrus.Errorf("unable to get podman machine info: %s", err)
 		os.Exit(0)
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 04c52797..5ad02be0 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -315,4 +315,64 @@ var _ = Describe("E2E", func() {
 			}
 		})
 	})
+
+	Context("Modifying disk size", Ordered, func() {
+		var vm *e2e.TestVM
+
+		BeforeAll(func() {
+			var err error
+			vm, err = e2e.BootVM(e2e.BaseImage, "--disk-size", "25G")
+			Expect(err).To(Not(HaveOccurred()))
+		})
+
+		It("Should create a new VM with a specific disk size", func() {
+			vmDirs, err := e2e.ListCacheDirs()
+			Expect(err).To(Not(HaveOccurred()))
+			Expect(vmDirs).To(HaveLen(1))
+
+			fileInfo, err := os.Stat(filepath.Join(vmDirs[0], config.DiskImage))
+			Expect(err).To(Not(HaveOccurred()))
+
+			Expect(fileInfo.Size()).To(Equal(int64(25000001536)))
+		})
+
+		It("Should fail to resize a VM's disk size while the VM is running", func() {
+			_, _, err := e2e.RunPodmanBootc("run", e2e.BaseImage, "--disk-size", "26G")
+			Expect(err).To(HaveOccurred())
+		})
+
+		It("Should resize an existing VM's disk size", func() {
+			//stop the VM first
+			err := vm.StdIn.Close()
+			Expect(err).To(Not(HaveOccurred()))
+			_, _, err = e2e.RunPodmanBootc("stop", vm.Id)
+			Expect(err).To(Not(HaveOccurred()))
+
+			Eventually(func() bool {
+				vmExists, err := e2e.VMExists(vm.Id)
+				Expect(err).To(Not(HaveOccurred()))
+				return vmExists
+			}).Should(BeFalse())
+
+			//resize the disk
+			vm, err = e2e.BootVM(e2e.BaseImage, "--disk-size", "27G")
+			Expect(err).To(Not(HaveOccurred()))
+
+			//verify the disk size
+			vmDirs, err := e2e.ListCacheDirs()
+			Expect(err).To(Not(HaveOccurred()))
+			Expect(vmDirs).To(HaveLen(1))
+			fileInfo, err := os.Stat(filepath.Join(vmDirs[0], config.DiskImage))
+			Expect(err).To(Not(HaveOccurred()))
+			Expect(fileInfo.Size()).To(Equal(int64(27000000512)))
+		})
+
+		AfterAll(func() {
+			vm.StdIn.Close()
+			err := e2e.Cleanup()
+			if err != nil {
+				Fail(err.Error())
+			}
+		})
+	})
 })
diff --git a/test/e2e/e2e_utils.go b/test/e2e/e2e_utils.go
index 95ea17c6..5852f92e 100644
--- a/test/e2e/e2e_utils.go
+++ b/test/e2e/e2e_utils.go
@@ -105,8 +105,10 @@ func GetVMIdFromContainerImage(image string) (vmId string, err error) {
 	return
 }
 
-func BootVM(image string) (vm *TestVM, err error) {
-	runActiveCmd := exec.Command(PodmanBootcBinary(), "run", image)
+func BootVM(image string, args ...string) (vm *TestVM, err error) {
+	cmd := []string{"run", image}
+	cmd = append(cmd, args...)
+	runActiveCmd := exec.Command(PodmanBootcBinary(), cmd...)
 	stdIn, err := runActiveCmd.StdinPipe()
 	if err != nil {