package ocipull
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v5/pkg/machine/compression"
"github.com/containers/podman/v5/pkg/machine/define"
"github.com/containers/podman/v5/utils"
crc "github.com/crc-org/crc/v2/pkg/os"
"github.com/opencontainers/go-digest"
specV1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
const (
artifactRegistry = "quay.io"
artifactRepo = "podman"
artifactImageName = "machine-os"
artifactImageNameWSL = "machine-os-wsl"
artifactOriginalName = "org.opencontainers.image.title"
machineOS = "linux"
)
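// OCIArtifactDisk tracks the state needed to pull a compressed machine disk
// artifact from an OCI registry into the local image cache and decompress it
// to its final location.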
type OCIArtifactDisk struct {
cache bool
cachedCompressedDiskPath *define.VMFile
name string
ctx context.Context
dirs *define.MachineDirs
diskArtifactOpts *DiskArtifactOpts
finalPath string
imageEndpoint string
machineVersion *OSVersion
diskArtifactFileName string
pullOptions *PullOptions
vmType define.VMType
}
type DiskArtifactOpts struct {
arch string
diskType string
os string
}
/*
This interface is for automatically pulling a disk artifact (qcow2, raw, or vhdx file) from a predetermined
image location. The logic is tied to the VM types (applehv, qemu, hyperv, wsl) and their understanding of the
type of disk they require. The process can be generally described as:
* Determine the flavor of artifact we are looking for (arch, compression, type)
* Grab the manifest list for the target
* Walk the artifacts to find a match based on flavor
* Check the hash of the artifact against the hash of our cached image
* If the cached image does not exist or does not match, pull the latest into an OCI directory
* Read the OCI blob's manifest to determine which blob is the artifact disk
* Rename/move the blob in the OCI directory to the image cache dir and append the type and compression,
  e.g. 91d1e51ddfac9d4afb1f96df878089cfdb9ab9be5886f8bccac0f0557ed28974.qcow2.xz
* Discard the OCI directory
* Decompress the cached image to the image dir in the form of <vmname>-<arch>.<raw|vhdx|qcow2>

A usage sketch follows NewOCIArtifactPull below.
*/
func NewOCIArtifactPull(ctx context.Context, dirs *define.MachineDirs, endpoint string, vmName string, vmType define.VMType, finalPath *define.VMFile) (*OCIArtifactDisk, error) {
var arch string
artifactVersion := getVersion()
switch runtime.GOARCH {
case "amd64":
arch = "x86_64"
case "arm64":
arch = "aarch64"
default:
return nil, fmt.Errorf("unsupported machine arch: %s", runtime.GOARCH)
}
diskOpts := DiskArtifactOpts{
arch: arch,
diskType: vmType.DiskType(),
os: machineOS,
}
cache := false
if endpoint == "" {
// The OCI artifact containing the OS image for WSL has a different
// image name. This should be temporary and dropped as soon as the
// OS image for WSL is built from fedora-coreos too (c.f. RUN-2178).
imageName := artifactImageName
if vmType == define.WSLVirt {
imageName = artifactImageNameWSL
}
endpoint = fmt.Sprintf("docker://%s/%s/%s:%s", artifactRegistry, artifactRepo, imageName, artifactVersion.majorMinor())
cache = true
}
ociDisk := OCIArtifactDisk{
ctx: ctx,
cache: cache,
dirs: dirs,
diskArtifactOpts: &diskOpts,
finalPath: finalPath.GetPath(),
imageEndpoint: endpoint,
machineVersion: artifactVersion,
name: vmName,
pullOptions: &PullOptions{},
vmType: vmType,
}
return &ociDisk, nil
}
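// Usage sketch (illustrative only, not part of the package API): a VM provider
// would typically construct the puller and then call Get. The ctx, dirs, and
// finalPath values are assumed to come from the provider's own setup code, and
// define.QemuVirt is just a placeholder VM type.
//
//	disk, err := ocipull.NewOCIArtifactPull(ctx, dirs, "", "mymachine", define.QemuVirt, finalPath)
//	if err != nil {
//		return err
//	}
//	// Get pulls (or reuses) the cached compressed artifact and decompresses
//	// it to finalPath; GetNoCompress stops after the caching step.
//	if err := disk.Get(); err != nil {
//		return err
//	}

// OriginalFileName returns the path of the cached compressed disk image and the
// original file name recorded in the artifact's manifest annotations; both are
// populated by Get or GetNoCompress.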
func (o *OCIArtifactDisk) OriginalFileName() (string, string) {
return o.cachedCompressedDiskPath.GetPath(), o.diskArtifactFileName
}
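// Get fetches the disk artifact into the image cache, unless a matching cached
// copy already exists, and then decompresses it to the final path configured at
// construction time.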
func (o *OCIArtifactDisk) Get() error {
cleanCache, err := o.get()
if err != nil {
return err
}
if cleanCache != nil {
defer cleanCache()
}
return o.decompress()
}
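// GetNoCompress behaves like Get but skips the final decompression step; it
// returns a cleanup callback (possibly nil) for the image cache that callers
// should run once the artifact has been consumed.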
func (o *OCIArtifactDisk) GetNoCompress() (func(), error) {
return o.get()
}
func (o *OCIArtifactDisk) get() (func(), error) {
cleanCache := func() {}
destRef, artifactDigest, err := o.getDestArtifact()
if err != nil {
return nil, err
}
// Note: the artifactDigest here is the hash of the most recent disk image available
cachedImagePath, err := o.dirs.ImageCacheDir.AppendToNewVMFile(fmt.Sprintf("%s.%s", artifactDigest.Encoded(), o.vmType.ImageFormat().KindWithCompression()), nil)
if err != nil {
return nil, err
}
// check if we have the latest and greatest disk image
if _, err = os.Stat(cachedImagePath.GetPath()); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return nil, fmt.Errorf("unable to access cached image path %q: %q", cachedImagePath.GetPath(), err)
}
// On cache misses, we clean out the cache
cleanCache = o.cleanCache(cachedImagePath.GetPath())
// pull the image down to our local filesystem
if err := o.pull(destRef, artifactDigest); err != nil {
return nil, fmt.Errorf("failed to pull %s: %w", destRef.DockerReference(), err)
}
// grab the artifact disk out of the pulled OCI directory and lay
// it into our local image cache in the format of
// hash + disktype + compression
//
// it will be served from the cache until it is "outdated"
//
// e.g. 91d1e51...d28974.qcow2.xz
if err := o.unpack(artifactDigest); err != nil {
return nil, err
}
} else {
logrus.Debugf("cached image exists and is latest: %s", cachedImagePath.GetPath())
o.cachedCompressedDiskPath = cachedImagePath
}
return cleanCache, nil
}
func (o *OCIArtifactDisk) cleanCache(cachedImagePath string) func() {
// Cache miss while using an image that we cache (i.e. the default image):
// clean out all old files from the cache dir.
if o.cache {
files, err := os.ReadDir(o.dirs.ImageCacheDir.GetPath())
if err != nil {
logrus.Warn("failed to clean machine image cache: ", err)
return nil
}
return func() {
for _, file := range files {
path := filepath.Join(o.dirs.ImageCacheDir.GetPath(), file.Name())
logrus.Debugf("cleaning cached file: %s", path)
err := utils.GuardedRemoveAll(path)
if err != nil && !errors.Is(err, os.ErrNotExist) {
logrus.Warn("failed to clean machine image cache: ", err)
}
}
}
} else {
// Using an image that we don't cache (i.e. not the default image):
// delete the image after use and don't cache it.
return func() {
logrus.Debugf("cleaning cache: %s", o.dirs.ImageCacheDir.GetPath())
err := os.Remove(cachedImagePath)
if err != nil && !errors.Is(err, os.ErrNotExist) {
logrus.Warn("failed to clean pulled machine image: ", err)
}
}
}
}
func (o *OCIArtifactDisk) getDestArtifact() (types.ImageReference, digest.Digest, error) {
imgRef, err := alltransports.ParseImageName(o.imageEndpoint)
if err != nil {
return nil, "", err
}
fmt.Printf("Looking up Podman Machine image at %s to create VM\n", imgRef.DockerReference())
sysCtx := &types.SystemContext{
DockerInsecureSkipTLSVerify: o.pullOptions.SkipTLSVerify,
}
imgSrc, err := imgRef.NewImageSource(o.ctx, sysCtx)
if err != nil {
return nil, "", err
}
defer func() {
if err := imgSrc.Close(); err != nil {
logrus.Warn(err)
}
}()
diskArtifactDigest, err := GetDiskArtifactReference(o.ctx, imgSrc, o.diskArtifactOpts)
if err != nil {
return nil, "", err
}
// create a ref now and return
named := imgRef.DockerReference()
digestedRef, err := reference.WithDigest(reference.TrimNamed(named), diskArtifactDigest)
if err != nil {
return nil, "", err
}
// Get and "store" the original filename the disk artifact had
originalFileName, err := getOriginalFileName(o.ctx, imgSrc, diskArtifactDigest)
if err != nil {
return nil, "", err
}
o.diskArtifactFileName = originalFileName
newRef, err := docker.NewReference(digestedRef)
if err != nil {
return nil, "", err
}
return newRef, diskArtifactDigest, nil
}
func (o *OCIArtifactDisk) pull(destRef types.ImageReference, artifactDigest digest.Digest) error {
destFileName := artifactDigest.Encoded()
destFile, err := o.dirs.ImageCacheDir.AppendToNewVMFile(destFileName, nil)
if err != nil {
return err
}
return Pull(o.ctx, destRef, destFile, o.pullOptions)
}
func (o *OCIArtifactDisk) unpack(diskArtifactHash digest.Digest) error {
finalSuffix := extractKindAndCompression(o.diskArtifactFileName)
blobDir, err := o.dirs.ImageCacheDir.AppendToNewVMFile(diskArtifactHash.Encoded(), nil)
if err != nil {
return err
}
cachedCompressedPath, err := o.dirs.ImageCacheDir.AppendToNewVMFile(diskArtifactHash.Encoded()+finalSuffix, nil)
if err != nil {
return err
}
o.cachedCompressedDiskPath = cachedCompressedPath
blobInfo, err := GetLocalBlob(o.ctx, blobDir.GetPath())
if err != nil {
return fmt.Errorf("unable to get local manifest for %s: %q", blobDir.GetPath(), err)
}
diskBlobPath := filepath.Join(blobDir.GetPath(), "blobs", "sha256", blobInfo.Digest.Encoded())
// Rename and move the hashed blob file to the cache dir.
// If the rename fails (e.g. across filesystems), fall back to a sparse copy.
if err := os.Rename(diskBlobPath, cachedCompressedPath.GetPath()); err != nil {
logrus.Errorf("renaming compressed image %q failed: %v", cachedCompressedPath.GetPath(), err)
logrus.Error("trying again using copy")
if err := crc.CopyFileSparse(diskBlobPath, cachedCompressedPath.GetPath()); err != nil {
return err
}
}
// Clean up the OCI dir, which is no longer needed
return utils.GuardedRemoveAll(blobDir.GetPath())
}
func (o *OCIArtifactDisk) decompress() error {
return compression.Decompress(o.cachedCompressedDiskPath, o.finalPath)
}
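// getOriginalFileName reads the disk artifact's OCI manifest and returns the
// original file name recorded in the org.opencontainers.image.title layer
// annotation. For reference, the relevant portion of such a manifest looks
// roughly like the following (abridged sketch; the digest value is hypothetical):
//
//	"layers": [
//	  {
//	    "digest": "sha256:91d1e51...",
//	    "annotations": {
//	      "org.opencontainers.image.title": "fedora-coreos-39.20240128.2.2-qemu.x86_64.qcow2.xz"
//	    }
//	  }
//	]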
func getOriginalFileName(ctx context.Context, imgSrc types.ImageSource, artifactDigest digest.Digest) (string, error) {
v1RawManifest, _, err := imgSrc.GetManifest(ctx, &artifactDigest)
if err != nil {
return "", err
}
v1Manifest := specV1.Manifest{}
if err := json.Unmarshal(v1RawManifest, &v1Manifest); err != nil {
return "", err
}
if layerLen := len(v1Manifest.Layers); layerLen != 1 {
return "", fmt.Errorf("podman-machine images should have exactly one layer: %d found", layerLen)
}
// podman-machine images should have the original file name
// stored in the layer annotations under org.opencontainers.image.title,
// e.g. fedora-coreos-39.20240128.2.2-qemu.x86_64.qcow2.xz
originalFileName, ok := v1Manifest.Layers[0].Annotations[artifactOriginalName]
if !ok {
return "", fmt.Errorf("unable to determine original artifact name: missing required annotation %q", artifactOriginalName)
}
logrus.Debugf("original artifact file name: %s", originalFileName)
return originalFileName, nil
}
// extractKindAndCompression extracts the VM image kind and the compression
// extension from an artifact file name; it is used when renaming the blob from
// its hash to something meaningful, e.g. for
// fedora-coreos-39.20240128.2.2-qemu.x86_64.qcow2.xz it returns ".qcow2.xz".
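//
// For illustration (the second file name is hypothetical):
//
//	extractKindAndCompression("fedora-coreos-39.20240128.2.2-qemu.x86_64.qcow2.xz") // ".qcow2.xz"
//	extractKindAndCompression("example-disk.x86_64.vhdx.zip")                       // ".vhdx.zip"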
func extractKindAndCompression(name string) string {
compressAlgo := filepath.Ext(name)
compressStrippedName := strings.TrimSuffix(name, compressAlgo)
kind := filepath.Ext(compressStrippedName)
return kind + compressAlgo
}