package copy
import (
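	"bytes" // used only by the hypothetical example sketches added in this file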
"errors"
"fmt"
"io"
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
var (
// defaultCompressionFormat is used if the destination transport requests
// compression, and the user does not explicitly instruct us to use an algorithm.
defaultCompressionFormat = &compression.Gzip
// compressionBufferSize is the buffer size used to compress a blob
compressionBufferSize = 1048576
// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
// using the algorithm that the media type says it should be compressed with
expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip,
imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd,
manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
}
)
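// The sketch below is illustrative only (exampleDetectGzip is hypothetical, not
// part of the upstream API): it compresses an in-memory payload with gzip and
// confirms that compression.DetectCompressionFormat recognizes the result.
func exampleDetectGzip() error {
	var buf bytes.Buffer
	w, err := compression.CompressStream(&buf, compression.Gzip, nil) // nil selects the default compression level
	if err != nil {
		return err
	}
	if _, err := w.Write([]byte("example payload")); err != nil {
		w.Close()
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}
	format, decompressor, _, err := compression.DetectCompressionFormat(bytes.NewReader(buf.Bytes()))
	if err != nil {
		return err
	}
	fmt.Printf("format=%s isCompressed=%v\n", format.Name(), decompressor != nil) // format=gzip isCompressed=true
	return nil
}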
// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
type bpDetectCompressionStepData struct {
isCompressed bool
format compressiontypes.Algorithm // Valid if isCompressed
decompressor compressiontypes.DecompressorFunc // Valid if isCompressed
srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob.
}
// blobPipelineDetectCompressionStep updates *stream to detect its current compression format.
// srcInfo is only used for error messages.
// Returns data for other steps.
func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) {
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform
if err != nil {
return bpDetectCompressionStepData{}, fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
}
stream.reader = reader
res := bpDetectCompressionStepData{
isCompressed: decompressor != nil,
format: format,
decompressor: decompressor,
}
if res.isCompressed {
res.srcCompressorName = format.Name()
} else {
res.srcCompressorName = internalblobinfocache.Uncompressed
}
if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() {
logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name())
}
return res, nil
}
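// A minimal sketch of driving the detect step directly (exampleDetectStep is
// hypothetical; sourceStream is defined elsewhere in this package). The input
// is deliberately uncompressed, so the step reports the Uncompressed
// compressor name rather than a concrete algorithm.
func exampleDetectStep() error {
	stream := &sourceStream{
		reader: bytes.NewReader([]byte("uncompressed layer data")),
		info:   types.BlobInfo{Size: -1, MediaType: imgspecv1.MediaTypeImageLayer},
	}
	detected, err := blobPipelineDetectCompressionStep(stream, stream.info)
	if err != nil {
		return err
	}
	fmt.Printf("isCompressed=%v srcCompressorName=%s\n", detected.isCompressed, detected.srcCompressorName)
	return nil
}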
// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
type bpCompressionStepData struct {
operation types.LayerCompression // Operation to use for updating the blob metadata.
uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
srcCompressorName string // Compressor name to record in the blob info cache for the source blob.
uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob.
closers []io.Closer // Objects to close after the upload is done, if any.
}
// blobPipelineCompressionStep updates *stream to compress and/or decompress it.
// srcInfo is primarily used for error messages.
// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedDigestData,
// and must eventually call close.
func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo,
detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
// WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
// short-circuit conditions
layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType)
if !layerCompressionChangeSupported {
logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType)
}
if canModifyBlob && layerCompressionChangeSupported {
for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){
ic.bpcPreserveEncrypted,
ic.bpcCompressUncompressed,
ic.bpcRecompressCompressed,
ic.bpcDecompressCompressed,
} {
res, err := fn(stream, detected)
if err != nil {
return nil, err
}
if res != nil {
return res, nil
}
}
}
return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil
}
// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if isOciEncrypted(stream.info.MediaType) {
logrus.Debugf("Using original blob without modification for encrypted blob")
// PreserveOriginal: compression changes cannot be applied to an encrypted blob unless it is decrypted first
return &bpCompressionStepData{
operation: types.PreserveOriginal,
uploadedAlgorithm: nil,
srcCompressorName: internalblobinfocache.UnknownCompression,
uploadedCompressorName: internalblobinfocache.UnknownCompression,
}, nil
}
return nil, nil
}
// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed {
logrus.Debugf("Compressing blob on the fly")
var uploadedAlgorithm *compressiontypes.Algorithm
if ic.compressionFormat != nil {
uploadedAlgorithm = ic.compressionFormat
} else {
uploadedAlgorithm = defaultCompressionFormat
}
reader, annotations := ic.compressedStream(stream.reader, *uploadedAlgorithm)
// Note: reader must be closed on all return paths.
stream.reader = reader
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
Digest: "",
Size: -1,
}
return &bpCompressionStepData{
operation: types.Compress,
uploadedAlgorithm: uploadedAlgorithm,
uploadedAnnotations: annotations,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: uploadedAlgorithm.Name(),
closers: []io.Closer{reader},
}, nil
}
return nil, nil
}
// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() {
// When the blob is compressed but the desired format is different, it first needs to be decompressed and then
// re-compressed using the desired format.
logrus.Debugf("Blob will be converted")
decompressed, err := detected.decompressor(stream.reader)
if err != nil {
return nil, err
}
succeeded := false
defer func() {
if !succeeded {
decompressed.Close()
}
}()
recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat)
// Note: recompressed must be closed on all return paths.
stream.reader = recompressed
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations.
Digest: "",
Size: -1,
}
succeeded = true
return &bpCompressionStepData{
operation: types.PreserveOriginal,
uploadedAlgorithm: ic.compressionFormat,
uploadedAnnotations: annotations,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: ic.compressionFormat.Name(),
closers: []io.Closer{decompressed, recompressed},
}, nil
}
return nil, nil
}
// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed {
logrus.Debugf("Blob will be decompressed")
s, err := detected.decompressor(stream.reader)
if err != nil {
return nil, err
}
// Note: s must be closed on all return paths.
stream.reader = s
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations.
Digest: "",
Size: -1,
}
return &bpCompressionStepData{
operation: types.Decompress,
uploadedAlgorithm: nil,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: internalblobinfocache.Uncompressed,
closers: []io.Closer{s},
}, nil
}
return nil, nil
}
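// The hypothetical sketch below mirrors the core of bpcDecompressCompressed on
// an in-memory payload: compress with zstd, detect the format, then run the
// detected decompressor and read the original data back.
func exampleDecompress() error {
	var buf bytes.Buffer
	w, err := compression.CompressStream(&buf, compression.Zstd, nil)
	if err != nil {
		return err
	}
	if _, err := w.Write([]byte("layer contents")); err != nil {
		w.Close()
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}
	_, decompressor, reader, err := compression.DetectCompressionFormat(&buf)
	if err != nil {
		return err
	}
	rc, err := decompressor(reader)
	if err != nil {
		return err
	}
	defer rc.Close()
	data, err := io.ReadAll(rc)
	if err != nil {
		return err
	}
	fmt.Printf("decompressed: %q\n", data) // decompressed: "layer contents"
	return nil
}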
// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob.
// This does not change the sourceStream parameter; we include it for symmetry with other
// pipeline steps.
func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData,
layerCompressionChangeSupported bool) *bpCompressionStepData {
logrus.Debugf("Using original blob without modification")
// Remember if the original blob was compressed, and if so how, so that if
// LayerInfosForCopy() returned something that differs from what was in the
// source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
// it will be able to correctly derive the MediaType for the copied blob.
//
// But don’t touch blobs in objects where we can’t change compression,
// so that src.UpdatedImage() doesn’t fail; assume that for such blobs
// LayerInfosForCopy() should not be making any changes in the first place.
var algorithm *compressiontypes.Algorithm
if layerCompressionChangeSupported && detected.isCompressed {
algorithm = &detected.format
} else {
algorithm = nil
}
return &bpCompressionStepData{
operation: types.PreserveOriginal,
uploadedAlgorithm: algorithm,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: detected.srcCompressorName,
}
}
// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
*operation = d.operation
// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
*algorithm = d.uploadedAlgorithm
if *annotations == nil {
*annotations = map[string]string{}
}
maps.Copy(*annotations, d.uploadedAnnotations)
}
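// Sketch of how a caller consumes updateCompressionEdits; the step data and the
// annotation key are fabricated for illustration (exampleUpdateCompressionEdits
// is hypothetical).
func exampleUpdateCompressionEdits() {
	d := &bpCompressionStepData{
		operation:           types.Compress,
		uploadedAlgorithm:   &compression.Gzip,
		uploadedAnnotations: map[string]string{"example.hypothetical/annotation": "value"},
	}
	var operation types.LayerCompression
	var algorithm *compressiontypes.Algorithm
	var annotations map[string]string // nil; updateCompressionEdits allocates it
	d.updateCompressionEdits(&operation, &algorithm, &annotations)
	fmt.Printf("operation=%v algorithm=%s annotations=%v\n", operation, algorithm.Name(), annotations)
}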
// recordValidatedDigestData updates c.blobInfoCache with data about the created uploadedInfo and the original srcInfo.
// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
// Don’t record any associations that involve encrypted data. This is a bit crude,
// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
// might be safe, but it’s not trivially obvious, so let’s be conservative for now.
// This crude approach also means we don’t need to record whether a blob is encrypted
// in the blob info cache (which would probably be necessary for any more complex logic),
// and the simplicity is attractive.
if !encryptionStep.encrypting && !decryptionStep.decrypting {
// If d.operation != types.PreserveOriginal, we now have two reliable digest values:
// srcinfo.Digest describes the pre-d.operation input, verified by digestingReader
// uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
// (because stream.info.Digest == "", this must have been computed afresh).
switch d.operation {
case types.PreserveOriginal:
break // Do nothing, we have only one digest and we might not have even verified it.
case types.Compress:
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
case types.Decompress:
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
default:
return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
}
}
if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
}
if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
}
return nil
}
// close closes objects that carry state throughout the compression/decompression operation.
func (d *bpCompressionStepData) close() {
for _, c := range d.closers {
c.Close()
}
}
// doCompression reads all input from src and writes its compressed equivalent to dest.
func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
if err != nil {
return err
}
buf := make([]byte, compressionBufferSize)
_, err = io.CopyBuffer(compressor, src, buf) // on success err is nil, which makes compressGoroutine’s deferred CloseWithError(nil) act as a plain Close of dest
if err != nil {
compressor.Close()
return err
}
return compressor.Close()
}
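// A hypothetical sketch of calling doCompression directly with an in-memory
// source and destination; a nil compressionLevel selects the algorithm’s
// default, and metadata is only modified by variants that produce annotations
// (e.g. zstd:chunked).
func exampleDoCompression() error {
	var dest bytes.Buffer
	annotations := map[string]string{}
	if err := doCompression(&dest, bytes.NewReader([]byte("layer contents")), annotations, compression.Gzip, nil); err != nil {
		return err
	}
	fmt.Printf("compressed to %d bytes\n", dest.Len())
	return nil
}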
// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
func (ic *imageCopier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
err = doCompression(dest, src, metadata, compressionFormat, ic.compressionLevel)
}
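// The deferred closure above matters: a plain “defer dest.CloseWithError(err)”
// would capture err’s value at defer time (the sentinel panic error), not the
// final result of doCompression. The standalone, hypothetical sketch below
// demonstrates the same lazy-evaluation pattern with io.Pipe.
func exampleLazyCloseWithError() {
	pr, pw := io.Pipe()
	go func() {
		err := errors.New("internal error: unexpected panic")
		defer func() { _ = pw.CloseWithError(err) }() // err is read only when the goroutine returns
		_, err = pw.Write([]byte("data"))             // on success this resets err to nil
	}()
	data, err := io.ReadAll(pr)
	fmt.Printf("read %q, err=%v\n", data, err) // read "data", err=<nil>: the reader sees a clean EOF
}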
// compressedStream returns a stream with the contents of the input reader compressed using algorithm, and a metadata map.
// The caller must close the returned reader.
// AFTER the stream is consumed, metadata will be updated with annotations to use on the data.
func (ic *imageCopier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
pipeReader, pipeWriter := io.Pipe()
annotations := map[string]string{}
// If the goroutine fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
// e.g. because the caller has exited and closed the returned pipeReader, so that further writing to the pipe fails,
// we don’t care.
go ic.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
return pipeReader, annotations
}
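// Sketch of consuming compressedStream’s contract (exampleCompressedStream is
// hypothetical): the annotations map is only guaranteed to be complete after
// the returned reader has been fully drained. An *imageCopier is taken as a
// parameter because constructing one here is out of scope.
func exampleCompressedStream(ic *imageCopier) error {
	reader, annotations := ic.compressedStream(bytes.NewReader([]byte("layer contents")), compression.Zstd)
	defer reader.Close()
	compressed, err := io.ReadAll(reader)
	if err != nil {
		return err
	}
	fmt.Printf("compressed %d bytes; annotations now valid: %v\n", len(compressed), annotations)
	return nil
}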