package inline
import (
"context"
"encoding/json"
"github.com/containerd/containerd/labels"
"github.com/moby/buildkit/cache/remotecache"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/compression"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// ResolveCacheExporterFunc returns a resolver that always hands back a
// fresh inline cache exporter. The session group and attribute map are
// not needed for inline export and are ignored.
func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc {
	return func(_ context.Context, _ session.Group, _ map[string]string) (remotecache.Exporter, error) {
		return NewExporter(), nil
	}
}
// NewExporter constructs an inline cache exporter backed by a new set of
// cache chains. The same chains value is stored twice: once as the embedded
// CacheExporterTarget interface and once as a concrete *v1.CacheChains so
// ExportForLayers can call Marshal on it.
func NewExporter() remotecache.Exporter {
	chains := v1.NewCacheChains()
	return &exporter{
		CacheExporterTarget: chains,
		chains:              chains,
	}
}
// exporter implements remotecache.Exporter for the inline cache format.
// It embeds the solver's CacheExporterTarget (which records cache metadata
// during the solve) and additionally keeps the concrete *v1.CacheChains
// handle so ExportForLayers/reset can marshal and replace the chains.
type exporter struct {
	solver.CacheExporterTarget
	chains *v1.CacheChains
}
// Name returns the human-readable description shown in progress output
// while the inline cache is being exported.
func (*exporter) Name() string {
	const progressName = "exporting inline cache"
	return progressName
}
// Config reports the exporter configuration; inline cache export uses the
// default compression settings.
func (ce *exporter) Config() remotecache.Config {
	cfg := remotecache.Config{
		Compression: compression.New(compression.Default),
	}
	return cfg
}
// Finalize is a no-op for inline export: the cache payload is produced by
// ExportForLayers instead, so there is no trailing work and no metadata.
func (ce *exporter) Finalize(_ context.Context) (map[string]string, error) {
	return nil, nil
}
// reset discards the accumulated cache chains and installs a fresh set,
// updating both the concrete handle and the embedded interface.
func (ce *exporter) reset() {
	fresh := v1.NewCacheChains()
	ce.chains = fresh
	ce.CacheExporterTarget = fresh
}
// ExportForLayers renders the recorded cache chains as the inline cache
// payload (a JSON-encoded array of v1.CacheRecord) restricted to the given
// image layers, in order. Records whose result layers are not a prefix of
// the image's layer order are converted to the ChainedResult form, which
// lists each layer index explicitly. Returns (nil, nil) when none of the
// recorded cache matches the provided layers. On success the exporter's
// chains are reset, so a second call starts from empty state.
func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest) ([]byte, error) {
	config, descs, err := ce.chains.Marshal(ctx)
	if err != nil {
		return nil, err
	}
	// Re-map the marshaled descriptors onto the image's layer digests.
	// layerBlobDigests[i] holds the blob digest chosen for image layer i.
	layerBlobDigests := make([]digest.Digest, len(layers))
	descs2 := map[digest.Digest]v1.DescriptorProviderPair{}
	for i, k := range layers {
		if v, ok := descs[k]; ok {
			descs2[k] = v
			layerBlobDigests[i] = k
			continue
		}
		// fallback for uncompressed digests: the image layer digest may be
		// the uncompressed form recorded in the descriptor's annotation.
		for _, v := range descs {
			if uc := v.Descriptor.Annotations[labels.LabelUncompressed]; uc == string(k) {
				descs2[v.Descriptor.Digest] = v
				layerBlobDigests[i] = v.Descriptor.Digest
			}
		}
	}
	// Re-parse the config through fresh chains with only the descriptors
	// that matched, then marshal again so cfg reflects just those layers.
	cc := v1.NewCacheChains()
	if err := v1.ParseConfig(*config, descs2, cc); err != nil {
		return nil, err
	}
	cfg, _, err := cc.Marshal(ctx)
	if err != nil {
		return nil, err
	}
	if len(cfg.Layers) == 0 {
		bklog.G(ctx).Warn("failed to match any cache with layers")
		return nil, nil
	}
	// reorder layers based on the order in the image
	blobIndexes := make(map[digest.Digest]int, len(layers))
	for i, blob := range layerBlobDigests {
		blobIndexes[blob] = i
	}
	for i, r := range cfg.Records {
		for j, rr := range r.Results {
			resultBlobs := layerToBlobs(rr.LayerIndex, cfg.Layers)
			// match being true means the result is in the same order as the image
			var match bool
			if len(resultBlobs) <= len(layers) {
				match = true
				for k, resultBlob := range resultBlobs {
					layerBlob := layers[k]
					if resultBlob != layerBlob {
						match = false
						break
					}
				}
			}
			if match {
				// The layers of the result are in the same order as the image, so we can
				// specify it just using the CacheResult struct and specifying LayerIndex
				// as the top-most layer of the result.
				rr.LayerIndex = len(resultBlobs) - 1
				r.Results[j] = rr
			} else {
				// The layers of the result are not in the same order as the image, so we
				// have to use ChainedResult to specify each layer of the result individually.
				chainedResult := v1.ChainedResult{}
				for _, resultBlob := range resultBlobs {
					idx, ok := blobIndexes[resultBlob]
					if !ok {
						return nil, errors.Errorf("failed to find blob %s in layers", resultBlob)
					}
					chainedResult.LayerIndexes = append(chainedResult.LayerIndexes, idx)
				}
				// Zero out the slot; it is filtered from r.Results below.
				r.Results[j] = v1.CacheResult{}
				r.ChainedResults = append(r.ChainedResults, chainedResult)
			}
			// remove any CacheResults that had to be converted to the ChainedResult format.
			var filteredResults []v1.CacheResult
			for _, rr := range r.Results {
				if rr != (v1.CacheResult{}) {
					filteredResults = append(filteredResults, rr)
				}
			}
			r.Results = filteredResults
			cfg.Records[i] = r
		}
	}
	// Only the records array is emitted; layers/descriptors are implied by
	// the image the inline cache is embedded in.
	dt, err := json.Marshal(cfg.Records)
	if err != nil {
		return nil, err
	}
	ce.reset()
	return dt, nil
}
// layerToBlobs walks the parent chain starting at layer index idx (a -1
// parent terminates the chain) and returns the blob digests of every layer
// in the chain, ordered from the lowest (base) layer to the highest.
func layerToBlobs(idx int, layers []v1.CacheLayer) []digest.Digest {
	var blobs []digest.Digest
	for cur := idx; cur != -1; cur = layers[cur].ParentIndex {
		blobs = append(blobs, layers[cur].Blob)
	}
	// The walk collects top-down; flip in place so the result reads bottom-up.
	for lo, hi := 0, len(blobs)-1; lo < hi; lo, hi = lo+1, hi-1 {
		blobs[lo], blobs[hi] = blobs[hi], blobs[lo]
	}
	return blobs
}