//go:build linux

package archive

import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"os"
"os/exec"
"strings"
"golang.org/x/sys/unix"
"github.com/lxc/incus/v6/shared/ioprogress"
"github.com/lxc/incus/v6/shared/logger"
"github.com/lxc/incus/v6/shared/subprocess"
)

// RunWrapper is an optional function that's used to wrap the extraction commands, useful for confinement like AppArmor.
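//
// A minimal sketch of what a caller might assign; the profile name below is a placeholder and a
// real implementation would first generate and load a profile covering output and allowedCmds:
//
//	archive.RunWrapper = func(cmd *exec.Cmd, output string, allowedCmds []string) (func(), error) {
//		aaExec, err := exec.LookPath("aa-exec")
//		if err != nil {
//			return nil, err
//		}
//
//		// Re-run the command under the (hypothetical) "archive-profile" AppArmor profile.
//		cmd.Args = append([]string{aaExec, "-p", "archive-profile", "--"}, cmd.Args...)
//		cmd.Path = aaExec
//
//		// The returned cleanup function would unload the profile in a real implementation.
//		return func() {}, nil
//	}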
var RunWrapper func(cmd *exec.Cmd, output string, allowedCmds []string) (func(), error)
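
// nullWriteCloser wraps a bytes.Buffer with a no-op Close so it satisfies io.WriteCloser
// while keeping the captured output readable.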
type nullWriteCloser struct {
*bytes.Buffer
}
func (nwc *nullWriteCloser) Close() error {
return nil
}

// ExtractWithFds runs the extractor process under a specific AppArmor profile.
// The allowedCmds argument specifies the commands that the AppArmor profile allows to be run.
// The cmdName argument is automatically added to the allowedCmds slice.
//
// This uses RunWrapper if set.
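//
// A minimal usage sketch mirroring the unsquashfs case in Unpack (the paths are placeholders):
//
//	target, err := os.OpenFile("/var/lib/example/rootfs", os.O_RDONLY, 0)
//	if err != nil {
//		return err
//	}
//	defer target.Close()
//
//	err = ExtractWithFds("unsquashfs", []string{"-f", "-d", target.Name(), "-n", "/var/lib/example/image.squashfs"}, nil, nil, target)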
func ExtractWithFds(cmdName string, args []string, allowedCmds []string, stdin io.ReadCloser, output *os.File) error {
// Needed for RunWrapper.
outputPath := output.Name()
allowedCmds = append(allowedCmds, cmdName)
// Setup the command.
var buffer bytes.Buffer
cmd := exec.Command(cmdName, args...)
cmd.Stdin = stdin
cmd.Stdout = output
cmd.Stderr = &nullWriteCloser{&buffer}
// Call the wrapper if defined.
if RunWrapper != nil {
cleanup, err := RunWrapper(cmd, outputPath, allowedCmds)
if err != nil {
return err
}
defer cleanup()
}
// Run the command.
err := cmd.Run()
if err != nil {
return subprocess.NewRunError(cmdName, args, err, nil, &buffer)
}
return nil
}

// CompressedTarReader returns a tar reader from the supplied (optionally compressed) tarball stream.
// The unpacker arguments are those returned by DetectCompressionFile().
// The returned cancelFunc should be called when finished with the reader to clean up any resources used.
// This can be done before reading to the end of the tarball if desired.
//
// This uses RunWrapper if set.
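//
// A minimal usage sketch, assuming an xz-compressed tarball. The unpacker command line here is an
// illustration (in practice it comes from DetectCompressionFile()) and outputPath is only consulted
// when RunWrapper is set:
//
//	f, err := os.Open("image.tar.xz")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//
//	tr, cancel, err := CompressedTarReader(context.Background(), f, []string{"xz", "-d", "-c"}, "/var/lib/example/output")
//	if err != nil {
//		return err
//	}
//	defer cancel()
//
//	for {
//		hdr, err := tr.Next()
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		_ = hdr // Handle the entry here.
//	}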
func CompressedTarReader(ctx context.Context, r io.ReadSeeker, unpacker []string, outputPath string) (*tar.Reader, context.CancelFunc, error) {
_, cancelFunc := context.WithCancel(ctx)
_, err := r.Seek(0, io.SeekStart)
if err != nil {
return nil, cancelFunc, err
}
var tr *tar.Reader
if len(unpacker) > 0 {
// Setup the command.
var buffer bytes.Buffer
pipeReader, pipeWriter := io.Pipe()
cmd := exec.Command(unpacker[0], unpacker[1:]...)
cmd.Stdin = io.NopCloser(r)
cmd.Stdout = pipeWriter
cmd.Stderr = &nullWriteCloser{&buffer}
// Call the wrapper if defined.
var cleanup func()
if RunWrapper != nil {
cleanup, err = RunWrapper(cmd, outputPath, []string{unpacker[0]})
if err != nil {
return nil, cancelFunc, err
}
}
// Run the command.
err := cmd.Start()
if err != nil {
return nil, cancelFunc, subprocess.NewRunError(unpacker[0], unpacker[1:], err, nil, &buffer)
}
ctxCancelFunc := cancelFunc
		// Now that the unpacker process has started, wrap the context cancel function with one that
		// waits for the unpacker process to complete.
cancelFunc = func() {
ctxCancelFunc()
_ = pipeWriter.Close()
_ = cmd.Wait()
if cleanup != nil {
cleanup()
}
}
tr = tar.NewReader(pipeReader)
} else {
tr = tar.NewReader(r)
}
return tr, cancelFunc, nil
}

// Unpack extracts the image archive at file into the directory at path.
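//
// A minimal usage sketch (the paths are placeholders; pass a tracker to report progress for tarballs):
//
//	err := Unpack("/var/lib/example/image.tar.xz", "/var/lib/example/rootfs", false, 0, nil)
//	if err != nil {
//		return err
//	}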
func Unpack(file string, path string, blockBackend bool, maxMemory int64, tracker *ioprogress.ProgressTracker) error {
extractArgs, extension, unpacker, err := DetectCompression(file)
if err != nil {
return err
}
command := ""
args := []string{}
var allowedCmds []string
var reader io.Reader
if strings.HasPrefix(extension, ".tar") {
command = "tar"
// We can't create char/block devices in unpriv containers so avoid extracting them.
args = append(args, "--anchored")
args = append(args, "--wildcards")
args = append(args, "--exclude=dev/*")
args = append(args, "--exclude=/dev/*")
args = append(args, "--exclude=./dev/*")
args = append(args, "--exclude=rootfs/dev/*")
args = append(args, "--exclude=/rootfs/dev/*")
args = append(args, "--exclude=./rootfs/dev/*")
args = append(args, "--restrict", "--force-local")
args = append(args, "-C", path, "--numeric-owner", "--xattrs-include=*")
args = append(args, extractArgs...)
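		// Read the archive from stdin.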
args = append(args, "-")
f, err := os.Open(file)
if err != nil {
return err
}
defer func() { _ = f.Close() }()
reader = f
// Attach the ProgressTracker if supplied.
if tracker != nil {
fsinfo, err := f.Stat()
if err != nil {
return err
}
tracker.Length = fsinfo.Size()
reader = &ioprogress.ProgressReader{
ReadCloser: f,
Tracker: tracker,
}
}
		// Allow the supplementary unpacker command so tar can invoke it for decompression.
if len(unpacker) > 0 {
allowedCmds = append(allowedCmds, unpacker[0])
}
} else if strings.HasPrefix(extension, ".squashfs") {
// unsquashfs does not support reading from stdin,
// so ProgressTracker is not possible.
command = "unsquashfs"
args = append(args, "-f", "-d", path, "-n")
if maxMemory != 0 {
// If maximum memory consumption is less than 256MiB, restrict unsquashfs and limit to a single thread.
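			// For example, a maxMemory of 128MiB yields "-da 128 -fr 128 -p 1": 128MiB data and
			// fragment queues and a single decompressor thread (an illustration, not program output).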
mem := maxMemory / 1024 / 1024
if err == nil && mem < 256 {
args = append(args, "-da", fmt.Sprintf("%d", mem), "-fr", fmt.Sprintf("%d", mem), "-p", "1")
}
}
args = append(args, file)
} else {
return fmt.Errorf("Unsupported image format: %s", extension)
}
outputDir, err := os.OpenFile(path, os.O_RDONLY, 0)
if err != nil {
return fmt.Errorf("Error opening directory: %w", err)
}
defer func() { _ = outputDir.Close() }()
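
	// ExtractWithFds takes an io.ReadCloser, so wrap the reader in a NopCloser; closing the
	// underlying file is handled by the deferred f.Close() above. For unsquashfs the reader
	// stays nil and the child process gets no stdin.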
var readCloser io.ReadCloser
if reader != nil {
readCloser = io.NopCloser(reader)
}
err = ExtractWithFds(command, args, allowedCmds, readCloser, outputDir)
if err != nil {
// We can't create char/block devices in unpriv containers so ignore related errors.
if command == "unsquashfs" {
runError, ok := err.(subprocess.RunError)
if !ok {
return err
}
stdErr := runError.StdErr().String()
if stdErr == "" {
return err
}
// Confirm that all errors are related to character or block devices.
found := false
for _, line := range strings.Split(stdErr, "\n") {
line = strings.TrimSpace(line)
if line == "" {
continue
}
if strings.Contains(line, "failed to create block device") {
continue
}
if strings.Contains(line, "failed to create character device") {
continue
}
// We found an actual error.
found = true
}
if !found {
// All good, assume everything unpacked.
return nil
}
}
// Check if we ran out of space
fs := unix.Statfs_t{}
err1 := unix.Statfs(path, &fs)
if err1 != nil {
return err1
}
		// Treat fewer than 10 free blocks as having run out of space.
		if int64(fs.Bfree) < 10 {
if blockBackend {
return fmt.Errorf("Unable to unpack image, run out of disk space (consider increasing your pool's volume.size)")
}
return fmt.Errorf("Unable to unpack image, run out of disk space")
}
logger.Warn("Unpack failed", logger.Ctx{"file": file, "allowedCmds": allowedCmds, "extension": extension, "path": path, "err": err})
return fmt.Errorf("Unpack failed: %w", err)
}
return nil
}