File: container.go

// SPDX-License-Identifier: Apache-2.0

// This file handles container checkpoint archives

package internal

import (
	"archive/tar"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"time"

	metadata "github.com/checkpoint-restore/checkpointctl/lib"
	"github.com/checkpoint-restore/go-criu/v7/crit"
	"github.com/containers/storage/pkg/archive"
	"github.com/olekukonko/tablewriter"
	spec "github.com/opencontainers/runtime-spec/specs-go"
)

var pageSize = os.Getpagesize()

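// containerMetadata mirrors the JSON structure stored in the
// io.kubernetes.cri-o.Metadata annotation of a CRI-O spec dump.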
type containerMetadata struct {
	Name    string `json:"name,omitempty"`
	Attempt uint32 `json:"attempt,omitempty"`
}

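// containerInfo collects the engine-independent container details that
// are displayed for a checkpoint.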
type containerInfo struct {
	Name      string
	IP        string
	MAC       string
	Created   string
	Engine    string
	Namespace string
	Pod       string
}

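// checkpointInfo bundles everything read from a single checkpoint archive:
// the container details, the spec and config dumps, and the archive sizes.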
type checkpointInfo struct {
	containerInfo *containerInfo
	specDump      *spec.Spec
	configDump    *metadata.ContainerConfig
	archiveSizes  *archiveSizes
}

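// getPodmanInfo extracts container details from a Podman (libpod) checkpoint.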
func getPodmanInfo(containerConfig *metadata.ContainerConfig, _ *spec.Spec) *containerInfo {
	return &containerInfo{
		Name:    containerConfig.Name,
		Created: containerConfig.CreatedTime.Format(time.RFC3339),
		Engine:  "Podman",
	}
}

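// getContainerdInfo extracts container details from a containerd checkpoint
// using the io.kubernetes.cri.* annotations of the spec dump.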
func getContainerdInfo(containerConfig *metadata.ContainerConfig, specDump *spec.Spec) *containerInfo {
	return &containerInfo{
		Name:      specDump.Annotations["io.kubernetes.cri.container-name"],
		Created:   containerConfig.CreatedTime.Format(time.RFC3339),
		Engine:    "containerd",
		Namespace: specDump.Annotations["io.kubernetes.cri.sandbox-namespace"],
		Pod:       specDump.Annotations["io.kubernetes.cri.sandbox-name"],
	}
}

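// getCRIOInfo extracts container details from a CRI-O checkpoint by parsing
// the io.kubernetes.cri-o.* and io.kubernetes.pod.* annotations of the spec dump.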
func getCRIOInfo(_ *metadata.ContainerConfig, specDump *spec.Spec) (*containerInfo, error) {
	cm := containerMetadata{}
	if err := json.Unmarshal([]byte(specDump.Annotations["io.kubernetes.cri-o.Metadata"]), &cm); err != nil {
		return nil, fmt.Errorf("failed to read io.kubernetes.cri-o.Metadata: %w", err)
	}

	return &containerInfo{
		IP:        specDump.Annotations["io.kubernetes.cri-o.IP.0"],
		Name:      cm.Name,
		Created:   specDump.Annotations["io.kubernetes.cri-o.Created"],
		Engine:    "CRI-O",
		Namespace: specDump.Annotations["io.kubernetes.pod.namespace"],
		Pod:       specDump.Annotations["io.kubernetes.pod.name"],
	}, nil
}

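// getCheckpointInfo reads the config and spec dumps for a checkpoint task and
// collects the container details and archive sizes for it.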
func getCheckpointInfo(task Task) (*checkpointInfo, error) {
	info := &checkpointInfo{}
	var err error

	info.configDump, _, err = metadata.ReadContainerCheckpointConfigDump(task.OutputDir)
	if err != nil {
		return nil, err
	}
	info.specDump, _, err = metadata.ReadContainerCheckpointSpecDump(task.OutputDir)
	if err != nil {
		return nil, err
	}

	info.containerInfo, err = getContainerInfo(info.specDump, info.configDump)
	if err != nil {
		return nil, err
	}

	info.archiveSizes, err = getArchiveSizes(task.CheckpointFilePath)
	if err != nil {
		return nil, err
	}

	return info, nil
}

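// ShowContainerCheckpoints prints a table with the details of the given
// checkpoint tasks. For a single checkpoint, the optional columns (IP, MAC,
// root fs diff size) are only shown when the corresponding data is present;
// for multiple checkpoints all columns are always shown.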
func ShowContainerCheckpoints(tasks []Task) error {
	table := tablewriter.NewWriter(os.Stdout)
	header := []string{
		"Container",
		"Image",
		"ID",
		"Runtime",
		"Created",
		"Engine",
	}
	// Set all columns in the table header upfront when displaying more than one checkpoint
	if len(tasks) > 1 {
		header = append(header, "IP", "MAC", "CHKPT Size", "Root Fs Diff Size")
	}

	for _, task := range tasks {
		info, err := getCheckpointInfo(task)
		if err != nil {
			return err
		}

		var row []string
		row = append(row, info.containerInfo.Name)
		row = append(row, info.configDump.RootfsImageName)
		if len(info.configDump.ID) > 12 {
			row = append(row, info.configDump.ID[:12])
		} else {
			row = append(row, info.configDump.ID)
		}

		row = append(row, info.configDump.OCIRuntime)
		row = append(row, info.containerInfo.Created)
		row = append(row, info.containerInfo.Engine)

		if len(tasks) == 1 {
			fmt.Printf("\nDisplaying container checkpoint data from %s\n\n", task.CheckpointFilePath)

			if info.containerInfo.IP != "" {
				header = append(header, "IP")
				row = append(row, info.containerInfo.IP)
			}
			if info.containerInfo.MAC != "" {
				header = append(header, "MAC")
				row = append(row, info.containerInfo.MAC)
			}

			header = append(header, "CHKPT Size")
			row = append(row, metadata.ByteToString(info.archiveSizes.checkpointSize))

			// Display root fs diff size if available
			if info.archiveSizes.rootFsDiffTarSize != 0 {
				header = append(header, "Root Fs Diff Size")
				row = append(row, metadata.ByteToString(info.archiveSizes.rootFsDiffTarSize))
			}
		} else {
			row = append(row, info.containerInfo.IP)
			row = append(row, info.containerInfo.MAC)
			row = append(row, metadata.ByteToString(info.archiveSizes.checkpointSize))
			row = append(row, metadata.ByteToString(info.archiveSizes.rootFsDiffTarSize))
		}

		table.Append(row)
	}

	table.SetHeader(header)
	table.SetAutoMergeCells(false)
	table.SetRowLine(true)
	table.Render()

	return nil
}

func getContainerInfo(specDump *spec.Spec, containerConfig *metadata.ContainerConfig) (*containerInfo, error) {
	var ci *containerInfo
	switch m := specDump.Annotations["io.container.manager"]; m {
	case "libpod":
		ci = getPodmanInfo(containerConfig, specDump)
	case "cri-o":
		var err error
		ci, err = getCRIOInfo(containerConfig, specDump)
		if err != nil {
			return nil, fmt.Errorf("getting container checkpoint information failed: %w", err)
		}
	default:
		ci = getContainerdInfo(containerConfig, specDump)
	}

	return ci, nil
}

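// hasPrefix reports whether path, with any leading "./" stripped, starts with prefix.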
func hasPrefix(path, prefix string) bool {
	return strings.HasPrefix(strings.TrimPrefix(path, "./"), prefix)
}

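// archiveSizes holds the accumulated sizes (in bytes) of the different
// components found in a checkpoint archive.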
type archiveSizes struct {
	checkpointSize    int64
	rootFsDiffTarSize int64
	pagesSize         int64
	amdgpuPagesSize   int64
}

// getArchiveSizes calculates the sizes of different components within a container checkpoint.
func getArchiveSizes(archiveInput string) (*archiveSizes, error) {
	result := &archiveSizes{}

	err := iterateTarArchive(archiveInput, func(r *tar.Reader, header *tar.Header) error {
		if header.FileInfo().Mode().IsRegular() {
			if hasPrefix(header.Name, metadata.CheckpointDirectory) {
				// Add the file size to the total checkpoint size
				result.checkpointSize += header.Size
				if hasPrefix(header.Name, filepath.Join(metadata.CheckpointDirectory, metadata.PagesPrefix)) {
					result.pagesSize += header.Size
				} else if hasPrefix(header.Name, filepath.Join(metadata.CheckpointDirectory, metadata.AmdgpuPagesPrefix)) {
					result.amdgpuPagesSize += header.Size
				}
			} else if hasPrefix(header.Name, metadata.RootFsDiffTar) {
				// Read the size of rootfs diff
				result.rootFsDiffTarSize = header.Size
			}
		}
		return nil
	})
	return result, err
}

// UntarFiles unpacks only the specified files from an archive into the destination directory.
func UntarFiles(src, dest string, files []string) error {
	if err := iterateTarArchive(src, func(r *tar.Reader, header *tar.Header) error {
		// Check if the current entry is one of the target files
		for _, file := range files {
			if strings.Contains(header.Name, file) {
				// Create the destination folder with the execute bit set so
				// that files can be created inside it
				if err := os.MkdirAll(filepath.Join(dest, filepath.Dir(header.Name)), 0o755); err != nil {
					return err
				}
				// Create the destination file
				destFile, err := os.Create(filepath.Join(dest, header.Name))
				if err != nil {
					return err
				}
				defer destFile.Close()

				// Copy the contents of the entry to the destination file
				_, err = io.Copy(destFile, r)
				if err != nil {
					return err
				}

				// File successfully extracted, move to the next file
				break
			}
		}
		return nil
	}); err != nil {
		return fmt.Errorf("unpacking of checkpoint archive failed: %w", err)
	}

	return nil
}

// isFileInArchive checks if a file or directory with the specified pattern exists in the archive.
// It returns true if the file or directory is found, and false otherwise.
func isFileInArchive(archiveInput, pattern string, isDir bool) (bool, error) {
	found := false

	err := iterateTarArchive(archiveInput, func(_ *tar.Reader, header *tar.Header) error {
		// Check if the current file or directory matches the pattern and type
		if hasPrefix(header.Name, pattern) && header.FileInfo().Mode().IsDir() == isDir {
			found = true
		}
		return nil
	})
	return found, err
}

// iterateTarArchive reads a tar archive from the specified input file,
// decompresses it, and iterates through each entry, invoking the provided callback function.
func iterateTarArchive(archiveInput string, callback func(r *tar.Reader, header *tar.Header) error) error {
	archiveFile, err := os.Open(archiveInput)
	if err != nil {
		return err
	}
	defer archiveFile.Close()

	// Decompress the archive
	stream, err := archive.DecompressStream(archiveFile)
	if err != nil {
		return err
	}
	defer stream.Close()

	// Create a tar reader to read the files from the decompressed archive
	tarReader := tar.NewReader(stream)

	for {
		header, err := tarReader.Next()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}

		if err = callback(tarReader, header); err != nil {
			return err
		}
	}

	return nil
}

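// getCmdline reads the command line of the given PID from the checkpoint's
// memory pages and returns it as a space-separated string.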
func getCmdline(checkpointOutputDir string, pid uint32) (cmdline string, err error) {
	mr, err := crit.NewMemoryReader(filepath.Join(checkpointOutputDir, metadata.CheckpointDirectory), pid, pageSize)
	if err != nil {
		return
	}

	buffer, err := mr.GetPsArgs()
	if err != nil {
		return
	}

	cmdline = strings.Join(strings.Split(buffer.String(), "\x00"), " ")
	return
}

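// getPsEnvVars reads the environment variables of the given PID from the
// checkpoint's memory pages.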
func getPsEnvVars(checkpointOutputDir string, pid uint32) (envVars []string, err error) {
	mr, err := crit.NewMemoryReader(filepath.Join(checkpointOutputDir, metadata.CheckpointDirectory), pid, pageSize)
	if err != nil {
		return
	}

	buffer, err := mr.GetPsEnvVars()
	if err != nil {
		return
	}

	for _, envVar := range strings.Split(buffer.String(), "\x00") {
		if envVar != "" {
			envVars = append(envVars, envVar)
		}
	}

	return
}