File: update.go

Package: docker.io 27.5.1+dfsg4-1
package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/errdefs"
	"github.com/pkg/errors"
)

// ContainerUpdate updates the configuration of the container with the given name.
func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) {
	var warnings []string

	daemonCfg := daemon.config()
	warnings, err := daemon.verifyContainerSettings(daemonCfg, hostConfig, nil, true)
	if err != nil {
		return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err)
	}

	if err := daemon.update(name, hostConfig); err != nil {
		return container.ContainerUpdateOKBody{Warnings: warnings}, err
	}

	return container.ContainerUpdateOKBody{Warnings: warnings}, nil
}

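// update applies hostConfig to the named container: it validates the
// container's state, persists the updated configuration, and, if the
// container has a running task, pushes the new resource limits down to it.
// If a later step fails, the previous hostConfig is restored.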
func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error {
	if hostConfig == nil {
		return nil
	}

	ctr, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}

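	// Keep a copy of the current hostConfig so it can be restored if a
	// later step fails (unless the container is being removed or is dead).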
	restoreConfig := false
	backupHostConfig := *ctr.HostConfig

	defer func() {
		if restoreConfig {
			ctr.Lock()
			if !ctr.RemovalInProgress && !ctr.Dead {
				ctr.HostConfig = &backupHostConfig
				ctr.CheckpointTo(context.WithoutCancel(context.TODO()), daemon.containersReplica)
			}
			ctr.Unlock()
		}
	}()

	ctr.Lock()

	if ctr.RemovalInProgress || ctr.Dead {
		ctr.Unlock()
		return errCannotUpdate(ctr.ID, fmt.Errorf(`container is marked for removal and cannot be "update"`))
	}

	if err := ctr.UpdateContainer(hostConfig); err != nil {
		restoreConfig = true
		ctr.Unlock()
		return errCannotUpdate(ctr.ID, err)
	}
	if err := ctr.CheckpointTo(context.TODO(), daemon.containersReplica); err != nil {
		restoreConfig = true
		ctr.Unlock()
		return errCannotUpdate(ctr.ID, err)
	}

	ctr.Unlock()

	// If the restart policy changed, the container monitor needs to be updated.
	if hostConfig.RestartPolicy.Name != "" {
		ctr.UpdateMonitor(hostConfig.RestartPolicy)
	}

	defer daemon.LogContainerEvent(ctr, events.ActionUpdate)

	// If the container is not running, updating the hostConfig struct is
	// enough; resources will be applied when the container is started again.
	// If the container is running (including paused), the new configuration
	// also has to be pushed to the running task.
	ctr.Lock()
	isRestarting := ctr.Restarting
	tsk, err := ctr.GetRunningTask()
	ctr.Unlock()
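	// GetRunningTask returns a conflict error when the container has no
	// running task; in that case (or while restarting) the persisted
	// config is all that needs to change.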
	if errdefs.IsConflict(err) || isRestarting {
		return nil
	}
	if err != nil {
		return err
	}

	if err := tsk.UpdateResources(context.TODO(), toContainerdResources(hostConfig.Resources)); err != nil {
		restoreConfig = true
		// TODO: it would be nice if containerd responded with better errors here so we can classify this better.
		return errCannotUpdate(ctr.ID, errdefs.System(err))
	}

	return nil
}

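// errCannotUpdate wraps err with a message naming the container that
// could not be updated.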
func errCannotUpdate(containerID string, err error) error {
	return errors.Wrap(err, "Cannot update container "+containerID)
}