File: migrate.go

// This file provides the primitives for server to server migration.
//
// See https://github.com/lxc/incus/blob/main/doc/migration.md for a complete
// description.

package main

import (
	"context"
	"fmt"
	"net/http"
	"sync"
	"time"

	"github.com/gorilla/websocket"
	"google.golang.org/protobuf/proto"

	"github.com/lxc/incus/v6/internal/jmap"
	"github.com/lxc/incus/v6/internal/migration"
	"github.com/lxc/incus/v6/internal/server/instance"
	localMigration "github.com/lxc/incus/v6/internal/server/migration"
	"github.com/lxc/incus/v6/internal/server/operations"
	"github.com/lxc/incus/v6/shared/api"
	"github.com/lxc/incus/v6/shared/idmap"
)

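// migrationFields holds the state shared between migration sources and sinks:
// the per-name connection map, the write lock for the control connection, and
// the instance and storage options for the migration.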
type migrationFields struct {
	controlLock sync.Mutex

	conns map[string]*migrationConn

	// container specific fields
	live         bool
	instanceOnly bool
	instance     instance.Instance

	// storage specific fields
	volumeOnly        bool
	allowInconsistent bool
	storagePool       string
}

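// send writes one protobuf message to the control connection, holding
// controlLock to serialize writes.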
func (c *migrationFields) send(m proto.Message) error {
	/* gorilla websocket doesn't allow concurrent writes and panic()s
	 * if it sees them (which is reasonable). If e.g. we happen to
	 * fail, get scheduled, start our write, then get unscheduled
	 * before the write completes while another goroutine, reacting
	 * to an error received from the other side (due to our previous
	 * close), starts a write of its own, we end up with concurrent
	 * writes, which causes the whole daemon to panic.
	 *
	 * Instead, let's lock sends on the control connection so that we
	 * only ever write one message at a time.
	 */
	c.controlLock.Lock()
	defer c.controlLock.Unlock()

	conn, err := c.conns[api.SecretNameControl].WebSocket(context.TODO())
	if err != nil {
		return fmt.Errorf("Control connection not initialized: %w", err)
	}

	_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 30))
	err = migration.ProtoSend(conn, m)
	if err != nil {
		return err
	}

	return nil
}

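// recv reads one protobuf message from the control connection.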
func (c *migrationFields) recv(m proto.Message) error {
	conn, err := c.conns[api.SecretNameControl].WebSocket(context.TODO())
	if err != nil {
		return fmt.Errorf("Control connection not initialized: %w", err)
	}

	return migration.ProtoRecv(conn, m)
}

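// disconnect writes a normal closure message on the control connection, then
// force-closes all migration connections.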
func (c *migrationFields) disconnect() {
	c.controlLock.Lock()
	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	defer cancel()
	conn, _ := c.conns[api.SecretNameControl].WebSocket(ctx)
	if conn != nil {
		closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
		_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 30))
		_ = conn.WriteMessage(websocket.CloseMessage, closeMsg)
	}

	c.controlLock.Unlock()

	/* Below we just Close(), which doesn't actually write to the
	 * websocket, it just closes the underlying connection. If e.g. there
	 * is still a filesystem transfer going on, but the other side has run
	 * out of disk space, writing an actual CloseMessage here will cause
	 * gorilla websocket to panic. Instead, we just force close this
	 * connection, since we report the error over the control channel
	 * anyway.
	 */
	for _, conn := range c.conns {
		conn.Close()
	}
}

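// sendControl reports err (or success when err is nil) over the control
// connection and tears down all connections on error.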
func (c *migrationFields) sendControl(err error) {
	c.controlLock.Lock()
	conn, _ := c.conns[api.SecretNameControl].WebSocket(context.TODO())
	if conn != nil {
		_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
		migration.ProtoSendControl(conn, err)
	}

	c.controlLock.Unlock()

	if err != nil {
		c.disconnect()
	}
}

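// controlChannel returns a channel that yields a single ControlResponse read
// from the control connection. A typical caller selects on it alongside
// other events, e.g. (illustrative sketch, not code from this file):
//
//	select {
//	case resp := <-c.controlChannel():
//		if resp.Err != nil {
//			return resp.Err
//		}
//	case <-ctx.Done():
//		return ctx.Err()
//	}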
func (c *migrationFields) controlChannel() <-chan *localMigration.ControlResponse {
	ch := make(chan *localMigration.ControlResponse)
	go func() {
		resp := localMigration.ControlResponse{}
		resp.Err = c.recv(&resp.MigrationControl)
		ch <- &resp
	}()

	return ch
}

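// migrationSourceWs represents the sending side of a migration over
// websockets, including the target details used in push mode.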
type migrationSourceWs struct {
	migrationFields

	clusterMoveSourceName string

	pushCertificate  string
	pushOperationURL string
	pushSecrets      map[string]string
}

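// Metadata returns the connection secrets for the migration source, keyed by
// connection name.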
func (s *migrationSourceWs) Metadata() any {
	secrets := make(jmap.Map, len(s.conns))
	for connName, conn := range s.conns {
		secrets[connName] = conn.Secret()
	}

	return secrets
}

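// Connect matches an incoming HTTP request to one of the source's migration
// websockets by secret and upgrades it.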
func (s *migrationSourceWs) Connect(op *operations.Operation, r *http.Request, w http.ResponseWriter) error {
	incomingSecret := r.FormValue("secret")
	if incomingSecret == "" {
		return api.StatusErrorf(http.StatusBadRequest, "Missing migration source secret")
	}

	for connName, conn := range s.conns {
		if incomingSecret != conn.Secret() {
			continue
		}

		err := conn.AcceptIncoming(r, w)
		if err != nil {
			return fmt.Errorf("Failed accepting incoming migration source %q connection: %w", connName, err)
		}

		return nil
	}

	// If we didn't find the right secret, the user provided a bad one, so return 403, not 404, since this
	// operation actually exists.
	return api.StatusErrorf(http.StatusForbidden, "Invalid migration source secret")
}

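// migrationSink represents the receiving side of a migration over websockets.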
type migrationSink struct {
	migrationFields

	url                   string
	push                  bool
	clusterMoveSourceName string
	refresh               bool
	refreshExcludeOlder   bool
}

// migrationSinkArgs contains the arguments used to configure a migration sink.
type migrationSinkArgs struct {
	// General migration fields
	Dialer  *websocket.Dialer
	Push    bool
	Secrets map[string]string
	URL     string

	// Instance specific fields
	Instance              instance.Instance
	InstanceOnly          bool
	Idmap                 *idmap.Set
	Live                  bool
	Refresh               bool
	RefreshExcludeOlder   bool
	ClusterMoveSourceName string
	Snapshots             []*migration.Snapshot

	// Storage specific fields
	StoragePool string
	VolumeOnly  bool
	VolumeSize  int64

	// Transport specific fields
	RsyncFeatures []string
}

// Metadata returns the connection secrets for the migration sink, keyed by
// connection name.
func (s *migrationSink) Metadata() any {
	secrets := make(jmap.Map, len(s.conns))
	for connName, conn := range s.conns {
		secrets[connName] = conn.Secret()
	}

	return secrets
}

// Connect matches an incoming HTTP request to one of the sink's migration
// websockets by secret and upgrades it.
func (s *migrationSink) Connect(op *operations.Operation, r *http.Request, w http.ResponseWriter) error {
	incomingSecret := r.FormValue("secret")
	if incomingSecret == "" {
		return api.StatusErrorf(http.StatusBadRequest, "Missing migration sink secret")
	}

	for connName, conn := range s.conns {
		if incomingSecret != conn.Secret() {
			continue
		}

		err := conn.AcceptIncoming(r, w)
		if err != nil {
			return fmt.Errorf("Failed accepting incoming migration sink %q connection: %w", connName, err)
		}

		return nil
	}

	// If we didn't find the right secret, the user provided a bad one, so return 403, not 404, since this
	// operation actually exists.
	return api.StatusErrorf(http.StatusForbidden, "Invalid migration sink secret")
}